Deep Learning Notes: Implementing a Deep CNN in Python

Deepening the Network

What distinguishes deep learning from an ordinary neural network is the use of many hidden layers. In this example we build a CNN-based deep network whose recognition accuracy on the MNIST dataset exceeds 99% after training.

The hidden-layer structure of the network:
conv - ReLU - conv - ReLU - pool - conv - ReLU - conv - ReLU - pool - conv - ReLU - conv - ReLU - pool - affine - ReLU - dropout - affine - dropout - softmax

The complete code first:

# coding: utf-8
import sys, os
sys.path.append(r"D:\AI learning source code")  # setting so that files in the parent directory can be imported
import pickle
import numpy as np
from collections import OrderedDict
from common.layers import *


class DeepConvNet:
    """A high-accuracy ConvNet that reaches over 99% recognition accuracy.

    The network structure is:
        conv - relu - conv - relu - pool -
        conv - relu - conv - relu - pool -
        conv - relu - conv - relu - pool -
        affine - relu - dropout - affine - dropout - softmax
    """
    def __init__(self, input_dim=(1, 28, 28),
                 conv_param_1={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_2={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_3={'filter_num': 32, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_4={'filter_num': 32, 'filter_size': 3, 'pad': 2, 'stride': 1},
                 conv_param_5={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_6={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 hidden_size=50, output_size=10):
        # Initialize the weights ===========
        # Average number of connections each layer's neurons have to the
        # previous layer (TODO: compute automatically)
        pre_node_nums = np.array([1*3*3, 16*3*3, 16*3*3, 32*3*3, 32*3*3, 64*3*3, 64*4*4, hidden_size])
        weight_init_scales = np.sqrt(2.0 / pre_node_nums)  # recommended initial values when using ReLU

        self.params = {}
        pre_channel_num = input_dim[0]
        for idx, conv_param in enumerate([conv_param_1, conv_param_2, conv_param_3,
                                          conv_param_4, conv_param_5, conv_param_6]):
            self.params['W' + str(idx+1)] = weight_init_scales[idx] * np.random.randn(
                conv_param['filter_num'], pre_channel_num,
                conv_param['filter_size'], conv_param['filter_size'])
            self.params['b' + str(idx+1)] = np.zeros(conv_param['filter_num'])
            pre_channel_num = conv_param['filter_num']
        self.params['W7'] = weight_init_scales[6] * np.random.randn(64*4*4, hidden_size)
        self.params['b7'] = np.zeros(hidden_size)
        self.params['W8'] = weight_init_scales[7] * np.random.randn(hidden_size, output_size)
        self.params['b8'] = np.zeros(output_size)

        # Build the layers ===========
        self.layers = []
        self.layers.append(Convolution(self.params['W1'], self.params['b1'],
                                       conv_param_1['stride'], conv_param_1['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W2'], self.params['b2'],
                                       conv_param_2['stride'], conv_param_2['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W3'], self.params['b3'],
                                       conv_param_3['stride'], conv_param_3['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W4'], self.params['b4'],
                                       conv_param_4['stride'], conv_param_4['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W5'], self.params['b5'],
                                       conv_param_5['stride'], conv_param_5['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W6'], self.params['b6'],
                                       conv_param_6['stride'], conv_param_6['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Affine(self.params['W7'], self.params['b7']))
        self.layers.append(Relu())
        self.layers.append(Dropout(0.5))
        self.layers.append(Affine(self.params['W8'], self.params['b8']))
        self.layers.append(Dropout(0.5))

        self.last_layer = SoftmaxWithLoss()

    def predict(self, x, train_flg=False):
        for layer in self.layers:
            if isinstance(layer, Dropout):
                x = layer.forward(x, train_flg)
            else:
                x = layer.forward(x)
        return x

    def loss(self, x, t):
        y = self.predict(x, train_flg=True)
        return self.last_layer.forward(y, t)

    def accuracy(self, x, t, batch_size=100):
        if t.ndim != 1:
            t = np.argmax(t, axis=1)

        acc = 0.0
        for i in range(int(x.shape[0] / batch_size)):
            tx = x[i*batch_size:(i+1)*batch_size]
            tt = t[i*batch_size:(i+1)*batch_size]
            y = self.predict(tx, train_flg=False)
            y = np.argmax(y, axis=1)
            acc += np.sum(y == tt)
        return acc / x.shape[0]

    def gradient(self, x, t):
        # forward
        self.loss(x, t)

        # backward
        dout = 1
        dout = self.last_layer.backward(dout)

        tmp_layers = self.layers.copy()
        tmp_layers.reverse()
        for layer in tmp_layers:
            dout = layer.backward(dout)

        # collect the gradients
        grads = {}
        for i, layer_idx in enumerate((0, 2, 5, 7, 10, 12, 15, 18)):
            grads['W' + str(i+1)] = self.layers[layer_idx].dW
            grads['b' + str(i+1)] = self.layers[layer_idx].db
        return grads

    def save_params(self, file_name="params.pkl"):
        params = {}
        for key, val in self.params.items():
            params[key] = val
        with open(file_name, 'wb') as f:
            pickle.dump(params, f)

    def load_params(self, file_name="params.pkl"):
        with open(file_name, 'rb') as f:
            params = pickle.load(f)
        for key, val in params.items():
            self.params[key] = val

        for i, layer_idx in enumerate((0, 2, 5, 7, 10, 12, 15, 18)):
            self.layers[layer_idx].W = self.params['W' + str(i+1)]
            self.layers[layer_idx].b = self.params['b' + str(i+1)]

Analysis:

1

    def __init__(self, input_dim=(1, 28, 28),
                 conv_param_1={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_2={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_3={'filter_num': 32, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_4={'filter_num': 32, 'filter_size': 3, 'pad': 2, 'stride': 1},
                 conv_param_5={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_6={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 hidden_size=50, output_size=10):

Here we fix the shape of every layer: each convolution kernel is 3 × 3 with stride 1, and the padding is 1 everywhere except the 4th convolutional layer, where it is 2. The number of kernels is 16 in conv layers 1 and 2, 32 in layers 3 and 4, and 64 in layers 5 and 6. The final hidden (fully connected) layer has 50 neurons.
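
To see why the 4th conv layer uses padding 2, it helps to trace the feature-map size through the stack with the standard formula out = (size + 2·pad − filter) / stride + 1. The sketch below is a side calculation, not part of the original code:

# Side calculation: trace the feature-map size through the conv/pool stack.
def conv_out(size, filter_size, pad, stride=1):
    return (size + 2*pad - filter_size) // stride + 1

size = 28                                          # MNIST images are 28x28
for pad in [1, 1, None, 1, 2, None, 1, 1, None]:   # None marks a 2x2, stride-2 pool
    size = size // 2 if pad is None else conv_out(size, 3, pad)
    print(size)
# Prints 28 28 14 14 16 8 8 8 4: pad=2 in conv layer 4 bumps 14 up to 16 so the
# two remaining pools land on 4x4, giving the 64*4*4 = 1024 inputs to W7.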

2

pre_node_nums = np.array([1*3*3, 16*3*3, 16*3*3, 32*3*3, 32*3*3, 64*3*3, 64*4*4, hidden_size])
weight_init_scales = np.sqrt(2.0 / pre_node_nums)  # recommended initial values when using ReLU

This defines the fan-in of each layer, i.e. how many neurons of the previous layer each neuron connects to on average, and from it the standard deviation of the He initial values recommended when using ReLU.
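
As a concrete example, each neuron of the first conv layer connects to 1 × 3 × 3 = 9 inputs, so its He standard deviation works out to:

import numpy as np
# He initialization: std = sqrt(2 / fan_in); for fan_in = 1*3*3 = 9:
print(np.sqrt(2.0 / 9))   # ~0.4714, so W1 is drawn from N(0, 0.4714^2)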

3

        self.params = {}
        pre_channel_num = input_dim[0]
        for idx, conv_param in enumerate([conv_param_1, conv_param_2, conv_param_3,
                                          conv_param_4, conv_param_5, conv_param_6]):
            self.params['W' + str(idx+1)] = weight_init_scales[idx] * np.random.randn(
                conv_param['filter_num'], pre_channel_num,
                conv_param['filter_size'], conv_param['filter_size'])
            self.params['b' + str(idx+1)] = np.zeros(conv_param['filter_num'])
            pre_channel_num = conv_param['filter_num']
        self.params['W7'] = weight_init_scales[6] * np.random.randn(64*4*4, hidden_size)
        self.params['b7'] = np.zeros(hidden_size)
        self.params['W8'] = weight_init_scales[7] * np.random.randn(hidden_size, output_size)
        self.params['b8'] = np.zeros(output_size)

Here we iterate over the list of convolution parameters to obtain each layer's shape and initialize its values. Both the convolution kernels and the affine weights use He initial values; every bias is initialized to 0.
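
A quick way to confirm the result (a check sketch, assuming the class above is on the import path) is to print every parameter's shape:

# Sketch: instantiate the network and list each parameter's shape.
network = DeepConvNet()
for i in range(1, 9):
    print('W%d' % i, network.params['W%d' % i].shape,
          'b%d' % i, network.params['b%d' % i].shape)
# W1 (16, 1, 3, 3) ... W6 (64, 64, 3, 3), W7 (1024, 50), W8 (50, 10)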

4

        # Build the layers ===========
        self.layers = []
        self.layers.append(Convolution(self.params['W1'], self.params['b1'],
                                       conv_param_1['stride'], conv_param_1['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W2'], self.params['b2'],
                                       conv_param_2['stride'], conv_param_2['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W3'], self.params['b3'],
                                       conv_param_3['stride'], conv_param_3['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W4'], self.params['b4'],
                                       conv_param_4['stride'], conv_param_4['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W5'], self.params['b5'],
                                       conv_param_5['stride'], conv_param_5['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W6'], self.params['b6'],
                                       conv_param_6['stride'], conv_param_6['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Affine(self.params['W7'], self.params['b7']))
        self.layers.append(Relu())
        self.layers.append(Dropout(0.5))
        self.layers.append(Affine(self.params['W8'], self.params['b8']))
        self.layers.append(Dropout(0.5))

        self.last_layer = SoftmaxWithLoss()

The layers are constructed in network order and stored in the list layers.

Network structure:
conv (16, 3×3) - ReLU - conv (16, 3×3) - ReLU - pool (2×2, stride 2) - conv (32, 3×3) - ReLU - conv (32, 3×3) - ReLU - pool (2×2, stride 2) - conv (64, 3×3) - ReLU - conv (64, 3×3) - ReLU - pool (2×2, stride 2) - affine - ReLU - dropout (ratio 0.5) - affine - dropout (ratio 0.5) - softmax
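
You can verify that self.layers matches this sequence by printing each layer's class name (a small inspection sketch, assuming the class is importable):

# Sketch: print the layer sequence of a freshly built network.
network = DeepConvNet()
print(' - '.join(type(layer).__name__ for layer in network.layers))
# Convolution - Relu - Convolution - Relu - Pooling - ... - Affine - Dropout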

5

    def predict(self, x, train_flg=False):
        for layer in self.layers:
            if isinstance(layer, Dropout):
                x = layer.forward(x, train_flg)
            else:
                x = layer.forward(x)
        return x

This runs a forward pass through the network. When train_flg is True the network is in training mode and the Dropout layers randomly drop neurons; when it is False the network is in inference mode and all neurons are active.
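
For reference, the Dropout layer in common/layers.py that predict() relies on typically looks like the sketch below (a minimal version following this codebase's convention of scaling activations at inference time rather than using inverted dropout):

import numpy as np

class Dropout:
    """A sketch of the Dropout layer assumed by predict()."""
    def __init__(self, dropout_ratio=0.5):
        self.dropout_ratio = dropout_ratio
        self.mask = None

    def forward(self, x, train_flg=True):
        if train_flg:
            # training: zero out each neuron with probability dropout_ratio
            self.mask = np.random.rand(*x.shape) > self.dropout_ratio
            return x * self.mask
        else:
            # inference: keep all neurons, scaled by the keep probability
            return x * (1.0 - self.dropout_ratio)

    def backward(self, dout):
        # gradients flow only through the neurons that were kept
        return dout * self.mask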

6

    def accuracy(self, x, t, batch_size=100):
        if t.ndim != 1:
            t = np.argmax(t, axis=1)

        acc = 0.0
        for i in range(int(x.shape[0] / batch_size)):
            tx = x[i*batch_size:(i+1)*batch_size]
            tt = t[i*batch_size:(i+1)*batch_size]
            y = self.predict(tx, train_flg=False)
            y = np.argmax(y, axis=1)
            acc += np.sum(y == tt)
        return acc / x.shape[0]

Computes the prediction accuracy over the whole of x, processing it in batches of batch_size to keep memory use bounded.
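
Usage is straightforward (a hypothetical call, assuming a network instance and the MNIST test data are already loaded):

# Hypothetical usage: evaluate on the test set in batches of 100.
test_acc = network.accuracy(x_test, t_test, batch_size=100)
print("test accuracy: %.4f" % test_acc)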

7

    def gradient(self, x, t):
        # forward
        self.loss(x, t)

        # backward
        dout = 1
        dout = self.last_layer.backward(dout)

        tmp_layers = self.layers.copy()
        tmp_layers.reverse()
        for layer in tmp_layers:
            dout = layer.backward(dout)

        # collect the gradients
        grads = {}
        for i, layer_idx in enumerate((0, 2, 5, 7, 10, 12, 15, 18)):
            grads['W' + str(i+1)] = self.layers[layer_idx].dW
            grads['b' + str(i+1)] = self.layers[layer_idx].db
        return grads

Computes the network's gradients by backpropagation and returns them as a dictionary. The tuple (0, 2, 5, 7, 10, 12, 15, 18) lists the positions of the eight parameterized layers (six Convolution layers and two Affine layers) inside self.layers.
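
If you change the architecture, one way to avoid updating that hard-coded tuple by hand is to derive it from the layer list (a hypothetical helper, not in the original code):

# Hypothetical helper: find the indices of layers that carry weights.
param_layer_idxs = [i for i, layer in enumerate(network.layers)
                    if isinstance(layer, (Convolution, Affine))]
print(param_layer_idxs)   # [0, 2, 5, 7, 10, 12, 15, 18]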

Training Script

# coding: utf-8
import sys, os
sys.path.append(r"D:\AI learning source code")  # setting so that files in the parent directory can be imported
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from deep_convnet import DeepConvNet
from common.trainer import Trainer

(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

network = DeepConvNet()
trainer = Trainer(network, x_train, t_train, x_test, t_test,
                  epochs=20, mini_batch_size=100,
                  optimizer='Adam', optimizer_param={'lr': 0.001},
                  evaluate_sample_num_per_epoch=1000)
trainer.train()

# save the parameters
network.save_params("deep_convnet_params.pkl")
print("Saved Network Parameters!")

In the training script we instantiate DeepConvNet and train it on the MNIST dataset with mini-batch learning, using the Adam optimizer with a learning rate of 0.001. Each mini-batch holds 100 samples and training runs for 20 epochs; evaluate_sample_num_per_epoch=1000 limits the per-epoch accuracy estimate to 1000 samples to keep evaluation fast.
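
Once training has finished, the saved parameters can be loaded back into a fresh network and evaluated (a sketch using the same codebase):

# Sketch: restore the trained weights and check test accuracy.
network = DeepConvNet()
network.load_params("deep_convnet_params.pkl")
print("test accuracy:", network.accuracy(x_test, t_test))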
