
Training the prediction vector against the label vector with the softmax cross-entropy function


The open-source dataset used in this article (Kaggle Dogs vs. Cats):

The model trained and generated with the code in this article (corresponding to the model folder in the project):

Brief introduction (prerequisites: pip install opencv-python, pip install flask, and pip install tensorflow or pip install tensorflow-gpu): this article uses Python 3 and TensorFlow to implement a beginner-friendly VGG16 model for single-label classification tasks (an image is either a cat or a dog). If you are not familiar with VGG16, look it up first. The network is built layer by layer rather than with slim or Keras, which makes it easier for beginners to follow; experienced readers can rewrite this part with a higher-level library.
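The dependencies can also be collected in a requirements file. A minimal sketch (the article only names the packages; the version pins are an assumption I add because the code below uses the TensorFlow 1.x API):

opencv-python
flask
tensorflow<2    # or tensorflow-gpu<2; tf.placeholder, tf.Session etc. are 1.x-only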

(Preview: a later article will cover multi-label classification, which differs somewhat from single-label classification.)

Overall training logic:

0. Read the training images asynchronously through a pipeline, which saves memory and improves efficiency.
1. Feed the images into the CNN to extract features.
2. Flatten the feature maps and feed them into the FC layers to obtain a class-prediction vector.
3. Train the prediction vector against the label vector with the softmax cross-entropy loss to obtain the final model.

Overall inference logic:

1. Feed the image into the CNN to extract features.
2. Flatten the feature maps and feed them into the FC layers to obtain a class-prediction vector.
3. Apply softmax to the prediction vector, take its largest entry, and map it to the corresponding class.

Serving over HTTP: the whole project is started as a web service with the Flask framework, so it can be called over HTTP. After starting the service, test it with the address shown below.
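For example, once the service is running it can be exercised from Python. A minimal sketch using the third-party requests library (an extra dependency, not part of this project); the URL and the img_path parameter come from the Flask route in the full listing below:

import requests

# Call the running service; the route and parameter name match the
# Flask handler defined at the bottom of the full listing.
resp = requests.get('http://127.0.0.1:5050/dogOrCat',
                    params={'img_path': './data/test1/1.jpg'})
print(resp.json())  # e.g. {'type': 'It is a dog'}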

Possible follow-up optimizations: transfer learning, model ensembling, and similar techniques can push the accuracy further; flipping the images left-right would also double the training set (see the sketch below).
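A minimal sketch of that flip idea, assuming it were wired into dataset_opt from the listing below (the article's code does not include it):

import tensorflow as tf

def augment(im):
    # Randomly mirror the image horizontally; across epochs this roughly
    # doubles the variety the network sees without storing extra files.
    return tf.image.random_flip_left_right(im)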

Run commands:

Train on the dataset: python DogVsCat.py train
Test a new image: python DogVsCat.py test
Start the HTTP service: python DogVsCat.py start

Overall project directory structure:

Figure 1: project structure

Structure of the model folder:

Figure 2: model folder structure

Training process:

Figures 3-8: training logs at epoch 1, epoch 2, epoch 3, and epoch 13 (start, middle, end)

The complete code:

# coding:utf-8
import tensorflow as tf
import os, sys, random
import numpy as np
import cv2
from flask import request
from flask import Flask
import json

app = Flask(__name__)


class DogVsCat:
    def __init__(self):
        # tunable parameters
        self.save_epoch = 1  # save the model every this many epochs
        self.train_max_num = 25000  # max training images to read, 0~25000; lower it if RAM is tight
        self.epoch_max = 13  # max number of training epochs
        self.batch_size = 16  # images per training batch; lower it if GPU memory is tight
        self.class_num = 2  # number of classes: cat and dog
        self.val_num = 20 * self.batch_size  # validation-set size; must not exceed self.train_max_num
        self.lr = 1e-4  # initial learning rate
        # parameters that need no changes
        self.x_val = []
        self.y_val = []
        self.x = None  # image batch
        self.y = None  # one-hot label batch
        self.learning_rate = None  # learning rate
        self.sess = None  # persistent tf.Session
        self.pred = None  # prediction of the CNN
        self.keep_drop = tf.placeholder(tf.float32)  # dropout keep probability

    def dogOrCat(self, img_path):
        """
        Classify a cat/dog image.
        :param img_path:
        :return:
        """
        im = cv2.imread(img_path)
        im = cv2.resize(im, (224, 224))
        im = [im]
        im = np.array(im, dtype=np.float32)
        im -= 147
        output = self.sess.run(self.output, feed_dict={self.x: im, self.keep_drop: 1.})
        ret = output.tolist()[0]
        ret = 'It is a cat' if ret[0] <= ret[1] else 'It is a dog'
        return ret

    def test(self, img_path):
        """
        Test entry point.
        :param img_path:
        :return:
        """
        self.x = tf.placeholder(tf.float32, [None, 224, 224, 3])  # input data
        self.pred = self.CNN()
        self.output = tf.nn.softmax(self.pred)
        saver = tf.train.Saver()
        # tfconfig = tf.ConfigProto(allow_soft_placement=True)
        # tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.3  # fraction of GPU memory to occupy
        # self.sess = tf.Session(config=tfconfig)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())  # initialize global tf variables
        # load the w, b parameters
        saver.restore(self.sess, './model/DogVsCat-13')
        im = cv2.imread(img_path)
        im = cv2.resize(im, (224, 224))
        im = [im]
        im = np.array(im, dtype=np.float32)
        im -= 147
        output = self.sess.run(self.output, feed_dict={self.x: im, self.keep_drop: 1.})
        ret = output.tolist()[0]
        ret = 'It is a cat' if ret[0] <= ret[1] else 'It is a dog'
        print(ret)

    def train(self):
        """
        Start training.
        :return:
        """
        self.x = tf.placeholder(tf.float32, [None, 224, 224, 3])  # input data
        self.y = tf.placeholder(tf.float32, [None, self.class_num])  # label data
        self.learning_rate = tf.placeholder(tf.float32)  # learning rate
        # build the training dataset
        x_train_list, y_train_list, x_val_list, y_val_list = self.getTrainDataset()
        print('converting lists into tensor queues')
        x_train_list_tensor = tf.convert_to_tensor(x_train_list, dtype=tf.string)
        y_train_list_tensor = tf.convert_to_tensor(y_train_list, dtype=tf.float32)
        x_val_list_tensor = tf.convert_to_tensor(x_val_list, dtype=tf.string)
        y_val_list_tensor = tf.convert_to_tensor(y_val_list, dtype=tf.float32)
        x_train_queue = tf.train.slice_input_producer(tensor_list=[x_train_list_tensor], shuffle=False)
        y_train_queue = tf.train.slice_input_producer(tensor_list=[y_train_list_tensor], shuffle=False)
        x_val_queue = tf.train.slice_input_producer(tensor_list=[x_val_list_tensor], shuffle=False)
        y_val_queue = tf.train.slice_input_producer(tensor_list=[y_val_list_tensor], shuffle=False)
        train_im, train_label = self.dataset_opt(x_train_queue, y_train_queue)
        train_batch = tf.train.batch(tensors=[train_im, train_label], batch_size=self.batch_size, num_threads=2)
        val_im, val_label = self.dataset_opt(x_val_queue, y_val_queue)
        val_batch = tf.train.batch(tensors=[val_im, val_label], batch_size=self.batch_size, num_threads=2)
        # the VGG16 network
        print('building the VGG16 network')
        self.pred = self.CNN()
        # loss function
        self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.pred, labels=self.y)
        # optimizer
        self.opt = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
        # accuracy
        self.acc_tf = tf.equal(tf.argmax(self.pred, 1), tf.argmax(self.y, 1))
        self.acc = tf.reduce_mean(tf.cast(self.acc_tf, tf.float32))
        with tf.Session() as self.sess:
            # initialize global tf variables
            self.sess.run(tf.global_variables_initializer())
            coordinator = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=self.sess, coord=coordinator)
            # model saver
            saver = tf.train.Saver()
            batch_max = len(x_train_list) // self.batch_size
            total_step = 1
            for epoch_num in range(self.epoch_max):
                lr = self.lr * (1 - (epoch_num / self.epoch_max) ** 2)  # dynamic learning rate
                for batch_num in range(batch_max):
                    x_train_tmp, y_train_tmp = self.sess.run(train_batch)
                    self.sess.run(self.opt, feed_dict={self.x: x_train_tmp, self.y: y_train_tmp,
                                                       self.learning_rate: lr, self.keep_drop: 0.5})
                    # log the metrics
                    if total_step % 20 == 0 or total_step == 1:
                        print()
                        print('epoch:%d/%d batch:%d/%d step:%d lr:%.10f'
                              % ((epoch_num + 1), self.epoch_max, (batch_num + 1), batch_max, total_step, lr))
                        # training-set metrics
                        train_loss, train_acc = self.sess.run([self.loss, self.acc],
                                                              feed_dict={self.x: x_train_tmp, self.y: y_train_tmp,
                                                                         self.keep_drop: 1.})
                        print('train_loss:%.10f train_acc:%.10f' % (np.mean(train_loss), train_acc))
                        # validation-set metrics
                        val_loss_list, val_acc_list = [], []
                        for i in range(int(self.val_num / self.batch_size)):
                            x_val_tmp, y_val_tmp = self.sess.run(val_batch)
                            val_loss, val_acc = self.sess.run([self.loss, self.acc],
                                                              feed_dict={self.x: x_val_tmp, self.y: y_val_tmp,
                                                                         self.keep_drop: 1.})
                            val_loss_list.append(np.mean(val_loss))
                            val_acc_list.append(np.mean(val_acc))
                        print(' val_loss:%.10f val_acc:%.10f' % (np.mean(val_loss_list), np.mean(val_acc_list)))
                    total_step += 1
                # save the model
                if (epoch_num + 1) % self.save_epoch == 0:
                    print('saving the model:')
                    saver.save(self.sess, './model/DogVsCat', global_step=(epoch_num + 1))
            coordinator.request_stop()
            coordinator.join(threads)

    def CNN(self):
        """
        VGG16 + FC
        :return:
        """
        # weights
        weight = {
            # input: batch_size*224*224*3
            # block 1
            'wc1_1': tf.get_variable('wc1_1', [3, 3, 3, 64]),    # conv, output batch_size*224*224*64
            'wc1_2': tf.get_variable('wc1_2', [3, 3, 64, 64]),   # conv, output batch_size*224*224*64
            # pooled output: 112*112*64
            # block 2
            'wc2_1': tf.get_variable('wc2_1', [3, 3, 64, 128]),  # conv, output batch_size*112*112*128
            'wc2_2': tf.get_variable('wc2_2', [3, 3, 128, 128]), # conv, output batch_size*112*112*128
            # pooled output: 56*56*128
            # block 3
            'wc3_1': tf.get_variable('wc3_1', [3, 3, 128, 256]), # conv, output batch_size*56*56*256
            'wc3_2': tf.get_variable('wc3_2', [3, 3, 256, 256]), # conv, output batch_size*56*56*256
            'wc3_3': tf.get_variable('wc3_3', [3, 3, 256, 256]), # conv, output batch_size*56*56*256
            # pooled output: 28*28*256
            # block 4
            'wc4_1': tf.get_variable('wc4_1', [3, 3, 256, 512]), # conv, output batch_size*28*28*512
            'wc4_2': tf.get_variable('wc4_2', [3, 3, 512, 512]), # conv, output batch_size*28*28*512
            'wc4_3': tf.get_variable('wc4_3', [3, 3, 512, 512]), # conv, output batch_size*28*28*512
            # pooled output: 14*14*512
            # block 5
            'wc5_1': tf.get_variable('wc5_1', [3, 3, 512, 512]), # conv, output batch_size*14*14*512
            'wc5_2': tf.get_variable('wc5_2', [3, 3, 512, 512]), # conv, output batch_size*14*14*512
            'wc5_3': tf.get_variable('wc5_3', [3, 3, 512, 512]), # conv, output batch_size*14*14*512
            # pooled output: 7*7*512
            # FC layer 1
            'wfc_1': tf.get_variable('wfc_1', [7*7*512, 4096]),
            # FC layer 2
            'wfc_2': tf.get_variable('wfc_2', [4096, 4096]),
            # FC layer 3
            'wfc_3': tf.get_variable('wfc_3', [4096, self.class_num]),
        }
        # biases
        biase = {
            # block 1
            'bc1_1': tf.get_variable('bc1_1', [64]),
            'bc1_2': tf.get_variable('bc1_2', [64]),
            # block 2
            'bc2_1': tf.get_variable('bc2_1', [128]),
            'bc2_2': tf.get_variable('bc2_2', [128]),
            # block 3
            'bc3_1': tf.get_variable('bc3_1', [256]),
            'bc3_2': tf.get_variable('bc3_2', [256]),
            'bc3_3': tf.get_variable('bc3_3', [256]),
            # block 4
            'bc4_1': tf.get_variable('bc4_1', [512]),
            'bc4_2': tf.get_variable('bc4_2', [512]),
            'bc4_3': tf.get_variable('bc4_3', [512]),
            # block 5
            'bc5_1': tf.get_variable('bc5_1', [512]),
            'bc5_2': tf.get_variable('bc5_2', [512]),
            'bc5_3': tf.get_variable('bc5_3', [512]),
            # FC layer 1
            'bfc_1': tf.get_variable('bfc_1', [4096]),
            # FC layer 2
            'bfc_2': tf.get_variable('bfc_2', [4096]),
            # FC layer 3
            'bfc_3': tf.get_variable('bfc_3', [self.class_num]),
        }
        # block 1
        net = tf.nn.conv2d(input=self.x, filter=weight['wc1_1'], strides=[1, 1, 1, 1], padding='SAME')  # conv
        net = tf.nn.leaky_relu(tf.nn.bias_add(net, biase['bc1_1']))  # add bias, then activate
        net = tf.nn.conv2d(net, filter=weight['wc1_2'], strides=[1, 1, 1, 1], padding='SAME')  # conv
        net = tf.nn.leaky_relu(tf.nn.bias_add(net, biase['bc1_2']))  # add bias, then activate
        net = tf.nn.max_pool(value=net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')  # pool
        # block 2
        net = tf.nn.conv2d(net, weight['wc2_1'], [1, 1, 1, 1], padding='SAME')  # conv
        net = tf.nn.leaky_relu(tf.nn.bias_add(net, biase['bc2_1']))  # add bias, then activate
        net = tf.nn.conv2d(net, weight['wc2_2'], [1, 1, 1, 1], padding='SAME')  # conv
        net = tf.nn.leaky_relu(tf.nn.bias_add(net, biase['bc2_2']))  # add bias, then activate
        net = tf.nn.max_pool(net, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')  # pool
        # block 3
        net = tf.nn.conv2d(net, weight['wc3_1'], [1, 1, 1, 1], padding='SAME')  # conv
        net = tf.nn.leaky_relu(tf.nn.bias_add(net, biase['bc3_1']))  # add bias, then activate
        net = tf.nn.conv2d(net, weight['wc3_2'], [1, 1, 1, 1], padding='SAME')  # conv
        net = tf.nn.leaky_relu(tf.nn.bias_add(net, biase['bc3_2']))  # add bias, then activate
        net = tf.nn.conv2d(net, weight['wc3_3'], [1, 1, 1, 1], padding='SAME')  # conv
        net = tf.nn.leaky_relu(tf.nn.bias_add(net, biase['bc3_3']))  # add bias, then activate
        net = tf.nn.max_pool(net, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')  # pool
        # block 4
        net = tf.nn.conv2d(net, weight['wc4_1'], [1, 1, 1, 1], padding='SAME')  # conv
        net = tf.nn.leaky_relu(tf.nn.bias_add(net, biase['bc4_1']))  # add bias, then activate
        net = tf.nn.conv2d(net, weight['wc4_2'], [1, 1, 1, 1], padding='SAME')  # conv
        net = tf.nn.leaky_relu(tf.nn.bias_add(net, biase['bc4_2']))  # add bias, then activate
        net = tf.nn.conv2d(net, weight['wc4_3'], [1, 1, 1, 1], padding='SAME')  # conv
        net = tf.nn.leaky_relu(tf.nn.bias_add(net, biase['bc4_3']))  # add bias, then activate
        net = tf.nn.max_pool(net, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')  # pool
        # block 5
        net = tf.nn.conv2d(net, weight['wc5_1'], [1, 1, 1, 1], padding='SAME')  # conv
        net = tf.nn.leaky_relu(tf.nn.bias_add(net, biase['bc5_1']))  # add bias, then activate
        net = tf.nn.conv2d(net, weight['wc5_2'], [1, 1, 1, 1], padding='SAME')  # conv
        net = tf.nn.leaky_relu(tf.nn.bias_add(net, biase['bc5_2']))  # add bias, then activate
        net = tf.nn.conv2d(net, weight['wc5_3'], [1, 1, 1, 1], padding='SAME')  # conv
        net = tf.nn.leaky_relu(tf.nn.bias_add(net, biase['bc5_3']))  # add bias, then activate
        net = tf.nn.max_pool(net, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')  # pool
        print('last-net', net)
        # flatten: stretch every feature map in the batch into one vector
        net = tf.reshape(net, shape=[-1, weight['wfc_1'].get_shape().as_list()[0]])
        print(weight['wfc_1'].get_shape())
        print('flatten', net)
        # fully connected layers
        # FC layer 1
        net = tf.matmul(net, weight['wfc_1']) + biase['bfc_1']
        net = tf.nn.dropout(net, self.keep_drop)
        net = tf.nn.relu(net)
        print('fc layer 1', net)
        # FC layer 2
        net = tf.matmul(net, weight['wfc_2']) + biase['bfc_2']
        net = tf.nn.dropout(net, self.keep_drop)
        net = tf.nn.relu(net)
        print('fc layer 2', net)
        # FC layer 3
        net = tf.matmul(net, weight['wfc_3']) + biase['bfc_3']
        print('fc layer 3', net)
        return net

    def getTrainDataset(self):
        """
        Prepare the dataset: resize the images to 224*224*3 (the training set becomes
        25000*224*224*3) and turn the labels into one-hot form.
        :return:
        """
        train_data_list = os.listdir('./data/train_data/')
        print('%d training images in total, reading %d:' % (len(train_data_list), self.train_max_num))
        random.shuffle(train_data_list)  # shuffle the order
        x_val_list = train_data_list[:self.val_num]
        y_val_list = [[0, 1] if file_name.find('cat') > -1 else [1, 0] for file_name in x_val_list]
        x_train_list = train_data_list[self.val_num:self.train_max_num]
        y_train_list = [[0, 1] if file_name.find('cat') > -1 else [1, 0] for file_name in x_train_list]
        return x_train_list, y_train_list, x_val_list, y_val_list

    def dataset_opt(self, x_train_queue, y_train_queue):
        """
        Process one image and its label.
        :param queue:
        :return:
        """
        queue = x_train_queue[0]
        contents = tf.read_file('./data/train_data/' + queue)
        im = tf.image.decode_jpeg(contents)
        im = tf.image.resize_images(images=im, size=[224, 224])
        im = tf.reshape(im, tf.stack([224, 224, 3]))
        im -= 147  # mean subtraction
        # im /= 255  # scale pixels into 0~1 to speed up convergence
        # im -= 0.5  # shift pixels into -0.5~0.5
        return im, y_train_queue[0]


if __name__ == '__main__':
    opt_type = sys.argv[1:][0]
    instance = DogVsCat()
    if opt_type == 'train':
        instance.train()
    elif opt_type == 'test':
        instance.test('./data/test1/1.jpg')
    elif opt_type == 'start':
        # keep the session resident in memory
        instance.test('./data/test1/1.jpg')
        # start the web service
        # http://127.0.0.1:5050/dogOrCat?img_path=./data/test1/1.jpg
        @app.route('/dogOrCat', methods=['GET', 'POST'])
        def dogOrCat():
            img_path = ''
            if request.method == 'POST':
                img_path = request.form.to_dict().get('img_path')
            elif request.method == 'GET':
                # img_path = request.args.get('img_path')
                img_path = request.args.to_dict().get('img_path')
            print(img_path)
            ret = instance.dogOrCat(img_path)
            print(ret)
            return json.dumps({'type': ret})
        app.run(host='0.0.0.0', port=5050, debug=False)
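To make the loss above concrete: tf.nn.softmax_cross_entropy_with_logits computes, per example, L = -sum_i(y_i * log(softmax(z)_i)). A minimal NumPy check with made-up logits (illustrative values only, not from the trained model):

import numpy as np

# Hypothetical logits for one image; [0, 1] is the one-hot label for cat
# under this article's labeling scheme.
logits = np.array([1.2, 3.4], dtype=np.float32)
label = np.array([0.0, 1.0], dtype=np.float32)

# Softmax with the usual max-subtraction for numerical stability.
probs = np.exp(logits - logits.max())
probs /= probs.sum()
loss = -np.sum(label * np.log(probs))
print(probs, loss)  # ~[0.0998, 0.9002], loss ~0.105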
