The code here is adapted from a Keras implementation of VGG, but it does not support multi-branch (multi-channel) input, and the vgg function is hard to extend. Below it is reworked to make it easier to build multi-branch image inputs and to tune hyperparameters more conveniently. The original version is shown first; the reworked file (VGG_Model.py) follows.
# from keras.models import
from keras.layers import *
from keras.models import load_model, Sequential  # Input already comes from keras.layers above
from keras import Model
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.losses import categorical_crossentropy
import keras.optimizers
import numpy as np


def vgg(input_shape, num_cls, filters_num, conv_nums):
    # print(input_shape)
    inputs = Input(shape=input_shape)
    x = inputs
    # Each stage: conv_nums[i] 3x3 'same' convolutions, then 2x2 max pooling and zero padding
    for i in range(len(conv_nums)):
        for j in range(conv_nums[i]):
            x = Conv2D(filters=filters_num[i], kernel_size=3, padding='same',
                       name='stage{0}_conv{1}'.format(i + 1, j + 1))(x)
        x = MaxPool2D((2, 2), strides=2, name='maxpool_' + str(i + 1))(x)
        x = ZeroPadding2D((1, 1))(x)
    # Classifier head
    x = Flatten(name='flatten')(x)
    x = Dense(units=4096, name='dense4096_1')(x)
    x = Dense(units=4096, name='dense4096_2')(x)
    x = Dense(units=num_cls, name='dense1000', activation='softmax')(x)
    model = Model(inputs=inputs, outputs=x, name='vgg')
    model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['acc'])
    return model


def train(net_name):
    # Load MNIST from a local npz file
    path = r'C:\Users\.keras\datasets\mnist.npz'
    with np.load(path, allow_pickle=True) as f:
        x_train, y_train = f['x_train'], f['y_train']
        x_test, y_test = f['x_test'], f['y_test']
    x_train = x_train.reshape(x_train.shape[0], 28, 28, 1).astype('float32')
    x_test = x_test.reshape(x_test.shape[0], 28, 28, 1).astype('float32')
    num_classes = 10
    x_train = x_train / 255.
    x_test = x_test / 255.
    y_train = to_categorical(y_train, num_classes)
    y_test = to_categorical(y_test, num_classes)
    batch_size = 16
    epochs = 1
    if net_name == 'vgg-19':
        filters_num = [64, 128, 256, 512, 512]
        conv_nums = [2, 2, 4, 4, 4]
    else:
        filters_num = [32, 64, 128, 256, 512]
        conv_nums = [2, 2, 3, 3, 3]
    vgg_model = vgg(input_shape=(28, 28, 1), num_cls=num_classes, filters_num=filters_num,
                    conv_nums=conv_nums)
    vgg_model.summary()
    vgg_model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.1)
    vgg_model.save('{0}-mnist.h5'.format(net_name))
    eval_res = vgg_model.evaluate(x_test, y_test)
    print(eval_res)


if __name__ == '__main__':
    train('vgg-16')
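Why the ZeroPadding2D layer sits after every pooling step becomes clear if you trace the feature-map size for a 28x28 MNIST image: the 3x3 'same' convolutions keep the size, the 2x2 stride-2 pooling halves it, and ZeroPadding2D((1, 1)) adds one pixel on each side. A minimal sketch of that arithmetic (trace_sizes is a hypothetical helper, not part of the code above):

def trace_sizes(size, num_stages=5, zero_pad=True):
    """Side length of the feature map after each VGG stage."""
    sizes = []
    for _ in range(num_stages):
        # 3x3 convs with padding='same' leave the spatial size unchanged
        size = size // 2        # MaxPool2D((2, 2), strides=2)
        if zero_pad:
            size += 2           # ZeroPadding2D((1, 1)): one pixel on each side
        sizes.append(size)
    return sizes

print(trace_sizes(28, zero_pad=True))    # [16, 10, 7, 5, 4]   -> still a usable map
print(trace_sizes(28, zero_pad=False))   # [14, 7, 3, 1, 0]    -> collapses for small inputs
print(trace_sizes(240, zero_pad=False))  # [120, 60, 30, 15, 7] -> padding unnecessary at ~240px

This is exactly the trade-off the reworked version below handles with its `if input_shape[0] < 224` check.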
"""
@author:fuzekun
@file:VGG_Model.py
@time:2022/11/22
@description: Define the VGG model for training on images; start by training with the RRI input only
"""
# from keras.models import
from keras.layers import *
from keras.models import load_model, Sequential  # Input already comes from keras.layers above
from keras import Model
from keras.datasets import mnist
from keras.utils.all_utils import to_categorical
from keras.losses import categorical_crossentropy
import keras.optimizers
import numpy as np
import tensorflow as tf
"""
When the model is built this way, the feature map is compressed to nothing by the last stage; this seems to be caused by the input image being too small, so for 240x240 inputs the ZeroPadding layer can be dropped.
"""


def vgg(input_shape, num_cls, filters_num, conv_nums, multy):
    # print(input_shape)
    inputs = Input(shape=input_shape)
    x = inputs
    for i in range(len(conv_nums)):
        for j in range(conv_nums[i]):
            x = Conv2D(filters=filters_num[i], kernel_size=3, padding='same')(x)
        x = MaxPool2D((2, 2), strides=2)(x)
        if input_shape[0] < 224:
            # Pad small inputs so the feature map does not shrink to nothing
            x = ZeroPadding2D((1, 1))(x)
    x = Flatten()(x)
    x = Dense(units=4096)(x)
    x = Dense(units=4096)(x)
    if not multy:  # single-input model: map straight to the class probabilities
        x = Dense(units=num_cls, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=x, name='vgg')
    return model


def build_vgg(net_name, input_shape, num_classes, optimizer, filter_num=[], conv_nums=[], multy=False):
    if net_name == 'vgg-19':
        filters_num = [64, 128, 256, 512, 512]
        conv_nums = [2, 2, 4, 4, 4]
    else:
        filters_num = [32, 64, 128, 256, 512]
        conv_nums = [2, 2, 3, 3, 3]
    vgg_model = vgg(input_shape=input_shape, num_cls=num_classes, filters_num=filters_num,
                    conv_nums=conv_nums, multy=multy)
    if not multy:  # branches used in a multi-input model are not compiled here
        vgg_model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
    # vgg_model.summary()
    return vgg_model


# Build the multi-input VGG model
def build_multy_VGG(net_name, input_shape, num_classes, optimizer, n_hiddens, filter_num=[], conv_nums=[]):
    # 1. Build one VGG branch per input signal (multy=True: no softmax head, no compile)
    out_rri = build_vgg(net_name, input_shape, num_classes, optimizer, filter_num, conv_nums, True)
    out_edr = build_vgg(net_name, input_shape, num_classes, optimizer, filter_num, conv_nums, True)
    out_amp = build_vgg(net_name, input_shape, num_classes, optimizer, filter_num, conv_nums, True)
    # 2. Merge the branch models (out_amp is created but not merged below)
    # print(out_rri.output)
    combined = concatenate([out_rri.output, out_edr.output])  # (None, 7, 7, 768)
    # print(combined)
    # 2.1 Fuse the concatenated features
    x = Dense(n_hiddens, activation='relu')(combined)
    x = Flatten()(x)
    # 2.2 Final output
    x = Dense(num_classes, activation='softmax')(x)
    # 2.3 Model definition is complete
    model = Model(inputs=[out_rri.input, out_edr.input], outputs=x)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
    return model


def train(net_name):
    # Load MNIST from a local npz file
    path = r'C:\Users\.keras\datasets\mnist.npz'
    with np.load(path, allow_pickle=True) as f:
        x_train, y_train = f['x_train'], f['y_train']
        x_test, y_test = f['x_test'], f['y_test']
    x_train = x_train.reshape(x_train.shape[0], 28, 28, 1).astype('float32')
    x_test = x_test.reshape(x_test.shape[0], 28, 28, 1).astype('float32')
    num_classes = 10
    x_train = x_train / 255.
    x_test = x_test / 255.
    y_train = to_categorical(y_train, num_classes)
    y_test = to_categorical(y_test, num_classes)
    batch_size = 16
    epochs = 1
    lr = 0.001
    opt = tf.keras.optimizers.Adam(learning_rate=lr)
    # num_classes must match the one-hot labels (10 for MNIST), not 2
    model = build_vgg(net_name, input_shape=(28, 28, 1), num_classes=num_classes, optimizer=opt)
    model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.1)
    model.save('{0}-mnist.h5'.format(net_name))
    eval_res = model.evaluate(x_test, y_test)
    print(eval_res)


if __name__ == '__main__':
    train('vgg-16')
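As a usage sketch (not from the original post): the combined model takes a list of two arrays, in the same order as inputs=[out_rri.input, out_edr.input]. The array names x_rri/x_edr, n_hiddens=256, and the random data below are placeholders for illustration only.

import numpy as np
import tensorflow as tf

num_classes = 2
opt = tf.keras.optimizers.Adam(learning_rate=0.001)
# n_hiddens=256 is an arbitrary placeholder for the fusion layer width
model = build_multy_VGG('vgg-16', input_shape=(28, 28, 1), num_classes=num_classes,
                        optimizer=opt, n_hiddens=256)

# Placeholder arrays standing in for the two image branches (e.g. RRI and EDR images)
x_rri = np.random.rand(32, 28, 28, 1).astype('float32')
x_edr = np.random.rand(32, 28, 28, 1).astype('float32')
y = tf.keras.utils.to_categorical(np.random.randint(0, num_classes, 32), num_classes)

# A multi-input model is fed a list of arrays, one per Input layer, in order
model.fit([x_rri, x_edr], y, epochs=1, batch_size=8)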