第T11周:优化器对比实验

阅读量 120 · 预计阅读时长 14 分钟

本次主要是探究不同优化器、以及不同参数配置对模型的影响,在论文当中我们也可以进行优化器的比对,以增加论文工作量。

我的环境

  • 操作系统:CentOS7
  • 显卡:RTX3090 两张
  • 显卡驱动:550.78
  • CUDA版本: 12.4
  • 语言环境:Python3.9.19
  • 编译器:Jupyter Lab
  • 深度学习环境:
    • TensorFlow-2.17.0 (GPU版本)

一、设置GPU

import tensorflow as tf
gpus = tf.config.list_physical_devices("GPU")

if gpus:
    gpu0 = gpus[0] #If multiple GPUs are present, use only GPU 0
    tf.config.experimental.set_memory_growth(gpu0, True) #Allocate GPU memory on demand instead of grabbing it all upfront
    tf.config.set_visible_devices([gpu0],"GPU")

from tensorflow          import keras
import matplotlib.pyplot as plt
import pandas            as pd
import numpy             as np
import warnings,os,PIL,pathlib

warnings.filterwarnings("ignore")             # suppress warning output
plt.rcParams['font.sans-serif']    = ['SimHei']  # use SimHei so Chinese labels render correctly
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly alongside SimHei

二、导入数据

1. 导入数据

# Dataset root: one sub-directory per class, images inside each sub-directory.
data_dir = pathlib.Path("./data")
# Count every file one level below the class folders (pattern: <class>/<image>).
image_count = sum(1 for _ in data_dir.glob('*/*'))
print("图片总数为:",image_count)
图片总数为: 1800
batch_size = 16   # samples per batch fed to the model
img_height = 336  # images are resized to 336x336 on load (see image_size below)
img_width  = 336
# A detailed walkthrough of image_dataset_from_directory() is available at:
# https://mtyjkh.blog.csdn.net/article/details/117018789
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size,
)
Found 1800 files belonging to 17 classes.
Using 1440 files for training.
# A detailed walkthrough of image_dataset_from_directory() is available at:
# https://mtyjkh.blog.csdn.net/article/details/117018789
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size,
)
Found 1800 files belonging to 17 classes.
Using 360 files for validation.
# Class names are inferred from the sub-directory names, in alphabetical order.
class_names = train_ds.class_names
print(class_names)
['Angelina Jolie', 'Brad Pitt', 'Denzel Washington', 'Hugh Jackman', 'Jennifer Lawrence', 'Johnny Depp', 'Kate Winslet', 'Leonardo DiCaprio', 'Megan Fox', 'Natalie Portman', 'Nicole Kidman', 'Robert Downey Jr', 'Sandra Bullock', 'Scarlett Johansson', 'Tom Cruise', 'Tom Hanks', 'Will Smith']
# Peek at a single batch to confirm tensor shapes:
# images are (batch, height, width, channels), labels are (batch,).
for image_batch, labels_batch in train_ds.take(1):
    print(image_batch.shape)
    print(labels_batch.shape)
(16, 336, 336, 3)
(16,)

2. 配置数据集

# Let tf.data choose prefetch buffer sizes dynamically at runtime.
AUTOTUNE = tf.data.AUTOTUNE

def train_preprocessing(image, label):
    """Scale pixel values from [0, 255] into [0, 1]; labels pass through unchanged."""
    scaled = image / 255.0
    return scaled, label

# Build the input pipelines.
# Fix vs. original: `.cache()` used to come BEFORE `.map()`, which caches the
# raw images and re-runs the preprocessing function on every epoch. Mapping
# first and caching the result does the scaling exactly once.
train_ds = (
    train_ds
    .map(train_preprocessing)        # scale pixels once...
    .cache()                         # ...then cache the preprocessed tensors
    .shuffle(1000)                   # reshuffle training order each epoch
#   .batch(batch_size)               # batching already done by image_dataset_from_directory
    .prefetch(buffer_size=AUTOTUNE)  # overlap preprocessing with training
)

val_ds = (
    val_ds
    .map(train_preprocessing)
    .cache()
    # No shuffle here: sample order has no effect on validation metrics.
    .prefetch(buffer_size=AUTOTUNE)
)

3. 数据可视化

plt.figure(figsize=(10, 8))  # figure is 10 in wide, 8 in tall
plt.suptitle("数据展示")

# Show the first 15 images of one training batch with their class names.
for images, labels in train_ds.take(1):
    for i in range(15):
        plt.subplot(4, 5, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)

        # Display the image
        plt.imshow(images[i])
        # Display the label. Fix vs. original: labels produced by
        # image_dataset_from_directory are already 0-based indices into
        # class_names; the original `labels[i]-1` shifted every caption by one
        # class (and mapped label 0 to the last class).
        plt.xlabel(class_names[labels[i]])

plt.show()

t11_optimizer_13_1.png

三、构建模型

from tensorflow.keras.layers import Dropout,Dense,BatchNormalization
from tensorflow.keras.models import Model

def create_model(optimizer='adam'):
    """Build a VGG16 transfer-learning classifier compiled with `optimizer`.

    The convolutional base is frozen (ImageNet weights); only the new head
    (Dense 170 -> BatchNorm -> Dropout -> softmax over the classes) trains.

    Args:
        optimizer: Keras optimizer instance or name string passed to compile().

    Returns:
        A compiled tf.keras Model.
    """
    # Load the pretrained backbone without its classification top.
    # Fix vs. original: input_shape is (height, width, channels) — the original
    # passed (img_width, img_height, 3), which only worked because both are 336.
    vgg16_base_model = tf.keras.applications.vgg16.VGG16(weights='imagenet',
                                                         include_top=False,
                                                         input_shape=(img_height, img_width, 3),
                                                         pooling='avg')
    # Freeze the backbone so only the new head's ~90K params are trained.
    for layer in vgg16_base_model.layers:
        layer.trainable = False

    X = vgg16_base_model.output

    X = Dense(170, activation='relu')(X)
    X = BatchNormalization()(X)
    X = Dropout(0.5)(X)  # regularize the small head against overfitting

    output = Dense(len(class_names), activation='softmax')(X)
    vgg16_model = Model(inputs=vgg16_base_model.input, outputs=output)

    # Integer labels -> sparse categorical cross-entropy.
    vgg16_model.compile(optimizer=optimizer,
                        loss='sparse_categorical_crossentropy',
                        metrics=['accuracy'])
    return vgg16_model

# Model 1: Adam optimizer.
model1 = create_model(optimizer=tf.keras.optimizers.Adam())
# Fix vs. original: this called model2.summary(), which raises NameError
# because model2 is only defined further down.
model1.summary()
Model: "model_1"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 input_2 (InputLayer)        [(None, 336, 336, 3)]     0         
                                                                 
 block1_conv1 (Conv2D)       (None, 336, 336, 64)      1792      
                                                                 
 block1_conv2 (Conv2D)       (None, 336, 336, 64)      36928     
                                                                 
 block1_pool (MaxPooling2D)  (None, 168, 168, 64)      0         
                                                                 
 block2_conv1 (Conv2D)       (None, 168, 168, 128)     73856     
                                                                 
 block2_conv2 (Conv2D)       (None, 168, 168, 128)     147584    
                                                                 
 block2_pool (MaxPooling2D)  (None, 84, 84, 128)       0         
                                                                 
 block3_conv1 (Conv2D)       (None, 84, 84, 256)       295168    
                                                                 
 block3_conv2 (Conv2D)       (None, 84, 84, 256)       590080    
                                                                 
 block3_conv3 (Conv2D)       (None, 84, 84, 256)       590080    
                                                                 
 block3_pool (MaxPooling2D)  (None, 42, 42, 256)       0         
                                                                 
 block4_conv1 (Conv2D)       (None, 42, 42, 512)       1180160   
                                                                 
 block4_conv2 (Conv2D)       (None, 42, 42, 512)       2359808   
                                                                 
 block4_conv3 (Conv2D)       (None, 42, 42, 512)       2359808   
                                                                 
 block4_pool (MaxPooling2D)  (None, 21, 21, 512)       0         
                                                                 
 block5_conv1 (Conv2D)       (None, 21, 21, 512)       2359808   
                                                                 
 block5_conv2 (Conv2D)       (None, 21, 21, 512)       2359808   
                                                                 
 block5_conv3 (Conv2D)       (None, 21, 21, 512)       2359808   
                                                                 
 block5_pool (MaxPooling2D)  (None, 10, 10, 512)       0         
                                                                 
 global_average_pooling2d_1  (None, 512)               0         
  (GlobalAveragePooling2D)                                       
                                                                 
 dense_2 (Dense)             (None, 170)               87210     
                                                                 
 batch_normalization_1 (Bat  (None, 170)               680       
 chNormalization)                                                
                                                                 
 dropout_1 (Dropout)         (None, 170)               0         
                                                                 
 dense_3 (Dense)             (None, 17)                2907      
                                                                 
=================================================================
Total params: 14805485 (56.48 MB)
Trainable params: 90457 (353.35 KB)
Non-trainable params: 14715028 (56.13 MB)
_________________________________________________________________
# Model 2: identical architecture, SGD optimizer — the comparison baseline.
model2 = create_model(optimizer=tf.keras.optimizers.SGD())
model2.summary()
Model: "model_3"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 input_4 (InputLayer)        [(None, 336, 336, 3)]     0         
                                                                 
 block1_conv1 (Conv2D)       (None, 336, 336, 64)      1792      
                                                                 
 block1_conv2 (Conv2D)       (None, 336, 336, 64)      36928     
                                                                 
 block1_pool (MaxPooling2D)  (None, 168, 168, 64)      0         
                                                                 
 block2_conv1 (Conv2D)       (None, 168, 168, 128)     73856     
                                                                 
 block2_conv2 (Conv2D)       (None, 168, 168, 128)     147584    
                                                                 
 block2_pool (MaxPooling2D)  (None, 84, 84, 128)       0         
                                                                 
 block3_conv1 (Conv2D)       (None, 84, 84, 256)       295168    
                                                                 
 block3_conv2 (Conv2D)       (None, 84, 84, 256)       590080    
                                                                 
 block3_conv3 (Conv2D)       (None, 84, 84, 256)       590080    
                                                                 
 block3_pool (MaxPooling2D)  (None, 42, 42, 256)       0         
                                                                 
 block4_conv1 (Conv2D)       (None, 42, 42, 512)       1180160   
                                                                 
 block4_conv2 (Conv2D)       (None, 42, 42, 512)       2359808   
                                                                 
 block4_conv3 (Conv2D)       (None, 42, 42, 512)       2359808   
                                                                 
 block4_pool (MaxPooling2D)  (None, 21, 21, 512)       0         
                                                                 
 block5_conv1 (Conv2D)       (None, 21, 21, 512)       2359808   
                                                                 
 block5_conv2 (Conv2D)       (None, 21, 21, 512)       2359808   
                                                                 
 block5_conv3 (Conv2D)       (None, 21, 21, 512)       2359808   
                                                                 
 block5_pool (MaxPooling2D)  (None, 10, 10, 512)       0         
                                                                 
 global_average_pooling2d_3  (None, 512)               0         
  (GlobalAveragePooling2D)                                       
                                                                 
 dense_6 (Dense)             (None, 170)               87210     
                                                                 
 batch_normalization_3 (Bat  (None, 170)               680       
 chNormalization)                                                
                                                                 
 dropout_3 (Dropout)         (None, 170)               0         
                                                                 
 dense_7 (Dense)             (None, 17)                2907      
                                                                 
=================================================================
Total params: 14805485 (56.48 MB)
Trainable params: 90457 (353.35 KB)
Non-trainable params: 14715028 (56.13 MB)
_________________________________________________________________

四、训练模型

NO_EPOCHS = 50  # same budget for both optimizers so the comparison is fair

# Train both models on identical data so only the optimizer differs.
history_model1  = model1.fit(train_ds, epochs=NO_EPOCHS, verbose=1, validation_data=val_ds)  # Adam
history_model2  = model2.fit(train_ds, epochs=NO_EPOCHS, verbose=1, validation_data=val_ds)  # SGD
Epoch 1/50

89/90 [============================>.] - ETA: 0s - loss: 2.8112 - accuracy: 0.1629

90/90 [==============================] - 20s 71ms/step - loss: 2.8091 - accuracy: 0.1632 - val_loss: 2.6673 - val_accuracy: 0.1389
Epoch 2/50
90/90 [==============================] - 5s 61ms/step - loss: 2.0878 - accuracy: 0.3486 - val_loss: 2.4607 - val_accuracy: 0.1444
Epoch 3/50
90/90 [==============================] - 5s 58ms/step - loss: 1.7481 - accuracy: 0.4493 - val_loss: 2.1831 - val_accuracy: 0.3639
Epoch 4/50
90/90 [==============================] - 5s 57ms/step - loss: 1.5039 - accuracy: 0.5042 - val_loss: 1.9119 - val_accuracy: 0.4361
Epoch 5/50
90/90 [==============================] - 5s 58ms/step - loss: 1.3550 - accuracy: 0.5625 - val_loss: 1.8546 - val_accuracy: 0.4000
Epoch 6/50
90/90 [==============================] - 5s 58ms/step - loss: 1.2120 - accuracy: 0.6292 - val_loss: 1.7413 - val_accuracy: 0.4194
Epoch 7/50
90/90 [==============================] - 5s 59ms/step - loss: 1.1211 - accuracy: 0.6389 - val_loss: 1.7147 - val_accuracy: 0.4611
Epoch 8/50
90/90 [==============================] - 5s 58ms/step - loss: 0.9904 - accuracy: 0.6931 - val_loss: 1.6629 - val_accuracy: 0.4667
Epoch 9/50
90/90 [==============================] - 5s 61ms/step - loss: 0.9338 - accuracy: 0.7104 - val_loss: 1.9737 - val_accuracy: 0.4111
Epoch 10/50
90/90 [==============================] - 6s 63ms/step - loss: 0.8578 - accuracy: 0.7493 - val_loss: 1.3729 - val_accuracy: 0.5778
Epoch 11/50
90/90 [==============================] - 5s 60ms/step - loss: 0.7964 - accuracy: 0.7597 - val_loss: 1.4691 - val_accuracy: 0.5194
Epoch 12/50
90/90 [==============================] - 5s 59ms/step - loss: 0.7524 - accuracy: 0.7715 - val_loss: 1.6860 - val_accuracy: 0.5194
Epoch 13/50
90/90 [==============================] - 5s 60ms/step - loss: 0.6766 - accuracy: 0.7931 - val_loss: 1.7948 - val_accuracy: 0.4556
Epoch 14/50
90/90 [==============================] - 6s 61ms/step - loss: 0.6332 - accuracy: 0.8201 - val_loss: 1.5575 - val_accuracy: 0.5472
Epoch 15/50
90/90 [==============================] - 5s 59ms/step - loss: 0.6060 - accuracy: 0.8174 - val_loss: 1.4584 - val_accuracy: 0.5639
Epoch 16/50
90/90 [==============================] - 6s 61ms/step - loss: 0.5314 - accuracy: 0.8444 - val_loss: 1.7160 - val_accuracy: 0.5528
Epoch 17/50
90/90 [==============================] - 5s 58ms/step - loss: 0.5172 - accuracy: 0.8458 - val_loss: 2.0713 - val_accuracy: 0.4167
Epoch 18/50
90/90 [==============================] - 5s 59ms/step - loss: 0.4701 - accuracy: 0.8687 - val_loss: 1.4212 - val_accuracy: 0.6000
Epoch 19/50
90/90 [==============================] - 5s 60ms/step - loss: 0.4702 - accuracy: 0.8604 - val_loss: 1.6018 - val_accuracy: 0.5667
Epoch 20/50
90/90 [==============================] - 6s 61ms/step - loss: 0.4466 - accuracy: 0.8660 - val_loss: 1.7455 - val_accuracy: 0.5611
Epoch 21/50
90/90 [==============================] - 5s 59ms/step - loss: 0.4131 - accuracy: 0.8826 - val_loss: 1.4673 - val_accuracy: 0.5694
Epoch 22/50
90/90 [==============================] - 5s 59ms/step - loss: 0.3651 - accuracy: 0.8938 - val_loss: 1.8842 - val_accuracy: 0.5278
Epoch 23/50
90/90 [==============================] - 5s 59ms/step - loss: 0.3486 - accuracy: 0.8951 - val_loss: 1.7772 - val_accuracy: 0.5583
Epoch 24/50
90/90 [==============================] - 5s 58ms/step - loss: 0.3732 - accuracy: 0.8813 - val_loss: 1.7772 - val_accuracy: 0.5444
Epoch 25/50
90/90 [==============================] - 6s 66ms/step - loss: 0.3227 - accuracy: 0.8958 - val_loss: 2.2513 - val_accuracy: 0.4889
Epoch 26/50
90/90 [==============================] - 5s 60ms/step - loss: 0.3127 - accuracy: 0.8979 - val_loss: 1.8000 - val_accuracy: 0.5306
Epoch 27/50
90/90 [==============================] - 6s 61ms/step - loss: 0.2975 - accuracy: 0.9160 - val_loss: 1.6870 - val_accuracy: 0.5722
Epoch 28/50
90/90 [==============================] - 5s 59ms/step - loss: 0.2677 - accuracy: 0.9229 - val_loss: 1.7194 - val_accuracy: 0.5444
Epoch 29/50
90/90 [==============================] - 6s 64ms/step - loss: 0.2689 - accuracy: 0.9208 - val_loss: 1.8195 - val_accuracy: 0.5444
Epoch 30/50
90/90 [==============================] - 5s 59ms/step - loss: 0.2766 - accuracy: 0.9201 - val_loss: 1.6282 - val_accuracy: 0.5750
Epoch 31/50
90/90 [==============================] - 5s 60ms/step - loss: 0.2823 - accuracy: 0.9146 - val_loss: 1.5751 - val_accuracy: 0.6028
Epoch 32/50
90/90 [==============================] - 5s 60ms/step - loss: 0.2391 - accuracy: 0.9243 - val_loss: 2.5263 - val_accuracy: 0.4806
Epoch 33/50
90/90 [==============================] - 5s 59ms/step - loss: 0.2368 - accuracy: 0.9299 - val_loss: 2.4007 - val_accuracy: 0.4861
Epoch 34/50
90/90 [==============================] - 5s 59ms/step - loss: 0.2603 - accuracy: 0.9201 - val_loss: 2.7057 - val_accuracy: 0.4667
Epoch 35/50
90/90 [==============================] - 5s 60ms/step - loss: 0.2218 - accuracy: 0.9299 - val_loss: 2.4357 - val_accuracy: 0.4889
Epoch 36/50
90/90 [==============================] - 5s 59ms/step - loss: 0.2392 - accuracy: 0.9229 - val_loss: 2.0711 - val_accuracy: 0.5667
Epoch 37/50
90/90 [==============================] - 5s 59ms/step - loss: 0.2170 - accuracy: 0.9299 - val_loss: 1.6161 - val_accuracy: 0.6139
Epoch 38/50
90/90 [==============================] - 5s 59ms/step - loss: 0.2047 - accuracy: 0.9278 - val_loss: 2.3255 - val_accuracy: 0.5222
Epoch 39/50
90/90 [==============================] - 5s 59ms/step - loss: 0.2169 - accuracy: 0.9285 - val_loss: 2.1242 - val_accuracy: 0.5583
Epoch 40/50
90/90 [==============================] - 6s 61ms/step - loss: 0.2048 - accuracy: 0.9403 - val_loss: 2.0411 - val_accuracy: 0.5639
Epoch 41/50
90/90 [==============================] - 5s 60ms/step - loss: 0.2125 - accuracy: 0.9319 - val_loss: 2.5709 - val_accuracy: 0.5028
Epoch 42/50
90/90 [==============================] - 5s 59ms/step - loss: 0.1834 - accuracy: 0.9444 - val_loss: 2.2441 - val_accuracy: 0.5278
Epoch 43/50
90/90 [==============================] - 5s 59ms/step - loss: 0.1835 - accuracy: 0.9417 - val_loss: 2.4231 - val_accuracy: 0.5139
Epoch 44/50
90/90 [==============================] - 5s 60ms/step - loss: 0.1892 - accuracy: 0.9438 - val_loss: 2.7163 - val_accuracy: 0.4917
Epoch 45/50
90/90 [==============================] - 5s 61ms/step - loss: 0.1834 - accuracy: 0.9396 - val_loss: 2.8129 - val_accuracy: 0.4889
Epoch 46/50
90/90 [==============================] - 5s 59ms/step - loss: 0.1841 - accuracy: 0.9438 - val_loss: 2.1926 - val_accuracy: 0.5667
Epoch 47/50
90/90 [==============================] - 5s 60ms/step - loss: 0.1955 - accuracy: 0.9368 - val_loss: 2.5246 - val_accuracy: 0.5167
Epoch 48/50
90/90 [==============================] - 5s 59ms/step - loss: 0.1692 - accuracy: 0.9465 - val_loss: 1.9267 - val_accuracy: 0.6028
Epoch 49/50
90/90 [==============================] - 6s 62ms/step - loss: 0.1516 - accuracy: 0.9493 - val_loss: 2.2218 - val_accuracy: 0.5750
Epoch 50/50
90/90 [==============================] - 5s 58ms/step - loss: 0.1854 - accuracy: 0.9340 - val_loss: 4.4018 - val_accuracy: 0.3944
Epoch 1/50
90/90 [==============================] - 8s 64ms/step - loss: 3.0166 - accuracy: 0.1167 - val_loss: 2.8112 - val_accuracy: 0.1028
Epoch 2/50
90/90 [==============================] - 5s 59ms/step - loss: 2.4806 - accuracy: 0.2153 - val_loss: 2.6308 - val_accuracy: 0.2000
Epoch 3/50
90/90 [==============================] - 6s 62ms/step - loss: 2.2496 - accuracy: 0.2757 - val_loss: 2.4481 - val_accuracy: 0.2917
Epoch 4/50
90/90 [==============================] - 5s 59ms/step - loss: 2.1075 - accuracy: 0.3174 - val_loss: 2.2465 - val_accuracy: 0.3056
Epoch 5/50
90/90 [==============================] - 5s 58ms/step - loss: 1.9469 - accuracy: 0.3854 - val_loss: 2.0580 - val_accuracy: 0.3917
Epoch 6/50
90/90 [==============================] - 5s 60ms/step - loss: 1.8507 - accuracy: 0.4083 - val_loss: 1.9356 - val_accuracy: 0.4111
Epoch 7/50
90/90 [==============================] - 5s 59ms/step - loss: 1.7735 - accuracy: 0.4299 - val_loss: 1.8887 - val_accuracy: 0.4083
Epoch 8/50
90/90 [==============================] - 6s 62ms/step - loss: 1.6684 - accuracy: 0.4403 - val_loss: 1.8074 - val_accuracy: 0.3889
Epoch 9/50
90/90 [==============================] - 5s 59ms/step - loss: 1.6175 - accuracy: 0.4743 - val_loss: 1.8050 - val_accuracy: 0.4389
Epoch 10/50
90/90 [==============================] - 5s 60ms/step - loss: 1.5463 - accuracy: 0.5049 - val_loss: 1.7590 - val_accuracy: 0.4361
Epoch 11/50
90/90 [==============================] - 5s 58ms/step - loss: 1.4879 - accuracy: 0.5389 - val_loss: 1.6607 - val_accuracy: 0.4528
Epoch 12/50
90/90 [==============================] - 6s 63ms/step - loss: 1.4597 - accuracy: 0.5257 - val_loss: 1.6990 - val_accuracy: 0.4889
Epoch 13/50
90/90 [==============================] - 5s 59ms/step - loss: 1.3800 - accuracy: 0.5660 - val_loss: 1.6562 - val_accuracy: 0.4750
Epoch 14/50
90/90 [==============================] - 5s 60ms/step - loss: 1.3088 - accuracy: 0.5951 - val_loss: 1.6522 - val_accuracy: 0.4556
Epoch 15/50
90/90 [==============================] - 5s 60ms/step - loss: 1.3106 - accuracy: 0.5861 - val_loss: 1.5273 - val_accuracy: 0.5361
Epoch 16/50
90/90 [==============================] - 5s 59ms/step - loss: 1.2481 - accuracy: 0.5889 - val_loss: 1.5123 - val_accuracy: 0.5028
Epoch 17/50
90/90 [==============================] - 5s 60ms/step - loss: 1.1972 - accuracy: 0.6313 - val_loss: 1.5278 - val_accuracy: 0.4917
Epoch 18/50
90/90 [==============================] - 5s 59ms/step - loss: 1.1663 - accuracy: 0.6375 - val_loss: 1.5005 - val_accuracy: 0.4806
Epoch 19/50
90/90 [==============================] - 5s 59ms/step - loss: 1.1663 - accuracy: 0.6368 - val_loss: 1.5281 - val_accuracy: 0.4833
Epoch 20/50
90/90 [==============================] - 5s 60ms/step - loss: 1.1332 - accuracy: 0.6493 - val_loss: 1.4536 - val_accuracy: 0.5250
Epoch 21/50
90/90 [==============================] - 5s 59ms/step - loss: 1.0734 - accuracy: 0.6507 - val_loss: 1.5152 - val_accuracy: 0.4972
Epoch 22/50
90/90 [==============================] - 5s 59ms/step - loss: 1.0451 - accuracy: 0.6715 - val_loss: 1.5605 - val_accuracy: 0.4944
Epoch 23/50
90/90 [==============================] - 5s 58ms/step - loss: 1.0594 - accuracy: 0.6639 - val_loss: 1.5454 - val_accuracy: 0.5250
Epoch 24/50
90/90 [==============================] - 5s 60ms/step - loss: 0.9998 - accuracy: 0.6757 - val_loss: 1.6288 - val_accuracy: 0.4889
Epoch 25/50
90/90 [==============================] - 5s 59ms/step - loss: 1.0310 - accuracy: 0.6736 - val_loss: 1.5872 - val_accuracy: 0.5111
Epoch 26/50
90/90 [==============================] - 6s 62ms/step - loss: 0.9853 - accuracy: 0.6972 - val_loss: 1.6057 - val_accuracy: 0.4917
Epoch 27/50
90/90 [==============================] - 5s 59ms/step - loss: 0.9467 - accuracy: 0.7160 - val_loss: 1.4656 - val_accuracy: 0.5389
Epoch 28/50
90/90 [==============================] - 5s 59ms/step - loss: 0.9387 - accuracy: 0.7021 - val_loss: 1.5354 - val_accuracy: 0.5111
Epoch 29/50
90/90 [==============================] - 5s 59ms/step - loss: 0.8907 - accuracy: 0.7222 - val_loss: 1.6350 - val_accuracy: 0.5056
Epoch 30/50
90/90 [==============================] - 6s 63ms/step - loss: 0.8749 - accuracy: 0.7257 - val_loss: 1.5908 - val_accuracy: 0.4778
Epoch 31/50
90/90 [==============================] - 5s 59ms/step - loss: 0.8817 - accuracy: 0.7167 - val_loss: 1.4561 - val_accuracy: 0.5389
Epoch 32/50
90/90 [==============================] - 5s 60ms/step - loss: 0.8425 - accuracy: 0.7278 - val_loss: 1.4919 - val_accuracy: 0.5500
Epoch 33/50
90/90 [==============================] - 5s 59ms/step - loss: 0.8354 - accuracy: 0.7278 - val_loss: 1.7405 - val_accuracy: 0.4917
Epoch 34/50
90/90 [==============================] - 5s 59ms/step - loss: 0.7911 - accuracy: 0.7535 - val_loss: 1.4488 - val_accuracy: 0.5361
Epoch 35/50
90/90 [==============================] - 5s 58ms/step - loss: 0.7850 - accuracy: 0.7590 - val_loss: 1.5416 - val_accuracy: 0.5250
Epoch 36/50
90/90 [==============================] - 5s 58ms/step - loss: 0.7658 - accuracy: 0.7625 - val_loss: 1.3718 - val_accuracy: 0.5833
Epoch 37/50
90/90 [==============================] - 5s 60ms/step - loss: 0.7382 - accuracy: 0.7715 - val_loss: 1.3779 - val_accuracy: 0.5528
Epoch 38/50
90/90 [==============================] - 5s 59ms/step - loss: 0.7484 - accuracy: 0.7563 - val_loss: 1.3619 - val_accuracy: 0.5583
Epoch 39/50
90/90 [==============================] - 5s 59ms/step - loss: 0.7210 - accuracy: 0.7833 - val_loss: 1.4926 - val_accuracy: 0.5083
Epoch 40/50
90/90 [==============================] - 5s 60ms/step - loss: 0.7222 - accuracy: 0.7681 - val_loss: 1.4294 - val_accuracy: 0.5611
Epoch 41/50
90/90 [==============================] - 5s 58ms/step - loss: 0.7157 - accuracy: 0.7792 - val_loss: 1.4342 - val_accuracy: 0.5861
Epoch 42/50
90/90 [==============================] - 5s 58ms/step - loss: 0.7037 - accuracy: 0.7806 - val_loss: 1.4258 - val_accuracy: 0.5417
Epoch 43/50
90/90 [==============================] - 5s 60ms/step - loss: 0.6777 - accuracy: 0.7868 - val_loss: 1.4914 - val_accuracy: 0.5556
Epoch 44/50
90/90 [==============================] - 5s 58ms/step - loss: 0.6650 - accuracy: 0.8000 - val_loss: 1.5244 - val_accuracy: 0.5444
Epoch 45/50
90/90 [==============================] - 6s 65ms/step - loss: 0.6421 - accuracy: 0.7931 - val_loss: 1.6820 - val_accuracy: 0.4889
Epoch 46/50
90/90 [==============================] - 5s 59ms/step - loss: 0.6423 - accuracy: 0.7965 - val_loss: 1.5195 - val_accuracy: 0.5444
Epoch 47/50
90/90 [==============================] - 5s 59ms/step - loss: 0.6194 - accuracy: 0.8167 - val_loss: 1.2861 - val_accuracy: 0.5889
Epoch 48/50
90/90 [==============================] - 5s 60ms/step - loss: 0.6213 - accuracy: 0.8097 - val_loss: 1.4794 - val_accuracy: 0.5472
Epoch 49/50
90/90 [==============================] - 5s 59ms/step - loss: 0.5999 - accuracy: 0.8153 - val_loss: 1.5334 - val_accuracy: 0.5250
Epoch 50/50
90/90 [==============================] - 5s 58ms/step - loss: 0.5749 - accuracy: 0.8174 - val_loss: 1.4426 - val_accuracy: 0.5722

五、评估模型

1. Accuracy与Loss图

from matplotlib.ticker import MultipleLocator
plt.rcParams['savefig.dpi'] = 300 # resolution of saved figures
plt.rcParams['figure.dpi']  = 300 # on-screen resolution

# Gather (series, legend-label) pairs for both optimizers.
hist1 = history_model1.history
hist2 = history_model2.history

acc_curves = [
    (hist1['accuracy'],     'Training Accuracy-Adam'),
    (hist2['accuracy'],     'Training Accuracy-SGD'),
    (hist1['val_accuracy'], 'Validation Accuracy-Adam'),
    (hist2['val_accuracy'], 'Validation Accuracy-SGD'),
]
loss_curves = [
    (hist1['loss'],     'Training Loss-Adam'),
    (hist2['loss'],     'Training Loss-SGD'),
    (hist1['val_loss'], 'Validation Loss-Adam'),
    (hist2['val_loss'], 'Validation Loss-SGD'),
]

epochs_range = range(len(acc_curves[0][0]))

plt.figure(figsize=(16, 4))

plt.subplot(1, 2, 1)
for series, name in acc_curves:
    plt.plot(epochs_range, series, label=name)
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
# One x-axis tick per epoch.
plt.gca().xaxis.set_major_locator(MultipleLocator(1))

plt.subplot(1, 2, 2)
for series, name in loss_curves:
    plt.plot(epochs_range, series, label=name)
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
# One x-axis tick per epoch.
plt.gca().xaxis.set_major_locator(MultipleLocator(1))

plt.show()

t11_optimizer_20_1.png

2. 模型评估

def test_accuracy_report(model):
    """Evaluate `model` on the module-level val_ds and print loss and accuracy.

    Args:
        model: a compiled tf.keras Model.
    """
    loss, accuracy = model.evaluate(val_ds, verbose=0)
    # Fix vs. original: it printed "Loss function: <number>", mislabelling the
    # loss *value* as a function name; label the number for what it is.
    print(f'Loss: {loss}, accuracy: {accuracy}')
    
test_accuracy_report(model2)
Loss function: 1.4425572156906128, accuracy: 0.5722222328186035
test_accuracy_report(model1)
Loss function: 4.401803493499756, accuracy: 0.39444443583488464

六、总结

  • 在模型一定的情况下,优化器的选择对准确率有明显影响,值得在实验中进行对比(本例中 Adam 收敛更快,但后期出现过拟合,SGD 的最终验证准确率更稳定)。
  • 不过优化器只能起到一定的提升作用,更多的时间仍应放在模型本身的优化上。