20장 전이 학습을 통해 딥러닝의 성능 극대화하기¶
1. 소규모 데이터셋으로 만드는 강력한 학습 모델¶
실습: 치매 환자의 뇌인지 일반인의 뇌인지 예측하기¶
In [1]:
Copied!
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import optimizers
import numpy as np
import matplotlib.pyplot as plt
# Configure the augmentation applied to the training set.
train_datagen = ImageDataGenerator(rescale=1./255, # Rescale pixel values from [0, 255] to [0, 1].
horizontal_flip=True, # Add horizontally mirrored images (each image flipped with 50% probability).
width_shift_range=0.1, # Shift images left/right by up to 10% of the width.
height_shift_range=0.1, # Likewise shift up/down by up to 10% of the height.
#rotation_range=5, # Rotate images by up to the given number of degrees.
#shear_range=0.7, # Shear: fix one axis and slant the other.
#zoom_range=1.2, # Zoom in or out.
#vertical_flip=True, # Add vertically mirrored images.
#fill_mode='nearest' # How empty pixels created by the transforms are filled; 'nearest' copies the closest pixel value.
)
train_generator = train_datagen.flow_from_directory(
'./data/train', # Folder with the training images (one subfolder per class).
target_size=(150, 150),
batch_size=5,
class_mode='binary')
# The test set gets no augmentation -- only the same rescaling.
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
'./data/test', # Folder with the test images.
target_size=(150, 150),
batch_size=5,
class_mode='binary')
# Build the CNN used in earlier chapters: three Conv+ReLU+MaxPool stages,
# then Flatten -> Dense(64) -> Dropout -> sigmoid output for binary classification.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(150,150,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))  # Drop half the units during training to reduce overfitting.
model.add(Dense(1))
model.add(Activation('sigmoid'))  # Single probability output for the binary task.
model.summary()
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import optimizers
import numpy as np
import matplotlib.pyplot as plt
# Configure the augmentation applied to the training set.
train_datagen = ImageDataGenerator(rescale=1./255, # Rescale pixel values from [0, 255] to [0, 1].
horizontal_flip=True, # Add horizontally mirrored images (each image flipped with 50% probability).
width_shift_range=0.1, # Shift images left/right by up to 10% of the width.
height_shift_range=0.1, # Likewise shift up/down by up to 10% of the height.
#rotation_range=5, # Rotate images by up to the given number of degrees.
#shear_range=0.7, # Shear: fix one axis and slant the other.
#zoom_range=1.2, # Zoom in or out.
#vertical_flip=True, # Add vertically mirrored images.
#fill_mode='nearest' # How empty pixels created by the transforms are filled; 'nearest' copies the closest pixel value.
)
train_generator = train_datagen.flow_from_directory(
'./data/train', # Folder with the training images (one subfolder per class).
target_size=(150, 150),
batch_size=5,
class_mode='binary')
# The test set gets no augmentation -- only the same rescaling.
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
'./data/test', # Folder with the test images.
target_size=(150, 150),
batch_size=5,
class_mode='binary')
# Build the CNN used in earlier chapters: three Conv+ReLU+MaxPool stages,
# then Flatten -> Dense(64) -> Dropout -> sigmoid output for binary classification.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(150,150,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))  # Drop half the units during training to reduce overfitting.
model.add(Dense(1))
model.add(Activation('sigmoid'))  # Single probability output for the binary task.
model.summary()
Found 160 images belonging to 2 classes. Found 120 images belonging to 2 classes. Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 148, 148, 32) 896 _________________________________________________________________ activation (Activation) (None, 148, 148, 32) 0 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 74, 74, 32) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 72, 72, 32) 9248 _________________________________________________________________ activation_1 (Activation) (None, 72, 72, 32) 0 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 36, 36, 32) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 34, 34, 64) 18496 _________________________________________________________________ activation_2 (Activation) (None, 34, 34, 64) 0 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 17, 17, 64) 0 _________________________________________________________________ flatten (Flatten) (None, 18496) 0 _________________________________________________________________ dense (Dense) (None, 64) 1183808 _________________________________________________________________ activation_3 (Activation) (None, 64) 0 _________________________________________________________________ dropout (Dropout) (None, 64) 0 _________________________________________________________________ dense_1 (Dense) (None, 1) 65 _________________________________________________________________ activation_4 (Activation) (None, 1) 0 ================================================================= Total params: 1,212,513 Trainable params: 1,212,513 Non-trainable params: 0 
_________________________________________________________________
In [2]:
Copied!
# SciPy is required by Keras' ImageDataGenerator for the shift/zoom/shear transforms.
# (IPython shell magic; runs pip from inside the notebook.)
!pip install SciPy
!pip install SciPy
Requirement already satisfied: SciPy in c:\users\gilbut\anaconda3\envs\deep_learning\lib\site-packages (1.7.3) Requirement already satisfied: numpy<1.23.0,>=1.16.5 in c:\users\gilbut\anaconda3\envs\deep_learning\lib\site-packages (from SciPy) (1.19.5)
In [3]:
Copied!
# Configure training: binary cross-entropy loss and Adam with a small learning rate.
model.compile(loss='binary_crossentropy', optimizer=optimizers.Adam(learning_rate=0.0002), metrics=['accuracy'])
# Stop training early once val_loss has not improved for 5 consecutive epochs.
early_stopping_callback = EarlyStopping(monitor='val_loss', patience=5)
# Train the model; validation runs on 10 batches (10 x batch_size=5 = 50 images) per epoch.
history = model.fit(
train_generator,
epochs=100,
validation_data=test_generator,
validation_steps=10,
callbacks=[early_stopping_callback])
# Configure training: binary cross-entropy loss and Adam with a small learning rate.
model.compile(loss='binary_crossentropy', optimizer=optimizers.Adam(learning_rate=0.0002), metrics=['accuracy'])
# Stop training early once val_loss has not improved for 5 consecutive epochs.
early_stopping_callback = EarlyStopping(monitor='val_loss', patience=5)
# Train the model; validation runs on 10 batches (10 x batch_size=5 = 50 images) per epoch.
history = model.fit(
train_generator,
epochs=100,
validation_data=test_generator,
validation_steps=10,
callbacks=[early_stopping_callback])
Epoch 1/100 32/32 [==============================] - 3s 68ms/step - loss: 0.7053 - accuracy: 0.5063 - val_loss: 0.6896 - val_accuracy: 0.5000 Epoch 2/100 32/32 [==============================] - 2s 66ms/step - loss: 0.7028 - accuracy: 0.5125 - val_loss: 0.6904 - val_accuracy: 0.4800 Epoch 3/100 32/32 [==============================] - 2s 74ms/step - loss: 0.6913 - accuracy: 0.5188 - val_loss: 0.6883 - val_accuracy: 0.4800 Epoch 4/100 32/32 [==============================] - 2s 71ms/step - loss: 0.6854 - accuracy: 0.5375 - val_loss: 0.6786 - val_accuracy: 0.8200 Epoch 5/100 32/32 [==============================] - 2s 75ms/step - loss: 0.6756 - accuracy: 0.5750 - val_loss: 0.6573 - val_accuracy: 0.6400 Epoch 6/100 32/32 [==============================] - 2s 76ms/step - loss: 0.6825 - accuracy: 0.5813 - val_loss: 0.6378 - val_accuracy: 0.8400 Epoch 7/100 32/32 [==============================] - 2s 64ms/step - loss: 0.6558 - accuracy: 0.6313 - val_loss: 0.6667 - val_accuracy: 0.5000 Epoch 8/100 32/32 [==============================] - 2s 63ms/step - loss: 0.6345 - accuracy: 0.6500 - val_loss: 0.5991 - val_accuracy: 0.6400 Epoch 9/100 32/32 [==============================] - 2s 67ms/step - loss: 0.6429 - accuracy: 0.6313 - val_loss: 0.6015 - val_accuracy: 0.8200 Epoch 10/100 32/32 [==============================] - 2s 73ms/step - loss: 0.5748 - accuracy: 0.6938 - val_loss: 0.4762 - val_accuracy: 0.9200 Epoch 11/100 32/32 [==============================] - 3s 77ms/step - loss: 0.5332 - accuracy: 0.7437 - val_loss: 0.4443 - val_accuracy: 0.8400 Epoch 12/100 32/32 [==============================] - 2s 75ms/step - loss: 0.4228 - accuracy: 0.8625 - val_loss: 0.4070 - val_accuracy: 0.8400 Epoch 13/100 32/32 [==============================] - 2s 65ms/step - loss: 0.3653 - accuracy: 0.8500 - val_loss: 0.2481 - val_accuracy: 0.9600 Epoch 14/100 32/32 [==============================] - 2s 64ms/step - loss: 0.3076 - accuracy: 0.8813 - val_loss: 0.3884 - val_accuracy: 0.8400 Epoch 
15/100 32/32 [==============================] - 2s 66ms/step - loss: 0.2986 - accuracy: 0.9062 - val_loss: 0.2137 - val_accuracy: 0.9200 Epoch 16/100 32/32 [==============================] - 2s 64ms/step - loss: 0.2292 - accuracy: 0.9438 - val_loss: 0.1383 - val_accuracy: 0.9600 Epoch 17/100 32/32 [==============================] - 2s 66ms/step - loss: 0.2399 - accuracy: 0.9062 - val_loss: 0.1318 - val_accuracy: 0.9800 Epoch 18/100 32/32 [==============================] - 2s 65ms/step - loss: 0.2025 - accuracy: 0.9000 - val_loss: 0.1201 - val_accuracy: 0.9800 Epoch 19/100 32/32 [==============================] - 2s 64ms/step - loss: 0.1659 - accuracy: 0.9375 - val_loss: 0.1257 - val_accuracy: 0.9800 Epoch 20/100 32/32 [==============================] - 2s 64ms/step - loss: 0.1465 - accuracy: 0.9438 - val_loss: 0.1550 - val_accuracy: 0.9000 Epoch 21/100 32/32 [==============================] - 2s 64ms/step - loss: 0.2073 - accuracy: 0.9312 - val_loss: 0.1501 - val_accuracy: 0.9400 Epoch 22/100 32/32 [==============================] - 2s 63ms/step - loss: 0.1289 - accuracy: 0.9688 - val_loss: 0.1103 - val_accuracy: 0.9200 Epoch 23/100 32/32 [==============================] - 2s 64ms/step - loss: 0.1045 - accuracy: 0.9688 - val_loss: 0.0534 - val_accuracy: 0.9800 Epoch 24/100 32/32 [==============================] - 2s 63ms/step - loss: 0.0545 - accuracy: 0.9875 - val_loss: 0.1120 - val_accuracy: 0.9600 Epoch 25/100 32/32 [==============================] - 2s 67ms/step - loss: 0.0969 - accuracy: 0.9563 - val_loss: 0.0927 - val_accuracy: 0.9600 Epoch 26/100 32/32 [==============================] - 2s 65ms/step - loss: 0.1004 - accuracy: 0.9625 - val_loss: 0.1100 - val_accuracy: 0.9600 Epoch 27/100 32/32 [==============================] - 2s 64ms/step - loss: 0.0948 - accuracy: 0.9563 - val_loss: 0.0291 - val_accuracy: 1.0000 Epoch 28/100 32/32 [==============================] - 2s 63ms/step - loss: 0.1080 - accuracy: 0.9625 - val_loss: 0.0329 - val_accuracy: 1.0000 
Epoch 29/100 32/32 [==============================] - 2s 64ms/step - loss: 0.1362 - accuracy: 0.9563 - val_loss: 0.0802 - val_accuracy: 0.9800 Epoch 30/100 32/32 [==============================] - 2s 73ms/step - loss: 0.0909 - accuracy: 0.9625 - val_loss: 0.0423 - val_accuracy: 0.9800 Epoch 31/100 32/32 [==============================] - 2s 65ms/step - loss: 0.0631 - accuracy: 0.9875 - val_loss: 0.1227 - val_accuracy: 0.9600 Epoch 32/100 32/32 [==============================] - 2s 65ms/step - loss: 0.1099 - accuracy: 0.9563 - val_loss: 0.0388 - val_accuracy: 1.0000
In [4]:
Copied!
# Pull the per-epoch loss curves recorded during training.
val_loss_history = history.history['val_loss']
train_loss_history = history.history['loss']
epochs_axis = np.arange(len(train_loss_history))

# Validation loss in red, training loss in blue.
plt.plot(epochs_axis, val_loss_history, marker='.', c="red", label='Testset_loss')
plt.plot(epochs_axis, train_loss_history, marker='.', c="blue", label='Trainset_loss')

# Decorate the axes and display the figure.
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='upper right')
plt.show()
# Store the validation-set and training-set loss recorded for each epoch.
y_vloss = history.history['val_loss']
y_loss = history.history['loss']
# Plot both loss curves over the epochs actually run (early stopping may cut training short).
x_len = np.arange(len(y_loss))
plt.plot(x_len, y_vloss, marker='.', c="red", label='Testset_loss')
plt.plot(x_len, y_loss, marker='.', c="blue", label='Trainset_loss')
# Add a legend, grid and axis labels, then show the figure.
plt.legend(loc='upper right')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
2. 전이 학습으로 모델 성능 극대화하기¶
실습: 전이 학습 실습하기¶
In [5]:
Copied!
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import Input, models, layers, optimizers, metrics
from tensorflow.keras.layers import Dense, Flatten, Activation, Dropout
from tensorflow.keras.applications import VGG16
from tensorflow.keras.callbacks import EarlyStopping
import numpy as np
import matplotlib.pyplot as plt
# Configure the augmentation applied to the training set.
train_datagen = ImageDataGenerator(rescale=1./255, # Rescale pixel values from [0, 255] to [0, 1].
horizontal_flip=True, # Add horizontally mirrored images (each image flipped with 50% probability).
width_shift_range=0.1, # Shift images left/right by up to 10% of the width.
height_shift_range=0.1, # Likewise shift up/down by up to 10% of the height.
#rotation_range=5, # Rotate images by up to the given number of degrees.
#shear_range=0.7, # Shear: fix one axis and slant the other.
#zoom_range=1.2, # Zoom in or out.
#vertical_flip=True, # Add vertically mirrored images.
#fill_mode='nearest' # How empty pixels created by the transforms are filled; 'nearest' copies the closest pixel value.
)
train_generator = train_datagen.flow_from_directory(
'./data/train',
target_size=(150, 150),
batch_size=5,
class_mode='binary')
# The test set is only rescaled (no augmentation).
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
'./data/test',
target_size=(150, 150),
batch_size=5,
class_mode='binary')
# Load VGG16 pretrained on ImageNet, without its fully connected classifier head.
transfer_model = VGG16(weights='imagenet', include_top=False, input_shape=(150, 150, 3))
transfer_model.trainable = False  # Freeze the pretrained weights; only the new head below will train.
transfer_model.summary()
# Stack our own classification head on top of the frozen VGG16 base.
finetune_model = models.Sequential()
finetune_model.add(transfer_model)
finetune_model.add(Flatten())
finetune_model.add(Dense(64))
finetune_model.add(Activation('relu'))
finetune_model.add(Dropout(0.5))  # Regularize the small new head.
finetune_model.add(Dense(1))
finetune_model.add(Activation('sigmoid'))  # Single probability output for the binary task.
finetune_model.summary()
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import Input, models, layers, optimizers, metrics
from tensorflow.keras.layers import Dense, Flatten, Activation, Dropout
from tensorflow.keras.applications import VGG16
from tensorflow.keras.callbacks import EarlyStopping
import numpy as np
import matplotlib.pyplot as plt
# Configure the augmentation applied to the training set.
train_datagen = ImageDataGenerator(rescale=1./255, # Rescale pixel values from [0, 255] to [0, 1].
horizontal_flip=True, # Add horizontally mirrored images (each image flipped with 50% probability).
width_shift_range=0.1, # Shift images left/right by up to 10% of the width.
height_shift_range=0.1, # Likewise shift up/down by up to 10% of the height.
#rotation_range=5, # Rotate images by up to the given number of degrees.
#shear_range=0.7, # Shear: fix one axis and slant the other.
#zoom_range=1.2, # Zoom in or out.
#vertical_flip=True, # Add vertically mirrored images.
#fill_mode='nearest' # How empty pixels created by the transforms are filled; 'nearest' copies the closest pixel value.
)
train_generator = train_datagen.flow_from_directory(
'./data/train',
target_size=(150, 150),
batch_size=5,
class_mode='binary')
# The test set is only rescaled (no augmentation).
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
'./data/test',
target_size=(150, 150),
batch_size=5,
class_mode='binary')
# Load VGG16 pretrained on ImageNet, without its fully connected classifier head.
transfer_model = VGG16(weights='imagenet', include_top=False, input_shape=(150, 150, 3))
transfer_model.trainable = False  # Freeze the pretrained weights; only the new head below will train.
transfer_model.summary()
# Stack our own classification head on top of the frozen VGG16 base.
finetune_model = models.Sequential()
finetune_model.add(transfer_model)
finetune_model.add(Flatten())
finetune_model.add(Dense(64))
finetune_model.add(Activation('relu'))
finetune_model.add(Dropout(0.5))  # Regularize the small new head.
finetune_model.add(Dense(1))
finetune_model.add(Activation('sigmoid'))  # Single probability output for the binary task.
finetune_model.summary()
Found 160 images belonging to 2 classes. Found 120 images belonging to 2 classes. Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5 58892288/58889256 [==============================] - 5s 0us/step 58900480/58889256 [==============================] - 5s 0us/step Model: "vgg16" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 150, 150, 3)] 0 _________________________________________________________________ block1_conv1 (Conv2D) (None, 150, 150, 64) 1792 _________________________________________________________________ block1_conv2 (Conv2D) (None, 150, 150, 64) 36928 _________________________________________________________________ block1_pool (MaxPooling2D) (None, 75, 75, 64) 0 _________________________________________________________________ block2_conv1 (Conv2D) (None, 75, 75, 128) 73856 _________________________________________________________________ block2_conv2 (Conv2D) (None, 75, 75, 128) 147584 _________________________________________________________________ block2_pool (MaxPooling2D) (None, 37, 37, 128) 0 _________________________________________________________________ block3_conv1 (Conv2D) (None, 37, 37, 256) 295168 _________________________________________________________________ block3_conv2 (Conv2D) (None, 37, 37, 256) 590080 _________________________________________________________________ block3_conv3 (Conv2D) (None, 37, 37, 256) 590080 _________________________________________________________________ block3_pool (MaxPooling2D) (None, 18, 18, 256) 0 _________________________________________________________________ block4_conv1 (Conv2D) (None, 18, 18, 512) 1180160 _________________________________________________________________ block4_conv2 (Conv2D) (None, 18, 18, 512) 2359808 
_________________________________________________________________ block4_conv3 (Conv2D) (None, 18, 18, 512) 2359808 _________________________________________________________________ block4_pool (MaxPooling2D) (None, 9, 9, 512) 0 _________________________________________________________________ block5_conv1 (Conv2D) (None, 9, 9, 512) 2359808 _________________________________________________________________ block5_conv2 (Conv2D) (None, 9, 9, 512) 2359808 _________________________________________________________________ block5_conv3 (Conv2D) (None, 9, 9, 512) 2359808 _________________________________________________________________ block5_pool (MaxPooling2D) (None, 4, 4, 512) 0 ================================================================= Total params: 14,714,688 Trainable params: 0 Non-trainable params: 14,714,688 _________________________________________________________________ Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= vgg16 (Functional) (None, 4, 4, 512) 14714688 _________________________________________________________________ flatten_1 (Flatten) (None, 8192) 0 _________________________________________________________________ dense_2 (Dense) (None, 64) 524352 _________________________________________________________________ activation_5 (Activation) (None, 64) 0 _________________________________________________________________ dropout_1 (Dropout) (None, 64) 0 _________________________________________________________________ dense_3 (Dense) (None, 1) 65 _________________________________________________________________ activation_6 (Activation) (None, 1) 0 ================================================================= Total params: 15,239,105 Trainable params: 524,417 Non-trainable params: 14,714,688 _________________________________________________________________
In [6]:
Copied!
# Configure training: binary cross-entropy loss and Adam with a small learning rate.
finetune_model.compile(loss='binary_crossentropy', optimizer=optimizers.Adam(learning_rate=0.0002), metrics=['accuracy'])
# Stop training early once val_loss has not improved for 5 consecutive epochs.
early_stopping_callback = EarlyStopping(monitor='val_loss', patience=5)
# Train only the new head (the VGG16 base is frozen); validation uses 10 batches per epoch.
history = finetune_model.fit(
train_generator,
epochs=20,
validation_data=test_generator,
validation_steps=10,
callbacks=[early_stopping_callback])
# Configure training: binary cross-entropy loss and Adam with a small learning rate.
finetune_model.compile(loss='binary_crossentropy', optimizer=optimizers.Adam(learning_rate=0.0002), metrics=['accuracy'])
# Stop training early once val_loss has not improved for 5 consecutive epochs.
early_stopping_callback = EarlyStopping(monitor='val_loss', patience=5)
# Train only the new head (the VGG16 base is frozen); validation uses 10 batches per epoch.
history = finetune_model.fit(
train_generator,
epochs=20,
validation_data=test_generator,
validation_steps=10,
callbacks=[early_stopping_callback])
Epoch 1/20 32/32 [==============================] - 7s 217ms/step - loss: 0.7590 - accuracy: 0.5688 - val_loss: 0.5409 - val_accuracy: 0.7800 Epoch 2/20 32/32 [==============================] - 7s 224ms/step - loss: 0.5509 - accuracy: 0.7125 - val_loss: 0.5420 - val_accuracy: 0.7600 Epoch 3/20 32/32 [==============================] - 7s 208ms/step - loss: 0.4324 - accuracy: 0.8188 - val_loss: 0.4058 - val_accuracy: 0.8200 Epoch 4/20 32/32 [==============================] - 7s 217ms/step - loss: 0.4192 - accuracy: 0.8313 - val_loss: 0.3407 - val_accuracy: 0.9200 Epoch 5/20 32/32 [==============================] - 7s 208ms/step - loss: 0.3139 - accuracy: 0.8813 - val_loss: 0.2570 - val_accuracy: 0.9600 Epoch 6/20 32/32 [==============================] - 7s 220ms/step - loss: 0.2795 - accuracy: 0.9125 - val_loss: 0.2508 - val_accuracy: 0.9200 Epoch 7/20 32/32 [==============================] - 7s 228ms/step - loss: 0.2519 - accuracy: 0.9125 - val_loss: 0.3089 - val_accuracy: 0.8600 Epoch 8/20 32/32 [==============================] - 7s 231ms/step - loss: 0.2198 - accuracy: 0.9375 - val_loss: 0.1758 - val_accuracy: 0.9800 Epoch 9/20 32/32 [==============================] - 7s 230ms/step - loss: 0.2399 - accuracy: 0.9312 - val_loss: 0.1661 - val_accuracy: 0.9400 Epoch 10/20 32/32 [==============================] - 7s 228ms/step - loss: 0.2002 - accuracy: 0.9375 - val_loss: 0.1326 - val_accuracy: 0.9800 Epoch 11/20 32/32 [==============================] - 8s 240ms/step - loss: 0.2483 - accuracy: 0.9125 - val_loss: 0.1493 - val_accuracy: 0.9400 Epoch 12/20 32/32 [==============================] - 8s 248ms/step - loss: 0.1724 - accuracy: 0.9438 - val_loss: 0.1812 - val_accuracy: 0.9400 Epoch 13/20 32/32 [==============================] - 8s 265ms/step - loss: 0.1952 - accuracy: 0.9500 - val_loss: 0.1734 - val_accuracy: 0.9200 Epoch 14/20 32/32 [==============================] - 8s 247ms/step - loss: 0.1618 - accuracy: 0.9625 - val_loss: 0.1516 - val_accuracy: 0.9400 Epoch 
15/20 32/32 [==============================] - 10s 300ms/step - loss: 0.1693 - accuracy: 0.9563 - val_loss: 0.2223 - val_accuracy: 0.9200
In [7]:
Copied!
# Pull the per-epoch loss curves recorded during training.
val_loss_history = history.history['val_loss']
train_loss_history = history.history['loss']
epochs_axis = np.arange(len(train_loss_history))

# Validation loss in red, training loss in blue.
plt.plot(epochs_axis, val_loss_history, marker='.', c="red", label='Testset_loss')
plt.plot(epochs_axis, train_loss_history, marker='.', c="blue", label='Trainset_loss')

# Decorate the axes and display the figure.
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='upper right')
plt.show()
# Store the validation-set and training-set loss recorded for each epoch.
y_vloss = history.history['val_loss']
y_loss = history.history['loss']
# Plot both loss curves over the epochs actually run (early stopping may cut training short).
x_len = np.arange(len(y_loss))
plt.plot(x_len, y_vloss, marker='.', c="red", label='Testset_loss')
plt.plot(x_len, y_loss, marker='.', c="blue", label='Trainset_loss')
# Add a legend, grid and axis labels, then show the figure.
plt.legend(loc='upper right')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()