# Training on Augmented Data
import numpy as np
from tensorflow.keras.datasets import cifar10

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Standardize with the training set's per-channel statistics
x_mean = np.mean(x_train, axis=(0, 1, 2))
x_std = np.std(x_train, axis=(0, 1, 2))
x_train = (x_train - x_mean) / x_std
x_test = (x_test - x_mean) / x_std
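Averaging over axes (0, 1, 2) collapses the image, height, and width dimensions, so exactly one statistic remains per RGB channel. A quick sanity check (a sketch using the variables defined above):

print(x_mean.shape, x_std.shape)     # (3,) (3,) : one value per RGB channel
print(x_train.mean(axis=(0, 1, 2)))  # roughly [0. 0. 0.] after standardization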
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.3, random_state=777)

print(y_train.shape)
# Labels arrive as an (N, 1) column vector; flatten them to (N,)
y_train = np.squeeze(y_train)
y_val = np.squeeze(y_val)
print(y_train.shape)
# (35000, 1)
# (35000,)
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(horizontal_flip=True,    # flip images horizontally
                                   zoom_range=0.2,          # random zoom in/out
                                   width_shift_range=0.1,   # horizontal shift
                                   height_shift_range=0.1,  # vertical shift
                                   rotation_range=30,       # random rotation (degrees)
                                   fill_mode='nearest')     # how to fill pixels exposed by a transform
# No augmentation on the validation set
val_datagen = ImageDataGenerator()

batch_size = 32
train_generator = train_datagen.flow(x_train, y_train, batch_size=batch_size)
val_generator = val_datagen.flow(x_val, y_val, batch_size=batch_size)
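Each call to the generator yields one batch of randomly augmented images together with its labels. A quick shape check (a sketch using the generator defined above):

x_batch, y_batch = next(train_generator)
print(x_batch.shape)  # (32, 32, 32, 3): batch_size augmented images
print(y_batch.shape)  # (32,)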
Batch normalization (BatchNormalization) is a technique for taming the instability of activations during training. It narrows the distribution of widely spread layer outputs, reducing that instability, and when outputs would otherwise vanish or explode it rescales them into a workable range so training can proceed.
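Note that BatchNormalization is imported below but never actually added to the model. If you wanted to apply it, a common placement is between a convolution and its activation; a minimal sketch (illustrative only, not part of the model trained in this post):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation

bn_block = Sequential([
    Conv2D(kernel_size=3, filters=32, padding='same', input_shape=(32, 32, 3)),
    BatchNormalization(),  # rescale conv outputs to a stable range before the nonlinearity
    Activation('relu'),
])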
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout, Activation, BatchNormalization
from tensorflow.keras.optimizers import Adam

model = Sequential()
model.add(Conv2D(kernel_size=3, filters=32, padding='same', input_shape=(32, 32, 3)))
model.add(Activation('relu'))
model.add(Conv2D(kernel_size=3, filters=32, padding='same'))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2, padding='same'))

model.add(Conv2D(kernel_size=3, filters=64, padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(kernel_size=3, filters=64, padding='same'))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2, padding='same'))

model.add(Conv2D(kernel_size=3, filters=128, padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(kernel_size=3, filters=128, padding='same'))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2, padding='same'))

model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))
# Learning rate 1e-4 = 0.0001; sparse_categorical_crossentropy takes integer labels directly
model.compile(optimizer=Adam(1e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
Model: "sequential_4"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_8 (Conv2D) (None, 32, 32, 32) 896
activation_4 (Activation) (None, 32, 32, 32) 0
conv2d_9 (Conv2D) (None, 32, 32, 32) 9248
activation_5 (Activation) (None, 32, 32, 32) 0
max_pooling2d_1 (MaxPooling (None, 16, 16, 32) 0
2D)
conv2d_10 (Conv2D) (None, 16, 16, 64) 18496
activation_6 (Activation) (None, 16, 16, 64) 0
conv2d_11 (Conv2D) (None, 16, 16, 64) 36928
activation_7 (Activation) (None, 16, 16, 64) 0
max_pooling2d_2 (MaxPooling (None, 8, 8, 64) 0
2D)
conv2d_12 (Conv2D) (None, 8, 8, 128) 73856
activation_8 (Activation) (None, 8, 8, 128) 0
conv2d_13 (Conv2D) (None, 8, 8, 128) 147584
activation_9 (Activation) (None, 8, 8, 128) 0
max_pooling2d_3 (MaxPooling (None, 4, 4, 128) 0
2D)
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 256) 524544
dense_1 (Dense) (None, 10) 2570
=================================================================
Total params: 814,122
Trainable params: 814,122
Non-trainable params: 0
_________________________________________________________________
# Number of steps per epoch: ceil(train_len / batch_size)
def get_step(train_len, batch_size):
    if train_len % batch_size > 0:
        return train_len // batch_size + 1
    else:
        return train_len // batch_size
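get_step is just a ceiling division: with the 35,000 training images and batch_size = 32 it returns 1094, matching the step count in the training log below. Equivalently (a sketch):

import math
assert get_step(35000, 32) == math.ceil(35000 / 32) == 1094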
history = model.fit(train_generator,
                    epochs=100,
                    steps_per_epoch=get_step(len(x_train), batch_size),
                    validation_data=val_generator,
                    validation_steps=get_step(len(x_val), batch_size))
Epoch 1/100
1094/1094 [==============================] - 50s 36ms/step - loss: 1.7521 - accuracy: 0.3640 - val_loss: 1.5137 - val_accuracy: 0.4489
Epoch 2/100
1094/1094 [==============================] - 40s 37ms/step - loss: 1.4751 - accuracy: 0.4685 - val_loss: 1.2843 - val_accuracy: 0.5383
Epoch 3/100
1094/1094 [==============================] - 40s 37ms/step - loss: 1.3595 - accuracy: 0.5118 - val_loss: 1.1808 - val_accuracy: 0.5744
Epoch 4/100
1094/1094 [==============================] - 40s 36ms/step - loss: 1.2654 - accuracy: 0.5486 - val_loss: 1.2320 - val_accuracy: 0.5648
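The standardized x_test prepared at the top of the post is never used during training. As a follow-up (a sketch, not part of the original run), the trained model can be evaluated on it; y_test is still an (N, 1) column vector, so it is squeezed the same way as the training labels:

test_loss, test_acc = model.evaluate(x_test, np.squeeze(y_test), batch_size=batch_size)
print(test_acc)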