Improving performance
VGGNet approach: use a deeper network
Image generator: augment the data by transforming one image into many variants // especially helpful when the collected dataset is small
 
from tensorflow.keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
import matplotlib.pyplot as plt
import numpy as np

 

train_datagen = ImageDataGenerator(horizontal_flip = True, # flip horizontally
                                   vertical_flip = True, # flip vertically
                                   shear_range = 0.5, # shear intensity (counterclockwise shear angle)
                                   brightness_range = [0.5, 1.0], # brightness adjustment range
                                   zoom_range = 0.2, # zoom in/out
                                   width_shift_range = 0.1, # horizontal shift
                                   height_shift_range = 0.1, # vertical shift
                                   rotation_range = 30, # rotate up to 30 degrees
                                   fill_mode = 'nearest') # how newly created pixels are filled after a transform

 

import tensorflow as tf
image_path = tf.keras.utils.get_file('cat.jpg', 'http://bit.ly/33U6mH9')
image = plt.imread(image_path)
plt.imshow(image)

# Transform the image with the generator
image = image.reshape((1,) + image.shape) # add a batch dimension
train_generator = train_datagen.flow(image, batch_size = 1) # yields one augmented image at a time
fig = plt.figure(figsize=(5,5))
fig.suptitle('augmented image')
for i in range(9) :
    batch = next(train_generator)  # one augmented batch of shape (1, H, W, 3)
    aug_img = batch[0]
    plt.subplot(3,3,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(np.array(aug_img, dtype = np.uint8))

plt.show()
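Beyond visualization, the generator can feed augmented batches straight into training. A minimal sketch, assuming a compiled Keras model and labeled arrays x_train / y_train (none of these are defined in this snippet):

batch_size = 32
train_flow = train_datagen.flow(x_train, y_train, batch_size=batch_size)  # hypothetical data
model.fit(train_flow,
          steps_per_epoch=len(x_train) // batch_size,  # one pass over the data per epoch
          epochs=10)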

 

 

import tensorflow as tf
tf.keras.__version__

# 2.7.0

 

import numpy as np
from tensorflow.keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)

Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
170500096/170498071 [==============================] - 3s 0us/step
170508288/170498071 [==============================] - 3s 0us/step
(50000, 32, 32, 3) (50000, 1)
(10000, 32, 32, 3) (10000, 1)

 

x_mean = np.mean(x_train, axis = (0, 1, 2))  # per-channel mean over the training set
x_std = np.std(x_train, axis = (0, 1, 2))    # per-channel standard deviation

x_train = (x_train - x_mean) / x_std  # standardize with training statistics
x_test = (x_test - x_mean) / x_std

from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.3, random_state = 777)

 

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
from tensorflow.keras.optimizers import Adam

model = Sequential()
model.add(Conv2D(kernel_size=3, filters=32, padding='same', activation='relu', input_shape=(32,32,3)))
model.add(Conv2D(kernel_size=3, filters=32, padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2, padding = 'same'))
model.add(Dropout(rate=0.2))

model.add(Conv2D(kernel_size=3, filters=64, padding='same', activation='relu'))
model.add(Conv2D(kernel_size=3, filters=64, padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2, padding = 'same'))
model.add(Dropout(rate=0.2))

model.add(Conv2D(kernel_size=3, filters=128, padding='same', activation='relu'))
model.add(Conv2D(kernel_size=3, filters=128, padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2, padding = 'same'))
model.add(Dropout(rate=0.2))

model.add(Flatten())
model.add(Dense(256, activation='relu')) 
model.add(Dense(10, activation='softmax'))
# 1e-4 = 0.0001 (learning rate)
model.compile(optimizer=Adam(1e-4), loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])
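sparse_categorical_crossentropy is used because y_train holds integer class indices; with one-hot labels the non-sparse categorical_crossentropy would apply instead. A small sketch of the equivalence (illustrative values only):

y_int = tf.constant([3])              # an integer class label
y_hot = tf.one_hot(y_int, depth=10)   # the same label, one-hot encoded
probs = tf.nn.softmax(tf.random.uniform((1, 10)))
print(tf.keras.losses.sparse_categorical_crossentropy(y_int, probs).numpy())
print(tf.keras.losses.categorical_crossentropy(y_hot, probs).numpy())  # same value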

 

model.summary()

Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d (Conv2D)             (None, 32, 32, 32)        896       
                                                                 
 conv2d_1 (Conv2D)           (None, 32, 32, 32)        9248      
                                                                 
 max_pooling2d (MaxPooling2D  (None, 16, 16, 32)       0         
 )                                                               
                                                                 
 dropout (Dropout)           (None, 16, 16, 32)        0         
                                                                 
 conv2d_2 (Conv2D)           (None, 16, 16, 64)        18496     
                                                                 
 conv2d_3 (Conv2D)           (None, 16, 16, 64)        36928     
                                                                 
 max_pooling2d_1 (MaxPooling  (None, 8, 8, 64)         0         
 2D)                                                             
                                                                 
 dropout_1 (Dropout)         (None, 8, 8, 64)          0         
                                                                 
 conv2d_4 (Conv2D)           (None, 8, 8, 128)         73856     
                                                                 
 conv2d_5 (Conv2D)           (None, 8, 8, 128)         147584    
                                                                 
 max_pooling2d_2 (MaxPooling  (None, 4, 4, 128)        0         
 2D)                                                             
                                                                 
 dropout_2 (Dropout)         (None, 4, 4, 128)         0         
                                                                 
 flatten (Flatten)           (None, 2048)              0         
                                                                 
 dense (Dense)               (None, 256)               524544    
                                                                 
 dense_1 (Dense)             (None, 10)                2570      
                                                                 
=================================================================
Total params: 814,122
Trainable params: 814,122
Non-trainable params: 0
_________________________________________________________________

 

history = model.fit(x_train, y_train, epochs = 30, batch_size=32, validation_data=(x_val, y_val))

Epoch 2/30
1094/1094 [==============================] - 18s 17ms/step - loss: 1.3787 - accuracy: 0.5038 - val_loss: 1.2544 - val_accuracy: 0.5494
Epoch 3/30
1094/1094 [==============================] - 18s 17ms/step - loss: 1.2222 - accuracy: 0.5633 - val_loss: 1.1116 - val_accuracy: 0.6081
Epoch 4/30
1094/1094 [==============================] - 18s 17ms/step - loss: 1.0968 - accuracy: 0.6141 - val_loss: 1.0004 - val_accuracy: 0.6431
Epoch 5/30
1094/1094 [==============================] - 19s 17ms/step - loss: 0.9990 - accuracy: 0.6471 - val_loss: 0.9361 - val_accuracy: 0.6729
Epoch 6/30
1094/1094 [==============================] - 18s 17ms/step - loss: 0.9249 - accuracy: 0.6755 - val_loss: 0.8729 - val_accuracy: 0.6933
Epoch 7/30
1094/1094 [==============================] - 18s 17ms/step - loss: 0.8679 - accuracy: 0.6976 - val_loss: 0.8966 - val_accuracy: 0.6892
Epoch 8/30
1094/1094 [==============================] - 18s 17ms/step - loss: 0.8129 - accuracy: 0.7137 - val_loss: 0.8020 - val_accuracy: 0.7199
Epoch 9/30
1094/1094 [==============================] - 18s 17ms/step - loss: 0.7627 - accuracy: 0.7346 - val_loss: 0.8088 - val_accuracy: 0.7163
Epoch 10/30
1094/1094 [==============================] - 19s 17ms/step - loss: 0.7174 - accuracy: 0.7475 - val_loss: 0.7383 - val_accuracy: 0.7461
Epoch 11/30
1094/1094 [==============================] - 18s 17ms/step - loss: 0.6758 - accuracy: 0.7631 - val_loss: 0.7459 - val_accuracy: 0.7385
Epoch 12/30
1094/1094 [==============================] - 18s 17ms/step - loss: 0.6344 - accuracy: 0.7775 - val_loss: 0.7039 - val_accuracy: 0.7598
Epoch 13/30
1094/1094 [==============================] - 19s 17ms/step - loss: 0.6049 - accuracy: 0.7873 - val_loss: 0.6931 - val_accuracy: 0.7604
Epoch 14/30
1094/1094 [==============================] - 18s 17ms/step - loss: 0.5688 - accuracy: 0.8027 - val_loss: 0.6745 - val_accuracy: 0.7714
Epoch 15/30
1094/1094 [==============================] - 19s 17ms/step - loss: 0.5286 - accuracy: 0.8126 - val_loss: 0.6714 - val_accuracy: 0.7748
Epoch 16/30
1094/1094 [==============================] - 18s 17ms/step - loss: 0.5004 - accuracy: 0.8252 - val_loss: 0.6840 - val_accuracy: 0.7696
Epoch 17/30
1094/1094 [==============================] - 19s 17ms/step - loss: 0.4699 - accuracy: 0.8360 - val_loss: 0.6799 - val_accuracy: 0.7753
Epoch 18/30
1094/1094 [==============================] - 19s 17ms/step - loss: 0.4400 - accuracy: 0.8460 - val_loss: 0.7541 - val_accuracy: 0.7537
Epoch 19/30
1094/1094 [==============================] - 18s 17ms/step - loss: 0.4134 - accuracy: 0.8568 - val_loss: 0.6927 - val_accuracy: 0.7743
Epoch 20/30
1094/1094 [==============================] - 18s 17ms/step - loss: 0.3886 - accuracy: 0.8631 - val_loss: 0.6667 - val_accuracy: 0.7825
Epoch 21/30
1094/1094 [==============================] - 19s 17ms/step - loss: 0.3647 - accuracy: 0.8710 - val_loss: 0.6785 - val_accuracy: 0.7802
Epoch 22/30
1094/1094 [==============================] - 19s 17ms/step - loss: 0.3439 - accuracy: 0.8773 - val_loss: 0.7082 - val_accuracy: 0.7773
Epoch 23/30
1094/1094 [==============================] - 19s 17ms/step - loss: 0.3129 - accuracy: 0.8900 - val_loss: 0.6903 - val_accuracy: 0.7829
Epoch 24/30
1094/1094 [==============================] - 19s 17ms/step - loss: 0.2956 - accuracy: 0.8934 - val_loss: 0.7203 - val_accuracy: 0.7827
Epoch 25/30
1094/1094 [==============================] - 19s 17ms/step - loss: 0.2700 - accuracy: 0.9021 - val_loss: 0.7522 - val_accuracy: 0.7779
Epoch 26/30
1094/1094 [==============================] - 18s 17ms/step - loss: 0.2616 - accuracy: 0.9074 - val_loss: 0.7496 - val_accuracy: 0.7777
Epoch 27/30
1094/1094 [==============================] - 19s 17ms/step - loss: 0.2394 - accuracy: 0.9151 - val_loss: 0.7958 - val_accuracy: 0.7715
Epoch 28/30
1094/1094 [==============================] - 19s 17ms/step - loss: 0.2239 - accuracy: 0.9187 - val_loss: 0.7575 - val_accuracy: 0.7849
Epoch 29/30
1094/1094 [==============================] - 18s 17ms/step - loss: 0.2073 - accuracy: 0.9272 - val_loss: 0.8220 - val_accuracy: 0.7763
Epoch 30/30
1094/1094 [==============================] - 19s 17ms/step - loss: 0.1895 - accuracy: 0.9335 - val_loss: 0.8184 - val_accuracy: 0.7779

 

import matplotlib.pyplot as plt
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.plot(history.history['loss'], 'b--', label='loss')
plt.plot(history.history['val_loss'], 'r--', label='val_loss')
plt.xlabel('Epoch')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['accuracy'], 'b--', label='accuracy')
plt.plot(history.history['val_accuracy'], 'r--', label='val_accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.show()

 

import tensorflow as tf
get_layer_name = [layer.name for layer in model.layers]  # the name of each layer
get_output = [layer.output for layer in model.layers]    # the output tensor of each layer

 

# A single model can return the output of every layer at once.
visual_model = tf.keras.models.Model(inputs = model.input, outputs = get_output)
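The same idea works for a single layer: build an intermediate model from one named output instead of the whole list. A small sketch, using the first convolution layer's name from model.summary() above:

single_layer_model = tf.keras.models.Model(inputs = model.input,
                                           outputs = model.get_layer('conv2d').output)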

 

# The second item in the test set is a ship.
# np.expand_dims: add an axis (here, the batch dimension)
test_img = np.expand_dims(x_test[1], axis = 0)  # a batch holding one image
feature_maps = visual_model.predict(test_img)   # activations of every layer

 

for layer_name, feature_map in zip(get_layer_name, feature_maps) :
    # Only 4-D activations are image-like; this skips the Flatten/Dense layers
    if (len(feature_map.shape) == 4) :
        img_size = feature_map.shape[1]
        features = feature_map.shape[-1]
        # there are `features` feature maps of shape (img_size, img_size)
        display_grid = np.zeros((img_size, img_size*features))
        # tile each feature map side by side into display_grid
        for i in range(features) :
            x = feature_map[0, :, :, i]
            x -= x.mean()
            x /= (x.std() + 1e-5)  # small epsilon so a constant map does not divide by zero
            x *= 64; x += 128
            # np.clip(x, min, max): values below min become min, values above max become max
            # uint8: unsigned 8-bit integer, stores values 0 ~ 255
            x = np.clip(x, 0, 255).astype('uint8')
            display_grid[:, i*img_size : (i+1)*img_size] = x

        # draw one figure per layer, after the grid is fully assembled
        plt.figure(figsize = (features, 2+ 1./features))
        plt.title(layer_name, fontsize = 20)
        plt.grid(False)
        plt.imshow(display_grid, aspect = 'auto', cmap = 'viridis')

 

 

 

feature_maps

array([[[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 1.39482296e+00],
        [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,
         2.76024416e-02, 0.00000000e+00, 5.43535233e-01],
        [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,
         1.38139203e-02, 0.00000000e+00, 5.73320389e-01],
        ...,
        [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,
         1.09774694e-02, 0.00000000e+00, 5.79168320e-01],
        [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,
         1.65029764e-02, 0.00000000e+00, 5.72852731e-01],
        [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,
         2.69572318e-01, 1.54419020e-01, 6.14448786e-01]],

       [[8.70621726e-02, 1.16789293e+00, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 6.19573593e-01],
        [0.00000000e+00, 3.38762790e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 2.36449093e-02],
        [0.00000000e+00, 3.63755435e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 3.32385749e-02],
        ...,
        [0.00000000e+00, 3.74371648e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 3.94297838e-02],
        [0.00000000e+00, 3.64182293e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 3.97360772e-02],
        [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,
         2.79395968e-01, 1.05052516e-01, 5.69859147e-01]],

       [[9.11299512e-02, 1.19996595e+00, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 6.01863384e-01],
        [0.00000000e+00, 3.73179615e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 1.75776482e-02],
        [0.00000000e+00, 3.93598586e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 3.17991376e-02],
        ...,
        [0.00000000e+00, 4.09143835e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 2.86802649e-02],
        [0.00000000e+00, 3.96855444e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 3.05447578e-02],
        [0.00000000e+00, 3.17612104e-03, 0.00000000e+00, ...,
         2.83317327e-01, 1.04692817e-01, 5.77692389e-01]],

       ...,

       [[0.00000000e+00, 0.00000000e+00, 6.22074790e-02, ...,
         1.29832864e-01, 4.22493607e-01, 1.15428813e-01],
        [0.00000000e+00, 0.00000000e+00, 1.04348838e-01, ...,
         1.16021566e-01, 4.70879734e-01, 1.49464458e-01],
        [0.00000000e+00, 0.00000000e+00, 7.95534253e-02, ...,
         5.20418696e-02, 3.50514859e-01, 3.41182947e-01],
        ...,
        [1.10264830e-02, 2.37248719e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 1.13192379e-01],
        [0.00000000e+00, 2.84808189e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 8.39859173e-02],
        [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,
         1.35430560e-01, 1.16444044e-02, 4.01980400e-01]],

       [[0.00000000e+00, 0.00000000e+00, 5.82160428e-03, ...,
         1.23548001e-01, 4.29432690e-01, 1.80128574e-01],
        [0.00000000e+00, 0.00000000e+00, 2.48929821e-02, ...,
         5.41288555e-02, 4.20156270e-01, 2.42548764e-01],
        [0.00000000e+00, 0.00000000e+00, 5.73865324e-03, ...,
         8.02730769e-02, 2.72210300e-01, 4.23867196e-01],
        ...,
        [3.34452726e-02, 3.19640160e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 4.02029157e-02],
        [0.00000000e+00, 3.15535009e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 4.96883318e-02],
        [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,
         1.07538484e-01, 1.59807876e-03, 3.95719260e-01]],

       [[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,
         9.56358314e-02, 3.18210661e-01, 4.88872856e-01],
        [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,
         1.20503813e-01, 3.74177188e-01, 6.73626781e-01],
        [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,
         2.22301960e-01, 3.52858812e-01, 7.71849155e-01],
        ...,
        [5.10937199e-02, 6.91264212e-01, 9.41429287e-04, ...,
         0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
        [5.38808331e-02, 7.41093576e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
        [0.00000000e+00, 2.85340041e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 1.04997531e-02]]], dtype=float32)

 

x_test[1].shape
(32, 32, 3)

 

test_img.shape

(1, 32, 32, 3)

 

model.summary()

Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d (Conv2D)             (None, 32, 32, 32)        896       
                                                                 
 conv2d_1 (Conv2D)           (None, 32, 32, 32)        9248      
                                                                 
 max_pooling2d (MaxPooling2D  (None, 16, 16, 32)       0         
 )                                                               
                                                                 
 dropout (Dropout)           (None, 16, 16, 32)        0         
                                                                 
 conv2d_2 (Conv2D)           (None, 16, 16, 64)        18496     
                                                                 
 conv2d_3 (Conv2D)           (None, 16, 16, 64)        36928     
                                                                 
 max_pooling2d_1 (MaxPooling  (None, 8, 8, 64)         0         
 2D)                                                             
                                                                 
 dropout_1 (Dropout)         (None, 8, 8, 64)          0         
                                                                 
 conv2d_4 (Conv2D)           (None, 8, 8, 128)         73856     
                                                                 
 conv2d_5 (Conv2D)           (None, 8, 8, 128)         147584    
                                                                 
 max_pooling2d_2 (MaxPooling  (None, 4, 4, 128)        0         
 2D)                                                             
                                                                 
 dropout_2 (Dropout)         (None, 4, 4, 128)         0         
                                                                 
 flatten (Flatten)           (None, 2048)              0         
                                                                 
 dense (Dense)               (None, 256)               524544    
                                                                 
 dense_1 (Dense)             (None, 10)                2570      
                                                                 
=================================================================
Total params: 814,122
Trainable params: 814,122
Non-trainable params: 0
_________________________________________________________________

 


# Fashion MNIST with CNN

import tensorflow as tf
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_x, train_y), (test_x, test_y) = fashion_mnist.load_data()
train_x = train_x / 255.0 # scale pixel values to [0, 1]
test_x = test_x / 255.0

 

print(train_x.shape, test_x.shape) # 2-D images
train_x = train_x.reshape(-1, 28, 28, 1) # 2-D image => 3-D image (add a channel axis)
test_x = test_x.reshape(-1, 28, 28, 1)
print(train_x.shape, test_x.shape)

# (60000, 28, 28) (10000, 28, 28)
# (60000, 28, 28, 1) (10000, 28, 28, 1)

 

# Inspect the data as images
import matplotlib.pyplot as plt
plt.figure(figsize = (10, 10))
for c in range(16) :
    plt.subplot(4,4,c+1)
    plt.imshow(train_x[c].reshape(28, 28), cmap='gray')
plt.show()

print(train_y[:16])

# [9 0 0 3 0 2 7 2 5 5 0 9 5 5 7 9]

# Define the CNN model
# kernel_size: 3x3 weight filter

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(input_shape=(28,28,1), kernel_size=(3,3), filters=16), # three convolution layers
    tf.keras.layers.Conv2D(kernel_size=(3,3), filters=32),
    tf.keras.layers.Conv2D(kernel_size=(3,3), filters=64),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(units=128, activation='relu'),
    tf.keras.layers.Dense(units=10, activation='softmax')
])
model.compile(optimizer=tf.keras.optimizers.Adam(),
             loss='sparse_categorical_crossentropy',
             metrics=['accuracy'])
model.summary()

Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_10 (Conv2D)           (None, 26, 26, 16)        160       
_________________________________________________________________
conv2d_11 (Conv2D)           (None, 24, 24, 32)        4640      
_________________________________________________________________
conv2d_12 (Conv2D)           (None, 22, 22, 64)        18496     
_________________________________________________________________
flatten_1 (Flatten)          (None, 30976)             0         
_________________________________________________________________
dense (Dense)                (None, 128)               3965056   
_________________________________________________________________
dense_1 (Dense)              (None, 10)                1290      
=================================================================
Total params: 3,989,642
Trainable params: 3,989,642
Non-trainable params: 0
_________________________________________________________________

 

history=model.fit(train_x, train_y, epochs=10, validation_split=0.25)

Epoch 1/10
1407/1407 [==============================] - 161s 114ms/step - loss: 0.4653 - accuracy: 0.8360 - val_loss: 0.4354 - val_accuracy: 0.8505
Epoch 2/10
1407/1407 [==============================] - 161s 114ms/step - loss: 0.3381 - accuracy: 0.8775 - val_loss: 0.4108 - val_accuracy: 0.8515
Epoch 3/10
1407/1407 [==============================] - 159s 113ms/step - loss: 0.2825 - accuracy: 0.8956 - val_loss: 0.3780 - val_accuracy: 0.8667
Epoch 4/10
1407/1407 [==============================] - 158s 112ms/step - loss: 0.2400 - accuracy: 0.9120 - val_loss: 0.4034 - val_accuracy: 0.8691
Epoch 5/10
1407/1407 [==============================] - 158s 112ms/step - loss: 0.2070 - accuracy: 0.9235 - val_loss: 0.4951 - val_accuracy: 0.8657
Epoch 6/10
1407/1407 [==============================] - 158s 112ms/step - loss: 0.1814 - accuracy: 0.9320 - val_loss: 0.4982 - val_accuracy: 0.8533
Epoch 7/10
1407/1407 [==============================] - 158s 112ms/step - loss: 0.1623 - accuracy: 0.9398 - val_loss: 0.4963 - val_accuracy: 0.8640
Epoch 8/10
1407/1407 [==============================] - 158s 112ms/step - loss: 0.1448 - accuracy: 0.9460 - val_loss: 0.5650 - val_accuracy: 0.8687
Epoch 9/10
1407/1407 [==============================] - 157s 112ms/step - loss: 0.1350 - accuracy: 0.9511 - val_loss: 0.6309 - val_accuracy: 0.8693
Epoch 10/10
1407/1407 [==============================] - 157s 112ms/step - loss: 0.1174 - accuracy: 0.9574 - val_loss: 0.6193 - val_accuracy: 0.8647

 

import matplotlib.pyplot as plt
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.plot(history.history['loss'], 'b--', label='loss')
plt.plot(history.history['val_loss'], 'r--', label='val_loss')
plt.xlabel('Epoch')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['accuracy'], 'b--', label='accuracy')
plt.plot(history.history['val_accuracy'], 'r--', label='val_accuracy')
plt.xlabel('Epoch')
plt.ylim(0.7, 1)
plt.legend()
plt.show()

# Ways to improve CNN performance
# 1. VGGNet-style design: stack small 3x3 convolutions into a deep network (VGG variants use 16-19 weight layers)
# 2. Image augmentation: turn one image into many transformed variants
# Define the CNN model
# with pooling and dropout layers added
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(input_shape=(28,28,1), kernel_size=(3,3), filters=32), # three convolution layers
    tf.keras.layers.MaxPool2D(strides=(2,2)), # halves the feature-map size
    tf.keras.layers.Conv2D(kernel_size=(3,3), filters=64), # no padding, so each spatial side shrinks by 2
    tf.keras.layers.MaxPool2D(strides=(2,2)), # keeps only the maximum of each 2x2 window
    tf.keras.layers.Conv2D(kernel_size=(3,3), filters=128),

    tf.keras.layers.Flatten(), # 3x3x128 = 1152 values
    tf.keras.layers.Dense(units=128, activation='relu'),
    tf.keras.layers.Dropout(rate=0.3), # randomly drops units to reduce overfitting
    tf.keras.layers.Dense(units=10, activation='softmax') # output layer
])
model.compile(optimizer=tf.keras.optimizers.Adam(),
             loss='sparse_categorical_crossentropy',
             metrics=['accuracy'])

model.summary()


Model: "sequential_1"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_3 (Conv2D)           (None, 26, 26, 32)        320       
                                                                 
 max_pooling2d (MaxPooling2D  (None, 13, 13, 32)       0         
 )                                                               
                                                                 
 conv2d_4 (Conv2D)           (None, 11, 11, 64)        18496     
                                                                 
 max_pooling2d_1 (MaxPooling  (None, 5, 5, 64)         0         
 2D)                                                             
                                                                 
 conv2d_5 (Conv2D)           (None, 3, 3, 128)         73856     
                                                                 
 flatten_1 (Flatten)         (None, 1152)              0         
                                                                 
 dense_2 (Dense)             (None, 128)               147584    
                                                                 
 dropout (Dropout)           (None, 128)               0         
                                                                 
 dense_3 (Dense)             (None, 10)                1290      
                                                                 
=================================================================
Total params: 241,546
Trainable params: 241,546
Non-trainable params: 0
_________________________________________________________________

 

history=model.fit(train_x, train_y, epochs=10, validation_split=0.25)

Epoch 1/10
1407/1407 [==============================] - 48s 34ms/step - loss: 0.5216 - accuracy: 0.8115 - val_loss: 0.3656 - val_accuracy: 0.8667
Epoch 2/10
1407/1407 [==============================] - 48s 34ms/step - loss: 0.3579 - accuracy: 0.8712 - val_loss: 0.3305 - val_accuracy: 0.8783
Epoch 3/10
1407/1407 [==============================] - 48s 34ms/step - loss: 0.3150 - accuracy: 0.8869 - val_loss: 0.3104 - val_accuracy: 0.8909
Epoch 4/10
1407/1407 [==============================] - 47s 34ms/step - loss: 0.2887 - accuracy: 0.8947 - val_loss: 0.3002 - val_accuracy: 0.8916
Epoch 5/10
1407/1407 [==============================] - 47s 34ms/step - loss: 0.2686 - accuracy: 0.9012 - val_loss: 0.3242 - val_accuracy: 0.8816
Epoch 6/10
1407/1407 [==============================] - 47s 34ms/step - loss: 0.2530 - accuracy: 0.9064 - val_loss: 0.3300 - val_accuracy: 0.8905
Epoch 7/10
1407/1407 [==============================] - 47s 33ms/step - loss: 0.2344 - accuracy: 0.9127 - val_loss: 0.2962 - val_accuracy: 0.8948
Epoch 8/10
1407/1407 [==============================] - 47s 33ms/step - loss: 0.2244 - accuracy: 0.9170 - val_loss: 0.3060 - val_accuracy: 0.8957
Epoch 9/10
1407/1407 [==============================] - 47s 33ms/step - loss: 0.2116 - accuracy: 0.9216 - val_loss: 0.3231 - val_accuracy: 0.8984
Epoch 10/10
1407/1407 [==============================] - 47s 33ms/step - loss: 0.2025 - accuracy: 0.9259 - val_loss: 0.3317 - val_accuracy: 0.8967

 

import matplotlib.pyplot as plt
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.plot(history.history['loss'], 'b--', label='loss')
plt.plot(history.history['val_loss'], 'r--', label='val_loss')
plt.xlabel('Epoch')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['accuracy'], 'b--', label='accuracy')
plt.plot(history.history['val_accuracy'], 'r--', label='val_accuracy')
plt.xlabel('Epoch')
plt.ylim(0.7, 1)
plt.legend()
plt.show()

 

model.evaluate(test_x, test_y, verbose=0)

# [0.34508538246154785, 0.89410001039505]

 

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(input_shape=(28,28,1), kernel_size=(3,3), filters=32,
                           padding='same', activation='relu'), # padding='same' keeps the 28x28 size
    tf.keras.layers.Conv2D(kernel_size=(3,3), filters=64, padding='same', activation='relu'),
    tf.keras.layers.MaxPool2D(pool_size=(2,2)),
    tf.keras.layers.Dropout(rate=0.5),
    tf.keras.layers.Conv2D(kernel_size=(3,3), filters=128, padding='same', activation='relu'),
    tf.keras.layers.Conv2D(kernel_size=(3,3), filters=256, padding='valid', activation='relu'),
    tf.keras.layers.MaxPool2D(strides=(2,2)),
    tf.keras.layers.Dropout(rate=0.5),
    tf.keras.layers.Flatten(),

    tf.keras.layers.Dense(units=512, activation='relu'),
    tf.keras.layers.Dropout(rate=0.5), # randomly drops units to reduce overfitting
    tf.keras.layers.Dense(units=256, activation='relu'),
    tf.keras.layers.Dropout(rate=0.5),
    tf.keras.layers.Dense(units=10, activation='softmax') # output layer
])
model.compile(optimizer=tf.keras.optimizers.Adam(),
             loss='sparse_categorical_crossentropy',
             metrics=['accuracy'])
model.summary()

Model: "sequential_2"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_6 (Conv2D)           (None, 28, 28, 32)        320       
                                                                 
 conv2d_7 (Conv2D)           (None, 28, 28, 64)        18496     
                                                                 
 max_pooling2d_2 (MaxPooling  (None, 14, 14, 64)       0         
 2D)                                                             
                                                                 
 dropout_1 (Dropout)         (None, 14, 14, 64)        0         
                                                                 
 conv2d_8 (Conv2D)           (None, 14, 14, 128)       73856     
                                                                 
 conv2d_9 (Conv2D)           (None, 12, 12, 256)       295168    
                                                                 
 max_pooling2d_3 (MaxPooling  (None, 6, 6, 256)        0         
 2D)                                                             
                                                                 
 dropout_2 (Dropout)         (None, 6, 6, 256)         0         
                                                                 
 flatten_2 (Flatten)         (None, 9216)              0         
                                                                 
 dense_4 (Dense)             (None, 512)               4719104   
                                                                 
 dropout_3 (Dropout)         (None, 512)               0         
                                                                 
 dense_5 (Dense)             (None, 256)               131328    
                                                                 
 dropout_4 (Dropout)         (None, 256)               0         
                                                                 
 dense_6 (Dense)             (None, 10)                2570      
                                                                 
=================================================================
Total params: 5,240,842
Trainable params: 5,240,842
Non-trainable params: 0
_________________________________________________________________

 

history=model.fit(train_x, train_y, epochs=10, validation_split=0.25)

Epoch 1/10
1407/1407 [==============================] - 543s 386ms/step - loss: 0.5847 - accuracy: 0.7843 - val_loss: 0.3323 - val_accuracy: 0.8735
Epoch 2/10
1407/1407 [==============================] - 540s 384ms/step - loss: 0.3726 - accuracy: 0.8672 - val_loss: 0.2894 - val_accuracy: 0.8891
Epoch 3/10
1407/1407 [==============================] - 540s 384ms/step - loss: 0.3268 - accuracy: 0.8821 - val_loss: 0.2642 - val_accuracy: 0.9027
Epoch 4/10
1407/1407 [==============================] - 542s 386ms/step - loss: 0.3048 - accuracy: 0.8894 - val_loss: 0.2433 - val_accuracy: 0.9103
Epoch 5/10
1407/1407 [==============================] - 541s 385ms/step - loss: 0.2897 - accuracy: 0.8959 - val_loss: 0.2278 - val_accuracy: 0.9140
Epoch 6/10
1407/1407 [==============================] - 543s 386ms/step - loss: 0.2773 - accuracy: 0.9004 - val_loss: 0.2281 - val_accuracy: 0.9122
Epoch 7/10
1407/1407 [==============================] - 543s 386ms/step - loss: 0.2666 - accuracy: 0.9039 - val_loss: 0.2333 - val_accuracy: 0.9160
Epoch 8/10
1407/1407 [==============================] - 548s 389ms/step - loss: 0.2672 - accuracy: 0.9039 - val_loss: 0.2255 - val_accuracy: 0.9179
Epoch 9/10
1407/1407 [==============================] - 553s 393ms/step - loss: 0.2552 - accuracy: 0.9094 - val_loss: 0.2200 - val_accuracy: 0.9205
Epoch 10/10
1407/1407 [==============================] - 554s 394ms/step - loss: 0.2532 - accuracy: 0.9078 - val_loss: 0.2148 - val_accuracy: 0.9227

 

import matplotlib.pyplot as plt
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.plot(history.history['loss'], 'b--', label='loss')
plt.plot(history.history['val_loss'], 'r--', label='val_loss')
plt.xlabel('Epoch')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['accuracy'], 'g-', label='accuracy')
plt.plot(history.history['val_accuracy'], 'k--', label='val_accuracy')
plt.xlabel('Epoch')
plt.ylim(0.7, 1)
plt.legend()
plt.show()

model.evaluate(test_x, test_y, verbose=0)

# [0.23406749963760376, 0.9172999858856201]

 

 

 

 

import tensorflow as tf
import matplotlib.pyplot as plt
image_path = tf.keras.utils.get_file('cat.jpg', 'http://bit.ly/33U6mH9')
# imread: returns the image as a numeric array
image = plt.imread(image_path)
print(image.shape)

# (241, 320, 3)

 

print(image[0])

[[223 220 203]
 [224 221 204]
 [224 221 204]
 ...
 [218 213 194]
 [218 213 194]
 [216 213 194]]

 

titles = ['RGB Image', 'Red channel', 'Green channel', 'Blue channel']
from numpy import array, zeros_like
def channel(image, color) :
    if color not in (0, 1, 2) : return image
    c = image[..., color]
    print(c)
    z = zeros_like(c)
    # keep one channel, zero the others, then reorder axes to (H, W, channel)
    return array([(c,z,z),(z,c,z),(z,z,c)][color]).transpose(1,2,0)
colors = range(-1, 3)
fig, axes = plt.subplots(1, 4, figsize = (13, 3))
objs = zip(axes, titles, colors)
for ax, title, color in objs :
    ax.imshow(channel(image, color))
    ax.set_title(title)
    ax.set_xticks(())
    ax.set_yticks(())
plt.show()




[[223 224 224 ... 218 218 216]
 [224 224 224 ... 218 218 216]
 [224 224 224 ... 218 216 216]
 ...
 [138 138 144 ... 198 196 194]
 [144 134 135 ... 200 200 200]
 [148 145 143 ... 200 198 196]]
[[220 221 221 ... 213 213 213]
 [221 221 221 ... 213 213 213]
 [221 221 221 ... 213 213 213]
 ...
 [ 96  96 101 ... 191 189 187]
 [102  92  92 ... 193 193 193]
 [101  98  97 ... 193 191 189]]
[[203 204 204 ... 194 194 194]
 [204 204 204 ... 194 194 194]
 [204 204 204 ... 194 194 196]
 ...
 [ 38  38  46 ... 172 170 168]
 [ 44  34  37 ... 174 174 174]
 [ 49  46  47 ... 174 172 170]]

# Conv2D: convolution layer
# kernel_size: size of the filter window, e.g. 3x3
# strides: how many pixels the window moves each step (default 1)
# padding: whether to pad so the output does not shrink
#     'valid': no padding // 'same': pad to keep the input size
# filters: number of filters (output channels), not the number of layers

 

conv1 = tf.keras.layers.Conv2D(kernel_size=(3,3),strides=(2,2), padding='valid',filters=16)

# MaxPool2D: max-pooling layer
# pool_size: shape of the pooling window

pool1 = tf.keras.layers.MaxPool2D(pool_size=(2,2),strides=(2,2))

# Dropout layer
# an option to reduce overfitting
# excludes the given fraction (rate) of units during training

drop1 = tf.keras.layers.Dropout(rate=0.3)  # a separate name, so the pooling layer above is not overwritten
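A quick way to see how these layers change the feature-map size is to push a dummy tensor through them; a small sketch assuming a 28x28 single-channel input:

x = tf.zeros((1, 28, 28, 1))   # dummy batch holding one image
y = conv1(x)
print(y.shape)                 # (1, 13, 13, 16): 'valid' padding, stride 2 -> (28 - 3) // 2 + 1 = 13
y = pool1(y)
print(y.shape)                 # (1, 6, 6, 16): 2x2 max pooling with stride 2
print(drop1(y).shape)          # (1, 6, 6, 16): dropout never changes the shape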

 

 

 

import pandas as pd
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/"
red = pd.read_csv(url+'winequality-red.csv',sep=';')
white = pd.read_csv(url+'winequality-white.csv',sep=';')

 

print(red.head())

   fixed acidity  volatile acidity  citric acid  residual sugar  chlorides  \
0            7.4              0.70         0.00             1.9      0.076   
1            7.8              0.88         0.00             2.6      0.098   
2            7.8              0.76         0.04             2.3      0.092   
3           11.2              0.28         0.56             1.9      0.075   
4            7.4              0.70         0.00             1.9      0.076   

   free sulfur dioxide  total sulfur dioxide  density    pH  sulphates  \
0                 11.0                  34.0   0.9978  3.51       0.56   
1                 25.0                  67.0   0.9968  3.20       0.68   
2                 15.0                  54.0   0.9970  3.26       0.65   
3                 17.0                  60.0   0.9980  3.16       0.58   
4                 11.0                  34.0   0.9978  3.51       0.56   

   alcohol  quality  
0      9.4        5  
1      9.8        5  
2      9.8        5  
3      9.8        6  
4      9.4        5

 

red.info()

<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1599 entries, 0 to 1598
Data columns (total 12 columns):
 #   Column                Non-Null Count  Dtype  
---  ------                --------------  -----  
 0   fixed acidity         1599 non-null   float64
 1   volatile acidity      1599 non-null   float64
 2   citric acid           1599 non-null   float64
 3   residual sugar        1599 non-null   float64
 4   chlorides             1599 non-null   float64
 5   free sulfur dioxide   1599 non-null   float64
 6   total sulfur dioxide  1599 non-null   float64
 7   density               1599 non-null   float64
 8   pH                    1599 non-null   float64
 9   sulphates             1599 non-null   float64
 10  alcohol               1599 non-null   float64
 11  quality               1599 non-null   int64  
dtypes: float64(11), int64(1)
memory usage: 150.0 KB

 

print(white.head())

   fixed acidity  volatile acidity  citric acid  residual sugar  chlorides  \
0            7.0              0.27         0.36            20.7      0.045   
1            6.3              0.30         0.34             1.6      0.049   
2            8.1              0.28         0.40             6.9      0.050   
3            7.2              0.23         0.32             8.5      0.058   
4            7.2              0.23         0.32             8.5      0.058   

   free sulfur dioxide  total sulfur dioxide  density    pH  sulphates  \
0                 45.0                 170.0   1.0010  3.00       0.45   
1                 14.0                 132.0   0.9940  3.30       0.49   
2                 30.0                  97.0   0.9951  3.26       0.44   
3                 47.0                 186.0   0.9956  3.19       0.40   
4                 47.0                 186.0   0.9956  3.19       0.40   

   alcohol  quality  
0      8.8        6  
1      9.5        6  
2     10.1        6  
3      9.9        6  
4      9.9        6

 

white.info()

<class 'pandas.core.frame.DataFrame'>
RangeIndex: 4898 entries, 0 to 4897
Data columns (total 12 columns):
 #   Column                Non-Null Count  Dtype  
---  ------                --------------  -----  
 0   fixed acidity         4898 non-null   float64
 1   volatile acidity      4898 non-null   float64
 2   citric acid           4898 non-null   float64
 3   residual sugar        4898 non-null   float64
 4   chlorides             4898 non-null   float64
 5   free sulfur dioxide   4898 non-null   float64
 6   total sulfur dioxide  4898 non-null   float64
 7   density               4898 non-null   float64
 8   pH                    4898 non-null   float64
 9   sulphates             4898 non-null   float64
 10  alcohol               4898 non-null   float64
 11  quality               4898 non-null   int64  
dtypes: float64(11), int64(1)
memory usage: 459.3 KB

 

red['type'] = 0
white['type'] = 1
wine = pd.concat([red, white])
wine.info()

<class 'pandas.core.frame.DataFrame'>
Int64Index: 6497 entries, 0 to 4897
Data columns (total 13 columns):
 #   Column                Non-Null Count  Dtype  
---  ------                --------------  -----  
 0   fixed acidity         6497 non-null   float64
 1   volatile acidity      6497 non-null   float64
 2   citric acid           6497 non-null   float64
 3   residual sugar        6497 non-null   float64
 4   chlorides             6497 non-null   float64
 5   free sulfur dioxide   6497 non-null   float64
 6   total sulfur dioxide  6497 non-null   float64
 7   density               6497 non-null   float64
 8   pH                    6497 non-null   float64
 9   sulphates             6497 non-null   float64
 10  alcohol               6497 non-null   float64
 11  quality               6497 non-null   int64  
 12  type                  6497 non-null   int64  
dtypes: float64(11), int64(2)
memory usage: 710.6 KB

 

# Summary statistics
wine.describe()

	fixed acidity	volatile acidity	citric acid	residual sugar	chlorides	free sulfur dioxide	total sulfur dioxide	density	pH	sulphates	alcohol	quality	type
count	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000
mean	7.215307	0.339666	0.318633	5.443235	0.056034	30.525319	115.744574	0.994697	3.218501	0.531268	10.491801	5.818378	0.753886
std	1.296434	0.164636	0.145318	4.757804	0.035034	17.749400	56.521855	0.002999	0.160787	0.148806	1.192712	0.873255	0.430779
min	3.800000	0.080000	0.000000	0.600000	0.009000	1.000000	6.000000	0.987110	2.720000	0.220000	8.000000	3.000000	0.000000
25%	6.400000	0.230000	0.250000	1.800000	0.038000	17.000000	77.000000	0.992340	3.110000	0.430000	9.500000	5.000000	1.000000
50%	7.000000	0.290000	0.310000	3.000000	0.047000	29.000000	118.000000	0.994890	3.210000	0.510000	10.300000	6.000000	1.000000
75%	7.700000	0.400000	0.390000	8.100000	0.065000	41.000000	156.000000	0.996990	3.320000	0.600000	11.300000	6.000000	1.000000
max	15.900000	1.580000	1.660000	65.800000	0.611000	289.000000	440.000000	1.038980	4.010000	2.000000	14.900000	9.000000	1.000000

 

wine.type.value_counts()

1    4898
0    1599
Name: type, dtype: int64

 

wine_norm = (wine - wine.min()) / (wine.max() - wine.min())
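The same column-wise min-max scaling can be done with scikit-learn, already used earlier in these notes; an equivalent sketch:

from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()  # rescales each column to [0, 1], matching the formula above
wine_norm_sk = pd.DataFrame(scaler.fit_transform(wine), columns=wine.columns)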

wine_norm.describe()
	fixed acidity	volatile acidity	citric acid	residual sugar	chlorides	free sulfur dioxide	total sulfur dioxide	density	pH	sulphates	alcohol	quality	type
count	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000
mean	0.282257	0.173111	0.191948	0.074283	0.078129	0.102518	0.252868	0.146262	0.386435	0.174870	0.361131	0.469730	0.753886
std	0.107143	0.109758	0.087541	0.072972	0.058195	0.061630	0.130235	0.057811	0.124641	0.083599	0.172857	0.145543	0.430779
min	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000
25%	0.214876	0.100000	0.150602	0.018405	0.048173	0.055556	0.163594	0.100829	0.302326	0.117978	0.217391	0.333333	1.000000
50%	0.264463	0.140000	0.186747	0.036810	0.063123	0.097222	0.258065	0.149990	0.379845	0.162921	0.333333	0.500000	1.000000
75%	0.322314	0.213333	0.234940	0.115031	0.093023	0.138889	0.345622	0.190476	0.465116	0.213483	0.478261	0.500000	1.000000
max	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000

 

wine_norm.head()

	fixed acidity	volatile acidity	citric acid	residual sugar	chlorides	free sulfur dioxide	total sulfur dioxide	density	pH	sulphates	alcohol	quality	type
0	0.297521	0.413333	0.000000	0.019939	0.111296	0.034722	0.064516	0.206092	0.612403	0.191011	0.202899	0.333333	0.0
1	0.330579	0.533333	0.000000	0.030675	0.147841	0.083333	0.140553	0.186813	0.372093	0.258427	0.260870	0.333333	0.0
2	0.330579	0.453333	0.024096	0.026074	0.137874	0.048611	0.110599	0.190669	0.418605	0.241573	0.260870	0.333333	0.0
3	0.611570	0.133333	0.337349	0.019939	0.109635	0.055556	0.124424	0.209948	0.341085	0.202247	0.260870	0.500000	0.0
4	0.297521	0.413333	0.000000	0.019939	0.111296	0.034722	0.064516	0.206092	0.612403	0.191011	0.202899	0.333333	0.0

 

# Shuffle the rows
import numpy as np
wine_shuffle = wine_norm.sample(frac=1)  # frac=1 returns every row, in random order
print(wine_shuffle.head())

     fixed acidity  volatile acidity  citric acid  residual sugar  chlorides  \
2593       0.206612          0.086667     0.174699        0.170245   0.064784   
1273       0.305785          0.333333     0.120482        0.021472   0.106312   
1348       0.280992          0.383333     0.018072        0.018405   0.114618   
665        0.330579          0.133333     0.132530        0.012270   0.078073   
1040       0.363636          0.400000     0.180723        0.023006   0.063123   

      free sulfur dioxide  total sulfur dioxide   density        pH  \
2593             0.166667              0.324885  0.148641  0.387597   
1273             0.114583              0.087558  0.150954  0.294574   
1348             0.020833              0.013825  0.168884  0.480620   
665              0.079861              0.285714  0.140544  0.434109   
1040             0.055556              0.304147  0.152111  0.387597   

      sulphates   alcohol   quality  type  
2593   0.089888  0.405797  0.333333   1.0  
1273   0.117978  0.188406  0.333333   0.0  
1348   0.095506  0.217391  0.333333   0.0  
665    0.146067  0.217391  0.333333   1.0  
1040   0.275281  0.405797  0.166667   1.0

 

wine_np = wine_shuffle.to_numpy()
print(wine_np[:5])

[[0.20661157 0.08666667 0.1746988  0.1702454  0.06478405 0.16666667
  0.32488479 0.14864083 0.3875969  0.08988764 0.4057971  0.33333333
  1.        ]
 [0.30578512 0.33333333 0.12048193 0.02147239 0.10631229 0.11458333
  0.0875576  0.15095431 0.29457364 0.11797753 0.1884058  0.33333333
  0.        ]
 [0.28099174 0.38333333 0.01807229 0.01840491 0.11461794 0.02083333
  0.01382488 0.16888375 0.48062016 0.09550562 0.2173913  0.33333333
  0.        ]
 [0.33057851 0.13333333 0.13253012 0.01226994 0.07807309 0.07986111
  0.28571429 0.14054367 0.43410853 0.14606742 0.2173913  0.33333333
  1.        ]
 [0.36363636 0.4        0.18072289 0.02300613 0.06312292 0.05555556
  0.30414747 0.15211105 0.3875969  0.2752809  0.4057971  0.16666667
  1.        ]]

 

import tensorflow as tf
len(wine_np)

# 6497

train_idx = int(len(wine_np)*0.8)  # 80/20 split point
print(train_idx)
# 5197

train_X, train_Y = wine_np[:train_idx, :-1], wine_np[:train_idx, -1]
test_X, test_Y = wine_np[train_idx:, :-1], wine_np[train_idx:, -1]

 

print(train_X[0])
print(train_Y[0])
print(test_X[0])
print(test_Y[0])

[0.15702479 0.09333333 0.12048193 0.23619632 0.05813953 0.13888889
 0.24654378 0.22190091 0.3875969  0.13483146 0.13043478 0.5       ]
1.0
[0.23966942 0.10666667 0.21084337 0.19171779 0.06810631 0.21875
 0.45852535 0.19066898 0.33333333 0.15730337 0.2173913  0.33333333]
1.0

 

# One-hot encoding
import tensorflow as tf
train_Y = tf.keras.utils.to_categorical(train_Y, num_classes=2)
test_Y = tf.keras.utils.to_categorical(test_Y, num_classes=2)
train_Y[0]

# array([0., 1.], dtype=float32)

 

# Classification model
# Note: stacking a softmax layer and then a sigmoid layer is unusual; normally a single
# output layer is used (see the alternative sketch below).
model = tf.keras.Sequential([tf.keras.layers.Dense(units=48, activation = 'relu', input_shape=(12,)),
                             tf.keras.layers.Dense(units=24, activation = 'relu'),
                             tf.keras.layers.Dense(units=12, activation = 'relu'),
                             tf.keras.layers.Dense(units=2, activation = 'softmax'),
                             tf.keras.layers.Dense(units=2, activation = 'sigmoid')
                            ])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.07),
#             loss='categorical_crossentropy', metrics = ['accuracy'])
              loss='binary_crossentropy', metrics = ['accuracy'])
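Because the label here is binary, a more conventional head is a single sigmoid unit, which also keeps the 0/1 labels as-is. An alternative sketch, not the model trained below:

alt_model = tf.keras.Sequential([
    tf.keras.layers.Dense(units=48, activation='relu', input_shape=(12,)),
    tf.keras.layers.Dense(units=24, activation='relu'),
    tf.keras.layers.Dense(units=12, activation='relu'),
    tf.keras.layers.Dense(units=1, activation='sigmoid')  # probability of class 1 (white wine)
])
alt_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss='binary_crossentropy', metrics=['accuracy'])
# With this head, train_Y would stay as the raw 0/1 column; to_categorical is unnecessary.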

 

model.summary()
# Dense is a fully connected layer: it learns from data as a 1-D array,
# so it ignores spatial relationships between pixels. Flattening a 2-D image
# into 1-D loses those image characteristics; convolution (Conv2D) layers are used instead.

Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense (Dense)                (None, 48)                624       
_________________________________________________________________
dense_1 (Dense)              (None, 24)                1176      
_________________________________________________________________
dense_2 (Dense)              (None, 12)                300       
_________________________________________________________________
dense_3 (Dense)              (None, 2)                 26        
_________________________________________________________________
dense_4 (Dense)              (None, 2)                 6         
=================================================================
Total params: 2,132
Trainable params: 2,132
Non-trainable params: 0
_________________________________________________________________

 

# Training
history = model.fit(train_X, train_Y, epochs = 25, batch_size = 32, validation_split=0.25)

Epoch 1/25
122/122 [==============================] - 2s 8ms/step - loss: 0.5704 - accuracy: 0.7708 - val_loss: 0.5771 - val_accuracy: 0.7362
Epoch 2/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5489 - accuracy: 0.7645 - val_loss: 0.5807 - val_accuracy: 0.7362
Epoch 3/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5497 - accuracy: 0.7649 - val_loss: 0.5775 - val_accuracy: 0.7362
Epoch 4/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5431 - accuracy: 0.7682 - val_loss: 0.5825 - val_accuracy: 0.7362
Epoch 5/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5570 - accuracy: 0.7586 - val_loss: 0.5770 - val_accuracy: 0.7362
Epoch 6/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5663 - accuracy: 0.7477 - val_loss: 0.5775 - val_accuracy: 0.7362
Epoch 7/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5632 - accuracy: 0.7504 - val_loss: 0.5870 - val_accuracy: 0.7362
Epoch 8/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5582 - accuracy: 0.7556 - val_loss: 0.5773 - val_accuracy: 0.7362
Epoch 9/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5576 - accuracy: 0.7564 - val_loss: 0.5783 - val_accuracy: 0.7362
Epoch 10/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5713 - accuracy: 0.7444 - val_loss: 0.5772 - val_accuracy: 0.7362
Epoch 11/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5606 - accuracy: 0.7534 - val_loss: 0.5799 - val_accuracy: 0.7362
Epoch 12/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5498 - accuracy: 0.7637 - val_loss: 0.5787 - val_accuracy: 0.7362
Epoch 13/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5517 - accuracy: 0.7620 - val_loss: 0.5854 - val_accuracy: 0.7362
Epoch 14/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5663 - accuracy: 0.7488 - val_loss: 0.5780 - val_accuracy: 0.7362
Epoch 15/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5583 - accuracy: 0.7560 - val_loss: 0.5772 - val_accuracy: 0.7362
Epoch 16/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5645 - accuracy: 0.7501 - val_loss: 0.5770 - val_accuracy: 0.7362
Epoch 17/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5686 - accuracy: 0.7456 - val_loss: 0.5860 - val_accuracy: 0.7362
Epoch 18/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5499 - accuracy: 0.7620 - val_loss: 0.5788 - val_accuracy: 0.7362
Epoch 19/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5738 - accuracy: 0.7478 - val_loss: 0.5802 - val_accuracy: 0.7362
Epoch 20/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5537 - accuracy: 0.7598 - val_loss: 0.5800 - val_accuracy: 0.7362
Epoch 21/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5449 - accuracy: 0.7661 - val_loss: 0.5911 - val_accuracy: 0.7362
Epoch 22/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5615 - accuracy: 0.7542 - val_loss: 0.5809 - val_accuracy: 0.7362
Epoch 23/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5647 - accuracy: 0.7507 - val_loss: 0.5771 - val_accuracy: 0.7362
Epoch 24/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5590 - accuracy: 0.7544 - val_loss: 0.5774 - val_accuracy: 0.7362
Epoch 25/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5519 - accuracy: 0.7623 - val_loss: 0.5882 - val_accuracy: 0.7362

 

# Visualize results: loss and accuracy
import matplotlib.pyplot as plt
plt.figure(figsize = (12,4))

plt.subplot(1,2,1)
plt.plot(history.history['loss'], 'b-', label = 'loss')
plt.plot(history.history['val_loss'], 'r--', label = 'val_loss')
plt.xlabel('Epoch')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['accuracy'], 'g-', label = 'accuracy')
plt.plot(history.history['val_accuracy'], 'k--', label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylim(0.7, 1)
plt.legend()
plt.show()

# Evaluate the model
model.evaluate(test_X, test_Y)

41/41 [==============================] - 0s 2ms/step - loss: 0.5416 - accuracy: 0.7700
[0.5416469573974609, 0.7699999809265137]

 

from tensorflow.keras.datasets import boston_housing
(train_x, train_y), (test_x, test_y) = boston_housing.load_data()

 

mean = train_x.mean(axis=0)
std = train_x.std(axis=0)
train_x = (train_x - mean) / std
test_x = (test_x - mean) / std

 

# Regression model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential([
    Dense(units = 52, activation ='relu', input_shape=(13,)),
    Dense(39, activation ='relu'),
    Dense(26, activation ='relu'),
    Dense(1, activation ='relu'),
])

 

model.summary()

Model: "sequential_6"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_18 (Dense)             (None, 52)                728       
_________________________________________________________________
dense_19 (Dense)             (None, 39)                2067      
_________________________________________________________________
dense_20 (Dense)             (None, 26)                1040      
_________________________________________________________________
dense_21 (Dense)             (None, 1)                 27        
=================================================================
Total params: 3,862
Trainable params: 3,862
Non-trainable params: 0
_________________________________________________________________

 

model.compile(optimizer='adam', loss='mse', metrics = ['mae'])

 

# Train; the loss curves for the training and validation data are plotted below
history = model.fit(train_x, train_y, epochs = 25, batch_size = 32, validation_split = 0.25)

Epoch 1/25
10/10 [==============================] - 0s 19ms/step - loss: 530.4370 - mae: 21.3062 - val_loss: 646.2498 - val_mae: 23.3830
Epoch 2/25
10/10 [==============================] - 0s 5ms/step - loss: 511.7708 - mae: 20.8918 - val_loss: 605.8097 - val_mae: 22.5224
Epoch 3/25
10/10 [==============================] - 0s 5ms/step - loss: 452.3462 - mae: 19.4829 - val_loss: 543.3400 - val_mae: 21.1280
Epoch 4/25
10/10 [==============================] - 0s 5ms/step - loss: 421.9097 - mae: 18.4410 - val_loss: 448.8548 - val_mae: 18.8896
Epoch 5/25
10/10 [==============================] - 0s 5ms/step - loss: 328.2960 - mae: 15.9189 - val_loss: 322.6113 - val_mae: 15.4995
Epoch 6/25
10/10 [==============================] - 0s 5ms/step - loss: 206.3778 - mae: 12.3063 - val_loss: 190.2625 - val_mae: 10.9874
Epoch 7/25
10/10 [==============================] - 0s 5ms/step - loss: 102.5738 - mae: 8.2045 - val_loss: 108.8373 - val_mae: 7.7611
Epoch 8/25
10/10 [==============================] - 0s 5ms/step - loss: 62.3362 - mae: 6.0779 - val_loss: 83.7476 - val_mae: 6.9045
Epoch 9/25
10/10 [==============================] - 0s 5ms/step - loss: 54.6748 - mae: 5.5581 - val_loss: 64.6556 - val_mae: 6.0057
Epoch 10/25
10/10 [==============================] - 0s 5ms/step - loss: 38.2626 - mae: 4.5089 - val_loss: 52.2166 - val_mae: 5.2636
Epoch 11/25
10/10 [==============================] - 0s 5ms/step - loss: 29.6534 - mae: 3.8594 - val_loss: 42.0955 - val_mae: 4.6582
Epoch 12/25
10/10 [==============================] - 0s 5ms/step - loss: 23.6841 - mae: 3.5540 - val_loss: 35.9563 - val_mae: 4.2582
Epoch 13/25
10/10 [==============================] - 0s 6ms/step - loss: 22.4709 - mae: 3.3830 - val_loss: 32.3157 - val_mae: 3.9782
Epoch 14/25
10/10 [==============================] - 0s 5ms/step - loss: 29.0002 - mae: 3.3399 - val_loss: 29.0226 - val_mae: 3.7494
Epoch 15/25
10/10 [==============================] - 0s 5ms/step - loss: 16.4506 - mae: 2.8439 - val_loss: 28.3905 - val_mae: 3.6493
Epoch 16/25
10/10 [==============================] - 0s 5ms/step - loss: 14.7652 - mae: 2.7855 - val_loss: 26.5145 - val_mae: 3.5460
Epoch 17/25
10/10 [==============================] - 0s 6ms/step - loss: 17.2251 - mae: 2.8775 - val_loss: 24.6995 - val_mae: 3.4769
Epoch 18/25
10/10 [==============================] - 0s 5ms/step - loss: 18.4610 - mae: 2.8323 - val_loss: 23.6749 - val_mae: 3.3981
Epoch 19/25
10/10 [==============================] - 0s 5ms/step - loss: 17.8006 - mae: 2.9075 - val_loss: 23.2420 - val_mae: 3.3467
Epoch 20/25
10/10 [==============================] - 0s 5ms/step - loss: 14.6954 - mae: 2.6773 - val_loss: 22.6341 - val_mae: 3.2986
Epoch 21/25
10/10 [==============================] - 0s 5ms/step - loss: 15.6623 - mae: 2.7753 - val_loss: 21.0724 - val_mae: 3.2230
Epoch 22/25
10/10 [==============================] - 0s 5ms/step - loss: 12.5014 - mae: 2.5566 - val_loss: 20.8122 - val_mae: 3.1781
Epoch 23/25
10/10 [==============================] - 0s 5ms/step - loss: 18.1167 - mae: 2.7763 - val_loss: 19.5292 - val_mae: 3.1146
Epoch 24/25
10/10 [==============================] - 0s 5ms/step - loss: 14.6604 - mae: 2.8073 - val_loss: 19.0061 - val_mae: 3.0980
Epoch 25/25
10/10 [==============================] - 0s 5ms/step - loss: 13.0764 - mae: 2.5586 - val_loss: 18.5477 - val_mae: 3.0422

 

import matplotlib.pyplot as plt
fig = plt.figure(figsize = (10,5))

ax1 = fig.add_subplot(1,2,1)
ax1.plot(history.history['loss'], 'b-', label='loss')
ax1.plot(history.history['val_loss'], 'r--', label='val_loss')
ax1.set_xlabel('epochs')
ax1.set_ylabel('loss')
ax1.legend()

ax2 = fig.add_subplot(1,2,2)
ax2.plot(history.history['mae'], 'b-', label='mae')
ax2.plot(history.history['val_mae'], 'r--', label='val_mae')
ax2.set_xlabel('epochs')
ax2.set_ylabel('mae')
ax2.legend()

ax2.set_title('train and val mae')
plt.show()

 

 


# Cross-validation // helps prevent overfitting
# hold-out validation: set aside a randomly chosen portion of the dataset for validation
# k-fold validation: split the data into k groups; train on k-1 groups and validate on the remaining one (see the sketch below)
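
To see how k-fold carves up the data, here is a quick sketch (not from the original notes) printing the fold indices for six dummy samples with k = 3; each sample lands in the validation fold exactly once:

from sklearn.model_selection import KFold
import numpy as np

data = np.arange(6) # six dummy samples
for train_idx, val_idx in KFold(n_splits = 3).split(data) :
    print('train:', train_idx, 'val:', val_idx)
# train: [2 3 4 5] val: [0 1]
# train: [0 1 4 5] val: [2 3]
# train: [0 1 2 3] val: [4 5]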

from sklearn.model_selection import KFold
from tensorflow.keras.datasets.boston_housing import load_data
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import numpy as np
(x_train, y_train), (x_test, y_test) = load_data(path='boston_housing.npz', test_split=0.2, seed=777)

 

# Standardize the data
import numpy as np
# mean
mean = np.mean(x_train, axis = 0)
# standard deviation
std = np.std(x_train, axis = 0)
# standardized values
x_train = (x_train - mean) / std
x_test = (x_test - mean) / std

 

k = 3
kfold = KFold(n_splits = k)
def get_model() :
    model = Sequential()
    model.add(Dense(64, activation = 'relu', input_shape = (13,)))
    model.add(Dense(32, activation = 'relu'))
    model.add(Dense(1)) # one linear output; don't use sigmoid here, it would squash predictions into 0~1
    model.compile(optimizer = 'adam', loss = 'mse', metrics = ['mae'])
    return model

 

mae_list = []
for train_index, val_index in kfold.split(x_train) :
    x_train_fold, x_val_fold = x_train[train_index], x_train[val_index]
    y_train_fold, y_val_fold = y_train[train_index], y_train[val_index]
    model = get_model()
    model.fit(x_train_fold, y_train_fold, epochs = 300, validation_data = (x_val_fold, y_val_fold))
    # evaluate on the test set: returns the loss and the metric (MAE)
    _, test_mae = model.evaluate(x_test, y_test)
    mae_list.append(test_mae)

 

model.evaluate(x_test, y_test)

4/4 [==============================] - 0s 1ms/step - loss: 9.3774 - mae: 2.1469
[9.377416610717773, 2.14687442779541]

 

mae_list

# [2.11413836479187, 2.008519411087036, 2.14687442779541]

 

# Final result
print('mean MAE :', np.mean(mae_list))

# mean MAE : 2.089844067891439

 

# Scatter plot of actual vs. predicted house prices
results = model.predict(x_test)
results[:10]

array([[21.987972],
       [21.056334],
       [47.57793 ],
       [22.31767 ],
       [12.485436],
       [33.702785],
       [26.14967 ],
       [29.55312 ],
       [26.651386],
       [24.07104 ]], dtype=float32)

 

import matplotlib.pyplot as plt
plt.figure(figsize = (5,5))
plt.plot(y_test, results, 'b.')
plt.plot([min(y_test), max(y_test)], [min(results), max(results)], ls='--', c='.3')
plt.xlabel('y_test')
plt.ylabel('results')
plt.show()

 

# Boston housing price regression with deep learning
from tensorflow.keras.datasets.boston_housing import load_data
(x_train, y_train), (x_test, y_test) = load_data(path='boston_housing.npz', test_split=0.2, seed=777)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)

# (404, 13) (404,)
# (102, 13) (102,)

 

# Preprocessing: standardization
# min-max normalization : x = (x - min) / (max - min) => x / max when min is 0
# robust normalization : x = (x - median) / (Q3 - Q1)
# standardization : x = (x - mean) / std

import numpy as np
# mean
mean = np.mean(x_train, axis = 0)
# standard deviation
std = np.std(x_train, axis = 0)
# standardized values
x_train = (x_train - mean) / std
x_test = (x_test - mean) / std
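
The min-max and robust variants listed in the comment above can be written the same way; a sketch (illustrative only, applied column-wise to the raw features before any standardization):

import numpy as np

# min-max normalization: rescale each feature into the 0~1 range
x_min, x_max = x_train.min(axis = 0), x_train.max(axis = 0)
x_minmax = (x_train - x_min) / (x_max - x_min)

# robust normalization: center on the median, scale by the IQR (Q3 - Q1)
q1, q2, q3 = np.percentile(x_train, [25, 50, 75], axis = 0)
x_robust = (x_train - q2) / (q3 - q1)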

 

# Split off a validation set
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.33, random_state = 777)

 

# Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(64, activation = 'relu', input_shape = (13,)))
model.add(Dense(32, activation = 'relu'))
model.add(Dense(1)) # one linear output; don't use sigmoid here, it would squash predictions into 0~1

 

# mse : mean squared error
# mae : mean absolute error
model.compile(optimizer = 'adam', loss = 'mse', metrics = ['mae'])

 

# Train
history = model.fit(x_train, y_train, epochs = 10, validation_data = (x_val, y_val))

Epoch 1/10
9/9 [==============================] - 1s 18ms/step - loss: 573.3483 - mae: 22.1287 - val_loss: 590.5661 - val_mae: 22.0131
Epoch 2/10
9/9 [==============================] - 0s 6ms/step - loss: 539.8865 - mae: 21.2907 - val_loss: 566.5182 - val_mae: 21.4469
Epoch 3/10
9/9 [==============================] - 0s 6ms/step - loss: 477.1085 - mae: 19.9834 - val_loss: 535.4792 - val_mae: 20.6974
Epoch 4/10
9/9 [==============================] - 0s 6ms/step - loss: 466.8590 - mae: 19.5715 - val_loss: 493.8533 - val_mae: 19.6793
Epoch 5/10
9/9 [==============================] - 0s 6ms/step - loss: 424.9529 - mae: 18.5560 - val_loss: 438.8377 - val_mae: 18.3004
Epoch 6/10
9/9 [==============================] - 0s 5ms/step - loss: 379.3619 - mae: 17.4473 - val_loss: 369.3901 - val_mae: 16.5165
Epoch 7/10
9/9 [==============================] - 0s 6ms/step - loss: 306.1354 - mae: 15.6274 - val_loss: 289.7295 - val_mae: 14.3575
Epoch 8/10
9/9 [==============================] - 0s 6ms/step - loss: 242.9725 - mae: 13.7859 - val_loss: 208.9959 - val_mae: 11.8795
Epoch 9/10
9/9 [==============================] - 0s 6ms/step - loss: 151.2137 - mae: 10.6995 - val_loss: 139.9005 - val_mae: 9.3653
Epoch 10/10
9/9 [==============================] - 0s 5ms/step - loss: 101.8669 - mae: 8.2810 - val_loss: 93.3427 - val_mae: 7.3553

 

model.evaluate(x_test, y_test)

4/4 [==============================] - 0s 2ms/step - loss: 88.2695 - mae: 7.8024
[88.26952362060547, 7.802405834197998]

 

results = model.predict(x_test)
xval = range(len(x_test))

 

# Check the results
# The two panels below plot the predicted values and the actual values separately
import matplotlib.pyplot as plt

fig = plt.figure(figsize = (10,5))
ax1 = fig.add_subplot(1,2,1)
ax1.plot(xval, results, color='blue', label='predict_value')
ax1.set_title('predict value')
ax1.set_xlabel('data')
ax1.set_ylabel('price')
ax2 = fig.add_subplot(1,2,2)
ax2.plot(xval, y_test, color='orange', label='real_value')
ax2.set_title('real value')
ax2.set_xlabel('data')
ax2.set_ylabel('price')
plt.show()

 

fig = plt.figure(figsize = (10,5))
ax1 = fig.add_subplot(1,1,1)
ax1.plot(xval, results, color = 'blue', label = 'predict_value')
ax1.plot(xval, y_test, color = 'orange', label = 'real_value')
ax1.set_title('predict and real value')
ax1.set_xlabel('data')
ax1.set_ylabel('price')
ax1.legend()
plt.show()

 

 

# fashionmnist
from tensorflow.keras.datasets.fashion_mnist import load_data
(x_train, y_train), (x_test, y_test) = load_data()
print(x_train.shape)

# (60000, 28, 28)

y_train[:10]

# array([9, 0, 0, 3, 0, 2, 7, 2, 5, 5], dtype=uint8)

 

class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

 

# Display random samples as images
import matplotlib.pyplot as plt
import numpy as np
sample_size = 9
# draw 9 random indices from 0 ~ 59999
random_idx = np.random.randint(60000, size = sample_size)
plt.figure(figsize = (5, 5))
for i, idx in enumerate(random_idx) :
    plt.subplot(3,3, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(x_train[idx], cmap = 'gray')
    plt.xlabel(class_names[y_train[idx]])
plt.show()

 

# Normalize the train and test data to 0~1
x_train[:10]
x_train = x_train / 255
x_test = x_test / 255

 

from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

 

from sklearn.model_selection import train_test_split
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size=0.3, random_state = 777)
print('train data :', X_train.shape, ', labels :', Y_train.shape)
print('val data :', X_val.shape, ', labels :', Y_val.shape)

# train data : (42000, 28, 28) , labels : (42000, 10)
# val data : (18000, 28, 28) , labels : (18000, 10)

 

Model 1: hidden layers of 64 and 32 units, output layer of 10

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
model1 = Sequential()
model1.add(Flatten(input_shape=(28, 28))) # input layer: flattens the 2-D image to 1-D, so no manual .reshape is needed
model1.add(Dense(64, activation = 'relu'))
model1.add(Dense(32, activation = 'relu'))
model1.add(Dense(10, activation = 'softmax'))

 

model1.summary()              # input layer (Flatten)
                                # first hidden layer: 64
                                # second hidden layer: 32
                                # output layer: 10
                                
Model: "sequential_3"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
flatten_3 (Flatten)          (None, 784)               0         
_________________________________________________________________
dense_9 (Dense)              (None, 64)                50240     
_________________________________________________________________
dense_10 (Dense)             (None, 32)                2080      
_________________________________________________________________
dense_11 (Dense)             (None, 10)                330       
=================================================================
Total params: 52,650
Trainable params: 52,650
Non-trainable params: 0
_________________________________________________________________

 

model1.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
# fit on the split training set (X_train); fitting on the full x_train would leak the validation samples into training
history1 = model1.fit(X_train, Y_train, epochs=30, batch_size=128, validation_data=(X_val, Y_val))


Epoch 1/30
469/469 [==============================] - 1s 3ms/step - loss: 0.8711 - acc: 0.6973 - val_loss: 0.4393 - val_acc: 0.8425
Epoch 2/30
469/469 [==============================] - 1s 2ms/step - loss: 0.4209 - acc: 0.8495 - val_loss: 0.3694 - val_acc: 0.8717
Epoch 3/30
469/469 [==============================] - 1s 3ms/step - loss: 0.3815 - acc: 0.8622 - val_loss: 0.3443 - val_acc: 0.8754
Epoch 4/30
469/469 [==============================] - 1s 2ms/step - loss: 0.3505 - acc: 0.8727 - val_loss: 0.3208 - val_acc: 0.8841
Epoch 5/30
469/469 [==============================] - 1s 2ms/step - loss: 0.3327 - acc: 0.8785 - val_loss: 0.3102 - val_acc: 0.8882
Epoch 6/30
469/469 [==============================] - 1s 2ms/step - loss: 0.3199 - acc: 0.8824 - val_loss: 0.2903 - val_acc: 0.8952
Epoch 7/30
469/469 [==============================] - 1s 2ms/step - loss: 0.3057 - acc: 0.8892 - val_loss: 0.2904 - val_acc: 0.8952
Epoch 8/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2986 - acc: 0.8901 - val_loss: 0.2779 - val_acc: 0.8995
Epoch 9/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2860 - acc: 0.8940 - val_loss: 0.2606 - val_acc: 0.9059
Epoch 10/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2795 - acc: 0.8974 - val_loss: 0.2608 - val_acc: 0.9048
Epoch 11/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2768 - acc: 0.8970 - val_loss: 0.2661 - val_acc: 0.9025
Epoch 12/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2607 - acc: 0.9033 - val_loss: 0.2498 - val_acc: 0.9096
Epoch 13/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2628 - acc: 0.9039 - val_loss: 0.2418 - val_acc: 0.9116
Epoch 14/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2509 - acc: 0.9073 - val_loss: 0.2443 - val_acc: 0.9102
Epoch 15/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2412 - acc: 0.9120 - val_loss: 0.2269 - val_acc: 0.9183
Epoch 16/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2372 - acc: 0.9133 - val_loss: 0.2412 - val_acc: 0.9125
Epoch 17/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2391 - acc: 0.9112 - val_loss: 0.2172 - val_acc: 0.9212
Epoch 18/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2262 - acc: 0.9171 - val_loss: 0.2078 - val_acc: 0.9261
Epoch 19/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2258 - acc: 0.9153 - val_loss: 0.2031 - val_acc: 0.9273
Epoch 20/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2245 - acc: 0.9169 - val_loss: 0.2010 - val_acc: 0.9285
Epoch 21/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2173 - acc: 0.9191 - val_loss: 0.2006 - val_acc: 0.9281
Epoch 22/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2162 - acc: 0.9197 - val_loss: 0.2037 - val_acc: 0.9268
Epoch 23/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2087 - acc: 0.9242 - val_loss: 0.1952 - val_acc: 0.9304
Epoch 24/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2038 - acc: 0.9250 - val_loss: 0.1937 - val_acc: 0.9313
Epoch 25/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2038 - acc: 0.9245 - val_loss: 0.2027 - val_acc: 0.9263
Epoch 26/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2032 - acc: 0.9229 - val_loss: 0.2045 - val_acc: 0.9246
Epoch 27/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1944 - acc: 0.9291 - val_loss: 0.1839 - val_acc: 0.9336
Epoch 28/30
469/469 [==============================] - 1s 2ms/step - loss: 0.1925 - acc: 0.9286 - val_loss: 0.1966 - val_acc: 0.9277
Epoch 29/30
469/469 [==============================] - 1s 2ms/step - loss: 0.1818 - acc: 0.9326 - val_loss: 0.1905 - val_acc: 0.9293
Epoch 30/30
469/469 [==============================] - 1s 2ms/step - loss: 0.1872 - acc: 0.9304 - val_loss: 0.1950 - val_acc: 0.9287

 

model1.evaluate(x_test,y_test)

313/313 [==============================] - 0s 1ms/step - loss: 0.3881 - acc: 0.8763
[0.3880555033683777, 0.8762999773025513]

 

results = model1.predict(x_test)

 

# Inspect the confusion matrix
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
# np.argmax(y_test, axis=-1) returns the index of the max value per row, i.e. the class index
cm = confusion_matrix(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1))
cm

array([[891,   2,  14,  11,   4,   2,  72,   0,   4,   0],
       [ 10, 971,   1,  10,   4,   0,   4,   0,   0,   0],
       [ 22,   1, 787,  12,  91,   0,  87,   0,   0,   0],
       [ 37,   8,  13, 869,  37,   1,  31,   0,   4,   0],
       [  4,   0,  99,  23, 815,   1,  57,   0,   1,   0],
       [  0,   0,   0,   1,   0, 952,   0,  20,   1,  26],
       [159,   1,  77,  25,  77,   0, 657,   0,   4,   0],
       [  0,   0,   0,   0,   0,  20,   0, 884,   2,  94],
       [ 10,   1,   4,   4,  12,   3,  11,   4, 951,   0],
       [  0,   0,   0,   0,   0,   5,   1,   8,   0, 986]], dtype=int64)

 

import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize = (7,7))
sns.heatmap(cm, annot = True, fmt = 'd', cmap = 'Blues')
plt.xlabel('predicted label', fontsize = 15)
plt.ylabel('true label', fontsize = 15)
plt.show()

 

import pandas as pd
pd.Series(np.argmax(y_test, axis = -1)).value_counts().sort_index()

0    1000
1    1000
2    1000
3    1000
4    1000
5    1000
6    1000
7    1000
8    1000
9    1000
dtype: int64

 

print(classification_report(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1)))

              precision    recall  f1-score   support

           0       0.79      0.89      0.84      1000
           1       0.99      0.97      0.98      1000
           2       0.79      0.79      0.79      1000
           3       0.91      0.87      0.89      1000
           4       0.78      0.81      0.80      1000
           5       0.97      0.95      0.96      1000
           6       0.71      0.66      0.68      1000
           7       0.97      0.88      0.92      1000
           8       0.98      0.95      0.97      1000
           9       0.89      0.99      0.94      1000

    accuracy                           0.88     10000
   macro avg       0.88      0.88      0.88     10000
weighted avg       0.88      0.88      0.88     10000

 

Building Model 2: hidden layers of 128, 64, and 32 units, output layer of 10

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
model2 = Sequential()
model2.add(Flatten(input_shape=(28, 28))) # input layer: flattens the 2-D image to 1-D, so no manual .reshape is needed
model2.add(Dense(128, activation = 'relu'))
model2.add(Dense(64, activation = 'relu'))
model2.add(Dense(32, activation = 'relu'))
model2.add(Dense(10, activation = 'softmax'))
model2.summary()

Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
flatten_2 (Flatten)          (None, 784)               0         
_________________________________________________________________
dense_6 (Dense)              (None, 128)               100480    
_________________________________________________________________
dense_7 (Dense)              (None, 64)                8256      
_________________________________________________________________
dense_8 (Dense)              (None, 32)                2080      
_________________________________________________________________
dense_9 (Dense)              (None, 10)                330       
=================================================================
Total params: 111,146
Trainable params: 111,146
Non-trainable params: 0
_________________________________________________________________

 

model2.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
history2 = model2.fit(X_train, Y_train, epochs=30, batch_size=128, validation_data=(X_val, Y_val))

Epoch 1/30
469/469 [==============================] - 2s 3ms/step - loss: 0.8033 - acc: 0.7328 - val_loss: 0.3923 - val_acc: 0.8634
Epoch 2/30
469/469 [==============================] - 1s 3ms/step - loss: 0.4000 - acc: 0.8577 - val_loss: 0.3466 - val_acc: 0.8776
Epoch 3/30
469/469 [==============================] - 1s 3ms/step - loss: 0.3499 - acc: 0.8721 - val_loss: 0.3355 - val_acc: 0.8779
Epoch 4/30
469/469 [==============================] - 1s 3ms/step - loss: 0.3283 - acc: 0.8814 - val_loss: 0.3091 - val_acc: 0.8868
Epoch 5/30
469/469 [==============================] - 1s 3ms/step - loss: 0.3073 - acc: 0.8877 - val_loss: 0.2801 - val_acc: 0.8997
Epoch 6/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2897 - acc: 0.8933 - val_loss: 0.2657 - val_acc: 0.9041
Epoch 7/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2751 - acc: 0.8974 - val_loss: 0.2663 - val_acc: 0.9022
Epoch 8/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2692 - acc: 0.8995 - val_loss: 0.2529 - val_acc: 0.9076
Epoch 9/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2634 - acc: 0.9024 - val_loss: 0.2363 - val_acc: 0.9133
Epoch 10/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2438 - acc: 0.9089 - val_loss: 0.2339 - val_acc: 0.9136
Epoch 11/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2448 - acc: 0.9083 - val_loss: 0.2393 - val_acc: 0.9090
Epoch 12/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2355 - acc: 0.9112 - val_loss: 0.2355 - val_acc: 0.9114
Epoch 13/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2243 - acc: 0.9148 - val_loss: 0.2158 - val_acc: 0.9202
Epoch 14/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2173 - acc: 0.9186 - val_loss: 0.2080 - val_acc: 0.9226
Epoch 15/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2168 - acc: 0.9197 - val_loss: 0.1917 - val_acc: 0.9295
Epoch 16/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2055 - acc: 0.9223 - val_loss: 0.1866 - val_acc: 0.9332
Epoch 17/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1978 - acc: 0.9242 - val_loss: 0.1891 - val_acc: 0.9302
Epoch 18/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2004 - acc: 0.9248 - val_loss: 0.1708 - val_acc: 0.9376
Epoch 19/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1906 - acc: 0.9285 - val_loss: 0.1878 - val_acc: 0.9294
Epoch 20/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1884 - acc: 0.9303 - val_loss: 0.1774 - val_acc: 0.9341
Epoch 21/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1841 - acc: 0.9300 - val_loss: 0.1714 - val_acc: 0.9340
Epoch 22/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1739 - acc: 0.9330 - val_loss: 0.1576 - val_acc: 0.9419
Epoch 23/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1723 - acc: 0.9344 - val_loss: 0.1664 - val_acc: 0.9353
Epoch 24/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1663 - acc: 0.9377 - val_loss: 0.1593 - val_acc: 0.9411
Epoch 25/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1593 - acc: 0.9401 - val_loss: 0.1602 - val_acc: 0.9388
Epoch 26/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1598 - acc: 0.9394 - val_loss: 0.1670 - val_acc: 0.9378
Epoch 27/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1606 - acc: 0.9389 - val_loss: 0.1579 - val_acc: 0.9382
Epoch 28/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1464 - acc: 0.9441 - val_loss: 0.1777 - val_acc: 0.9341
Epoch 29/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1471 - acc: 0.9452 - val_loss: 0.1376 - val_acc: 0.9475
Epoch 30/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1513 - acc: 0.9430 - val_loss: 0.1364 - val_acc: 0.9483

 

model2.evaluate(x_test,y_test)

313/313 [==============================] - 0s 1ms/step - loss: 0.3984 - acc: 0.8906
[0.398380845785141, 0.8906000256538391]

 

results = model2.predict(x_test)

 

# Inspect the confusion matrix
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
# np.argmax(y_test, axis=-1) returns the index of the max value per row, i.e. the class index
cm = confusion_matrix(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1))
cm

array([[860,   1,  11,  26,   5,   1,  89,   0,   7,   0],
       [  6, 973,   0,  15,   4,   0,   2,   0,   0,   0],
       [ 21,   1, 793,  14,  89,   1,  78,   0,   3,   0],
       [ 21,   3,   7, 900,  40,   0,  22,   0,   7,   0],
       [  1,   0,  78,  21, 856,   1,  41,   0,   2,   0],
       [  0,   0,   0,   1,   0, 979,   0,  14,   1,   5],
       [131,   1,  60,  42,  76,   0, 677,   0,  13,   0],
       [  0,   0,   0,   0,   0,  27,   0, 930,   0,  43],
       [  6,   0,   4,   4,   2,   2,   4,   3, 974,   1],
       [  0,   0,   0,   0,   0,  15,   1,  20,   0, 964]], dtype=int64)

 

import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize = (7,7))
sns.heatmap(cm, annot = True, fmt = 'd', cmap = 'Blues')
plt.xlabel('predicted label', fontsize = 15)
plt.ylabel('true label', fontsize = 15)
plt.show()

 

print(classification_report(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1)))

              precision    recall  f1-score   support

           0       0.82      0.86      0.84      1000
           1       0.99      0.97      0.98      1000
           2       0.83      0.79      0.81      1000
           3       0.88      0.90      0.89      1000
           4       0.80      0.86      0.83      1000
           5       0.95      0.98      0.97      1000
           6       0.74      0.68      0.71      1000
           7       0.96      0.93      0.95      1000
           8       0.97      0.97      0.97      1000
           9       0.95      0.96      0.96      1000

    accuracy                           0.89     10000
   macro avg       0.89      0.89      0.89     10000
weighted avg       0.89      0.89      0.89     10000

 

Comparing the models

import numpy as np
import matplotlib.pyplot as plt
def draw_loss_acc(history_1, history_2, epochs) :
    his_dict_1 = history_1.history
    his_dict_2 = history_2.history
    keys = list(his_dict_1.keys()) # ['loss', 'acc', 'val_loss', 'val_acc']
    epochs = range(1, epochs)
    fig = plt.figure(figsize = (10,10))
    ax = fig.add_subplot(1,1,1) # an invisible parent axis that holds shared labels for all subplots
    ax.spines['top'].set_color('none') # set a color instead of 'none' to draw the frame
    ax.spines['bottom'].set_color('none')
    ax.spines['left'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)
    for i in range(len(his_dict_1)) :
        temp_ax = fig.add_subplot(2,2, i + 1)
        temp = keys[i%2]
        val_temp = keys[(i+2)%2 +2]
        temp_history = his_dict_1 if i < 2 else his_dict_2
        temp_ax.plot(epochs, temp_history[temp][1:], color = 'blue', label ='train_'+temp) # training curve (loss or acc)
        temp_ax.plot(epochs, temp_history[val_temp][1:], color = 'orange', label = val_temp)
        if(i==1 or i==3) : # accuracy panels
            start, end = temp_ax.get_ylim()
            temp_ax.yaxis.set_ticks(np.arange(np.round(start, 2), end, 0.01))
        temp_ax.legend()
    ax.set_ylabel('loss', size = 20)
    ax.set_xlabel('Epochs', size = 20)
    plt.tight_layout()
    plt.show()
    
draw_loss_acc(history1, history2, 30)

 

 

history1.history.keys()
history2.history.keys()

# dict_keys(['loss', 'acc', 'val_loss', 'val_acc'])

 


# From TensorFlow 2.0 on, Keras is built in, so it does not need to be installed or imported separately

Keras deep learning

1. Define the data => training, validation, and test data
2. Define the model => compose the layers (Dense)
3. Configure training => compile: loss function, optimizer, metric (acc)
    loss functions : mse (mean squared error) for regression
                     binary_crossentropy for binary classification
                     categorical_crossentropy for multi-class classification
4. Train the model => fit()
    epochs : number of passes over the training data
    batch_size : the training data is split into batches of this size, one gradient update per batch
    validation_data : the validation set
5. Evaluate the model => evaluate(), predict() (the five steps are sketched end to end below)
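
A minimal end-to-end sketch of the five steps on random stand-in data (shapes and values are illustrative only; sparse_categorical_crossentropy is used so the integer labels need no one-hot step):

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# 1. define the data (random stand-ins for real train/test sets)
x_train, y_train = np.random.rand(100, 4), np.random.randint(3, size = 100)
x_test, y_test = np.random.rand(20, 4), np.random.randint(3, size = 20)

# 2. define the model: compose the layers
model = Sequential([Dense(8, activation = 'relu', input_shape = (4,)),
                    Dense(3, activation = 'softmax')])

# 3. configure training: optimizer, loss, metric
model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['acc'])

# 4. train: epochs, batch_size, validation split
model.fit(x_train, y_train, epochs = 5, batch_size = 16, validation_split = 0.2)

# 5. evaluate and predict
model.evaluate(x_test, y_test)
model.predict(x_test[:3])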

 

MNIST digit recognition

# mnist : handwritten digit recognition
from tensorflow.keras.datasets.mnist import load_data
(x_train, y_train),(x_test, y_test) = load_data(path='mnist.npz')
# data shapes
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)

(60000, 28, 28) (60000,)
(10000, 28, 28) (10000,)

 

import matplotlib.pyplot as plt
import numpy as np
random_idx = np.random.randint(60000, size = 3)
for idx in random_idx :
    img = x_train[idx, :]
    label = y_train[idx]
    plt.figure()
    plt.imshow(img) # display the image
    plt.title('%d-th data, label is %d'%(idx,label), fontsize=15)

 

# Validation split
from sklearn.model_selection import train_test_split
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size=0.3, random_state = 777)
print('train data :', X_train.shape, ', labels :', Y_train.shape)
print('val data :', X_val.shape, ', labels :', Y_val.shape)

train data : (42000, 28, 28) , labels : (42000,)
val data : (18000, 28, 28) , labels : (18000,)

 

# Choosing the activation function: softmax => used for multi-class classification
x = np.arange(-5.0, 5.0, 0.1)
y = np.exp(x) / np.sum(np.exp(x))
# exponentiate x, then divide by the sum of the exponentials
plt.plot(x, y)
plt.title("softmax Function")
plt.show()
# values never exceed 1
# the outputs sum to 1

 

# Preprocessing
num_x_train = X_train.shape[0]
num_x_val = X_val.shape[0]
num_x_test = x_test.shape[0]

 

# reshape x_train from (42000, 28, 28)
# into a 2-D array of shape (42000, 784)
x_train = (X_train.reshape((num_x_train, 28*28))) / 255 # normalize to 0~1
x_val = (X_val.reshape((num_x_val, 28*28))) / 255
x_test = (x_test.reshape((num_x_test, 28*28))) / 255
print(x_train.shape)
# min-max normalization : x = (x - min) / (max - min) => x / max when min is 0
# robust normalization : x = (x - median) / (Q3 - Q1)
# standardization : x = (x - mean) / std

# (42000, 784)

 

# Count how many of each digit 0~9 appear in y_test
type(y_test)

# numpy.ndarray

y_test[:10]

# array([7, 2, 1, 0, 4, 1, 4, 9, 5, 9], dtype=uint8)

 

import pandas as pd
df = pd.Series(y_test)
df.value_counts().sort_index()

0     980
1    1135
2    1032
3    1010
4     982
5     892
6     958
7    1028
8     974
9    1009
dtype: int64

 

# Label preprocessing: one-hot encoding
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(Y_train)
y_val = to_categorical(Y_val)
y_test = to_categorical(y_test)
print(y_train[0])

# [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
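
to_categorical simply maps each integer label to a one-hot row vector; a NumPy equivalent, as a sketch (the one_hot helper is hypothetical, for illustration):

import numpy as np

def one_hot(labels, num_classes) :
    # label k becomes row k of the identity matrix
    return np.eye(num_classes)[labels]

print(one_hot(np.array([2, 0, 9]), 10)[0]) # same one-hot vector as y_train[0] above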

 

# Build the model
# compose the layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential() # multilayer perceptron
# first hidden layer: 64 units
model.add(Dense(64, activation = 'relu', input_shape = (784, ))) 
# second hidden layer: 32 units
model.add(Dense(32, activation = 'relu')) # relu is the usual choice for hidden layers
# output layer: 10 units
model.add(Dense(10, activation = 'softmax')) # classifies the output into digits 0~9

 

# Print the model structure
model.summary()
# layer names are assigned automatically
# 3 layers        output          weight parameters
#                 64              50240 // 64*(784+1)
#                 32              2080  // 32*(64+1)
#                 10              330   // 10*(32+1)


Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_2 (Dense)              (None, 64)                50240     
_________________________________________________________________
dense_3 (Dense)              (None, 32)                2080      
_________________________________________________________________
dense_4 (Dense)              (None, 10)                330       
=================================================================
Total params: 52,650
Trainable params: 52,650
Non-trainable params: 0
_________________________________________________________________
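
Each Dense layer's parameter count is units * (inputs + 1), the extra 1 being the bias; a quick check against the summary above (a sketch, assuming the model just built):

for layer in model.layers :
    kernel, bias = layer.get_weights() # Dense stores an (inputs, units) kernel and one bias per unit
    print(layer.name, kernel.shape[0] * kernel.shape[1] + bias.shape[0], '==', layer.count_params())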

 

# Implementing the softmax function
import numpy as np
# np.exp is the exponential function e**x
def softmax(arr) : # relative: each output depends on all inputs
    m = np.max(arr)
    arr = arr - m
    arr = np.exp(arr)
    return arr / np.sum(arr)
def sigmoid(x) : # absolute: applied element-wise
    return 1 / (1+np.exp(-x))
case_1 = np.array([3.1, 3.0, 2.9])
case_2 = np.array([2.0, 1.0, 0.7])
np.set_printoptions(precision=3)
print("sigmoid:",sigmoid(case_1),",softmax :",softmax(case_1)) # close inputs give close sigmoids, but softmax still ranks them
print("sigmoid:",sigmoid(case_2),",softmax :",softmax(case_2))

# sigmoid: [0.957 0.953 0.948] ,softmax : [0.367 0.332 0.301]
# sigmoid: [0.881 0.731 0.668] ,softmax : [0.61  0.224 0.166]
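
Subtracting the max before exponentiating is what keeps softmax numerically safe: shifting every input by the same constant leaves the result unchanged mathematically, but np.exp overflows for large arguments. A small check (sketch):

import numpy as np

big = np.array([1000.0, 999.0, 998.0])
naive = np.exp(big) / np.sum(np.exp(big)) # np.exp(1000.0) overflows to inf, so this becomes nan
shifted = np.exp(big - np.max(big)) # shift by the max first; mathematically identical
stable = shifted / np.sum(shifted)
print(naive) # [nan nan nan] (with overflow warnings)
print(stable) # approximately [0.665 0.245 0.090]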

 

# Configure training
model.compile(optimizer='adam', # optimizer: adam
             loss = 'categorical_crossentropy', # loss function
             metrics = ['acc']) # metric to monitor: acc

 

# Train the model
history = model.fit(x_train, y_train,
                   epochs = 30, # passes over the training data; backprop refines the weights each pass
                   batch_size = 128, # samples per gradient update
                   validation_data = (x_val, y_val)) # validation data
                   

Epoch 1/30
329/329 [==============================] - 2s 5ms/step - loss: 0.8727 - acc: 0.7424 - val_loss: 0.2460 - val_acc: 0.9320
Epoch 2/30
329/329 [==============================] - 1s 3ms/step - loss: 0.2213 - acc: 0.9367 - val_loss: 0.1820 - val_acc: 0.9483
Epoch 3/30
329/329 [==============================] - 1s 3ms/step - loss: 0.1598 - acc: 0.9545 - val_loss: 0.1591 - val_acc: 0.9536
Epoch 4/30
329/329 [==============================] - 1s 3ms/step - loss: 0.1280 - acc: 0.9624 - val_loss: 0.1331 - val_acc: 0.9613
Epoch 5/30
329/329 [==============================] - 1s 3ms/step - loss: 0.1030 - acc: 0.9708 - val_loss: 0.1261 - val_acc: 0.9631
Epoch 6/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0922 - acc: 0.9746 - val_loss: 0.1256 - val_acc: 0.9618
Epoch 7/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0733 - acc: 0.9787 - val_loss: 0.1165 - val_acc: 0.9654
Epoch 8/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0734 - acc: 0.9785 - val_loss: 0.1167 - val_acc: 0.9652
Epoch 9/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0607 - acc: 0.9823 - val_loss: 0.1165 - val_acc: 0.9659
Epoch 10/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0476 - acc: 0.9864 - val_loss: 0.1121 - val_acc: 0.9668
Epoch 11/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0435 - acc: 0.9878 - val_loss: 0.1068 - val_acc: 0.9691
Epoch 12/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0429 - acc: 0.9868 - val_loss: 0.1087 - val_acc: 0.9688
Epoch 13/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0345 - acc: 0.9901 - val_loss: 0.1074 - val_acc: 0.9691
Epoch 14/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0303 - acc: 0.9915 - val_loss: 0.1146 - val_acc: 0.9682
Epoch 15/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0254 - acc: 0.9934 - val_loss: 0.1143 - val_acc: 0.9690
Epoch 16/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0258 - acc: 0.9932 - val_loss: 0.1271 - val_acc: 0.9660
Epoch 17/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0214 - acc: 0.9941 - val_loss: 0.1138 - val_acc: 0.9694
Epoch 18/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0156 - acc: 0.9966 - val_loss: 0.1177 - val_acc: 0.9705
Epoch 19/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0163 - acc: 0.9960 - val_loss: 0.1228 - val_acc: 0.9692
Epoch 20/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0197 - acc: 0.9940 - val_loss: 0.1201 - val_acc: 0.9703
Epoch 21/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0138 - acc: 0.9962 - val_loss: 0.1259 - val_acc: 0.9690
Epoch 22/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0089 - acc: 0.9985 - val_loss: 0.1308 - val_acc: 0.9684
Epoch 23/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0090 - acc: 0.9978 - val_loss: 0.1376 - val_acc: 0.9672
Epoch 24/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0104 - acc: 0.9973 - val_loss: 0.1387 - val_acc: 0.9688
Epoch 25/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0091 - acc: 0.9977 - val_loss: 0.1484 - val_acc: 0.9666
Epoch 26/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0097 - acc: 0.9971 - val_loss: 0.1367 - val_acc: 0.9697
Epoch 27/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0049 - acc: 0.9992 - val_loss: 0.1401 - val_acc: 0.9702
Epoch 28/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0041 - acc: 0.9991 - val_loss: 0.1416 - val_acc: 0.9697
Epoch 29/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0170 - acc: 0.9949 - val_loss: 0.1521 - val_acc: 0.9678
Epoch 30/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0070 - acc: 0.9976 - val_loss: 0.1527 - val_acc: 0.9689

 

history.history.keys()

# dict_keys(['loss', 'acc', 'val_loss', 'val_acc'])

 

type(history)

# tensorflow.python.keras.callbacks.History

 

type(history.history)

# dict

 

history.history

{'loss': [0.49928054213523865,
  0.2102358192205429,
  0.1598261296749115,
  0.1318138986825943,
  0.11270429939031601,
  0.09517224133014679,
  0.08382865786552429,
  0.0735907107591629,
  0.0659095048904419,
  0.05770876258611679,
  0.05159643664956093,
  0.04378071427345276,
  0.038674868643283844,
  0.034976404160261154,
  0.03131331503391266,
  0.02853371389210224,
  0.02386757917702198,
  0.02131393365561962,
  0.019856156781315804,
  0.016329089179635048,
  0.016619833186268806,
  0.016199814155697823,
  0.011386237107217312,
  0.010206134989857674,
  0.009236675687134266,
  0.00816024374216795,
  0.011310409754514694,
  0.007149739656597376,
  0.0060667796060442924,
  0.004311054944992065],
 'acc': [0.8610952496528625,
  0.9389761686325073,
  0.9542142748832703,
  0.9617618918418884,
  0.9669285416603088,
  0.9709523916244507,
  0.9750000238418579,
  0.9781190752983093,
  0.9802619218826294,
  0.9830714464187622,
  0.9849047660827637,
  0.9872142672538757,
  0.9891190528869629,
  0.9903333187103271,
  0.991428554058075,
  0.9916666746139526,
  0.9935476183891296,
  0.9939047694206238,
  0.994523823261261,
  0.9958571195602417,
  0.9951428771018982,
  0.9955000281333923,
  0.9975237846374512,
  0.9977142810821533,
  0.9980952143669128,
  0.9980238080024719,
  0.9968809485435486,
  0.9981904625892639,
  0.9988333582878113,
  0.9992856979370117],
 'val_loss': [0.24861222505569458,
  0.18751391768455505,
  0.16067922115325928,
  0.14771240949630737,
  0.1354956030845642,
  0.12186738103628159,
  0.12513096630573273,
  0.11414627730846405,
  0.10838081687688828,
  0.10804055631160736,
  0.10687518864870071,
  0.10763053596019745,
  0.10694220662117004,
  0.10424619913101196,
  0.10935620963573456,
  0.112459696829319,
  0.1159299984574318,
  0.11536701023578644,
  0.11584317684173584,
  0.12000786513090134,
  0.12605518102645874,
  0.12743493914604187,
  0.12300366163253784,
  0.13212160766124725,
  0.12954148650169373,
  0.13898710906505585,
  0.1431078463792801,
  0.1479557752609253,
  0.14380276203155518,
  0.14053674042224884],
 'val_acc': [0.9315555691719055,
  0.9463333487510681,
  0.9527222514152527,
  0.957111120223999,
  0.9597222208976746,
  0.9630555510520935,
  0.9627222418785095,
  0.9657777547836304,
  0.9682222008705139,
  0.9683333039283752,
  0.9681110978126526,
  0.9685555696487427,
  0.9692777991294861,
  0.9705555438995361,
  0.9693889021873474,
  0.9695000052452087,
  0.9695555567741394,
  0.968999981880188,
  0.9695000052452087,
  0.9689444303512573,
  0.9692222476005554,
  0.9681110978126526,
  0.9696666598320007,
  0.9697222113609314,
  0.9701666831970215,
  0.9692777991294861,
  0.9678888916969299,
  0.9691666960716248,
  0.9702222347259521,
  0.9712222218513489]}

 

# Check the training results
# plot loss, acc, val_loss, and val_acc
# i.e. the loss and accuracy curves for the training and validation data
import matplotlib.pyplot as plt
his_dict = history.history # dict: loss = training loss, acc = training accuracy // val_ prefix = validation

loss = his_dict['loss'] # training loss values
val_loss = his_dict['val_loss'] # validation loss values

epochs = range(1, len(loss)+1)
fig = plt.figure(figsize = (10, 5))

# plot training and validation loss
ax1 = fig.add_subplot(1,2,1)
ax1.plot(epochs, loss, color = 'blue', label = 'train_loss')
ax1.plot(epochs, val_loss, color = 'orange', label = 'val_loss')
ax1.set_title('train and val loss')
ax1.set_xlabel('epochs')
ax1.set_ylabel('loss')
ax1.legend()

acc = his_dict['acc'] # training accuracy
val_acc = his_dict['val_acc'] # validation accuracy

# plot training and validation accuracy
ax2 = fig.add_subplot(1,2,2)
ax2.plot(epochs, acc, color = 'blue', label = 'train_acc')
ax2.plot(epochs, val_acc, color = 'orange', label = 'val_acc')
ax2.set_title('train and val acc')
ax2.set_xlabel('epochs')
ax2.set_ylabel('acc')
ax2.legend()
plt.show()

 

# Evaluate the model on the test data
# Current pattern: accuracy on the training data is high, but validation and test
# accuracy are lower => overfitting. The loss curves diverge around epoch 5,
# so roughly 5 epochs would suffice here
#     => reduce epochs, or adjust random_state / the test split (see the EarlyStopping sketch below)
model.evaluate(x_test, y_test)
# [0.12191680818796158, 0.9718999862670898] # close to the validation results

313/313 [==============================] - 0s 1ms/step - loss: 0.1437 - acc: 0.9696
[0.14366137981414795, 0.9696000218391418]
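
Rather than eyeballing where the curves diverge and hand-tuning epochs, Keras's EarlyStopping callback can stop training once val_loss stops improving; a minimal sketch reusing the model and data above:

from tensorflow.keras.callbacks import EarlyStopping

# stop after val_loss fails to improve for 3 consecutive epochs,
# and roll the weights back to the best epoch seen
early_stop = EarlyStopping(monitor = 'val_loss', patience = 3, restore_best_weights = True)
history = model.fit(x_train, y_train, epochs = 30, batch_size = 128,
                    validation_data = (x_val, y_val), callbacks = [early_stop])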

 

# Inspect the predictions
np.set_printoptions(precision=7)
results = model.predict(x_test) # predicted probabilities
import matplotlib.pyplot as plt
# argmax: the index of the largest value in each prediction row
arg_results = np.argmax(results, axis = -1)
idx = 6
plt.imshow(x_test[idx].reshape(28, 28))
plt.title('predicted value of image %d : %d' % (idx, arg_results[idx]), fontsize=15)
plt.show()

# Inspect the confusion matrix
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
# np.argmax(y_test, axis=-1) returns the index of the max value per row, i.e. the class index
cm = confusion_matrix(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1))
cm

array([[ 958,    0,    3,    2,    0,    3,    6,    3,    2,    3],
       [   1, 1118,    2,    4,    0,    0,    4,    1,    5,    0],
       [   3,    0, 1008,    5,    2,    0,    1,    7,    6,    0],
       [   0,    1,    8,  982,    1,    2,    0,    1,    7,    8],
       [   2,    0,    5,    1,  929,    0,    7,    3,    1,   34],
       [   1,    0,    0,   13,    2,  853,    9,    3,    6,    5],
       [   2,    3,    0,    1,    3,    4,  941,    2,    1,    1],
       [   0,    4,   11,    3,    3,    0,    0,  992,    2,   13],
       [   4,    0,    3,    7,    5,    5,    5,    4,  933,    8],
       [   1,    3,    0,    1,    8,    2,    1,    4,    7,  982]],
      dtype=int64)

 

import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize = (7,7))
sns.heatmap(cm, annot = True, fmt = 'd', cmap = 'Blues')
plt.xlabel('predicted label', fontsize = 15)
plt.ylabel('true label', fontsize = 15)
plt.show()

df.value_counts().sort_index()

0     980
1    1135
2    1032
3    1010
4     982
5     892
6     958
7    1028
8     974
9    1009
dtype: int64

 

 

print(classification_report(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1)))

              precision    recall  f1-score   support

           0       0.99      0.97      0.98       980
           1       0.99      0.98      0.99      1135
           2       0.97      0.97      0.97      1032
           3       0.96      0.97      0.97      1010
           4       0.97      0.97      0.97       982
           5       0.97      0.97      0.97       892
           6       0.97      0.98      0.97       958
           7       0.97      0.97      0.97      1028
           8       0.96      0.96      0.96       974
           9       0.96      0.96      0.96      1009

    accuracy                           0.97     10000
   macro avg       0.97      0.97      0.97     10000
weighted avg       0.97      0.97      0.97     10000

 

 
