# Fashion-MNIST classification with Keras
from tensorflow.keras.datasets.fashion_mnist import load_data
(x_train, y_train), (x_test, y_test) = load_data()
print(x_train.shape)

# (60000, 28, 28)

y_train[:10]

# array([9, 0, 0, 3, 0, 2, 7, 2, 5, 5], dtype=uint8)

 

class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
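Since the labels are just indices into this list, we can sanity-check the mapping on the first ten training labels shown above (a quick illustrative check; the expected output is given as a comment):

print([class_names[label] for label in y_train[:10]])
# ['Ankle boot', 'T-shirt/top', 'T-shirt/top', 'Dress', 'T-shirt/top', 'Pullover', 'Sneaker', 'Pullover', 'Sandal', 'Sandal']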

 

# display a few random training samples as images
import matplotlib.pyplot as plt
import numpy as np
sample_size = 9
# draw 9 random indices from 0 to 59999
random_idx = np.random.randint(60000, size = sample_size)
plt.figure(figsize = (5, 5))
for i, idx in enumerate(random_idx) :
    plt.subplot(3,3, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(x_train[idx], cmap = 'gray')
    plt.xlabel(class_names[y_train[idx]])
plt.show()

 

# normalize the training and test pixel values from 0~255 to 0~1
x_train[:10]   # peek at the raw uint8 values first
x_train = x_train / 255
x_test = x_test / 255
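A quick check that the scaling worked (the raw uint8 range 0~255 should now be floats in 0.0~1.0):

print(x_train.min(), x_train.max())
# 0.0 1.0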

 

from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
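to_categorical turns each integer label into a one-hot vector of length 10. For example, the first training label (9, 'Ankle boot') becomes a vector with a 1 in position 9:

print(y_train[0])
# [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]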

 

from sklearn.model_selection import train_test_split
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size=0.3, random_state = 777)
print('Training data :', X_train.shape, ', labels :', Y_train.shape)
print('Validation data :', X_val.shape, ', labels :', Y_val.shape)

# Training data : (42000, 28, 28) , labels : (42000, 10)
# Validation data : (18000, 28, 28) , labels : (18000, 10)

 

Model 1: Flatten input layer, two hidden layers (64 and 32 units), output layer of 10

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
model1 = Sequential()
model1.add(Flatten(input_shape=(28, 28))) # flattens the 2D 28x28 input to a 1D vector of 784; serves as the input layer, so no manual .reshape is needed
model1.add(Dense(64, activation = 'relu'))
model1.add(Dense(32, activation = 'relu'))
model1.add(Dense(10, activation = 'softmax'))

 

model1.summary()                # Flatten input layer (784)
                                # first hidden layer: 64 units
                                # second hidden layer: 32 units
                                # output layer: 10 units
                                
Model: "sequential_3"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
flatten_3 (Flatten)          (None, 784)               0         
_________________________________________________________________
dense_9 (Dense)              (None, 64)                50240     
_________________________________________________________________
dense_10 (Dense)             (None, 32)                2080      
_________________________________________________________________
dense_11 (Dense)             (None, 10)                330       
=================================================================
Total params: 52,650
Trainable params: 52,650
Non-trainable params: 0
_________________________________________________________________
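The parameter counts are just weights plus biases for each Dense layer: 784*64 + 64 = 50,240, then 64*32 + 32 = 2,080, then 32*10 + 10 = 330, totaling 52,650.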

 

model1.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
# fit on the training split (X_train/Y_train) so the validation samples stay unseen during training
history1 = model1.fit(X_train, Y_train, epochs=30, batch_size=128, validation_data=(X_val, Y_val))


Epoch 1/30
469/469 [==============================] - 1s 3ms/step - loss: 0.8711 - acc: 0.6973 - val_loss: 0.4393 - val_acc: 0.8425
Epoch 2/30
469/469 [==============================] - 1s 2ms/step - loss: 0.4209 - acc: 0.8495 - val_loss: 0.3694 - val_acc: 0.8717
Epoch 3/30
469/469 [==============================] - 1s 3ms/step - loss: 0.3815 - acc: 0.8622 - val_loss: 0.3443 - val_acc: 0.8754
Epoch 4/30
469/469 [==============================] - 1s 2ms/step - loss: 0.3505 - acc: 0.8727 - val_loss: 0.3208 - val_acc: 0.8841
Epoch 5/30
469/469 [==============================] - 1s 2ms/step - loss: 0.3327 - acc: 0.8785 - val_loss: 0.3102 - val_acc: 0.8882
Epoch 6/30
469/469 [==============================] - 1s 2ms/step - loss: 0.3199 - acc: 0.8824 - val_loss: 0.2903 - val_acc: 0.8952
Epoch 7/30
469/469 [==============================] - 1s 2ms/step - loss: 0.3057 - acc: 0.8892 - val_loss: 0.2904 - val_acc: 0.8952
Epoch 8/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2986 - acc: 0.8901 - val_loss: 0.2779 - val_acc: 0.8995
Epoch 9/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2860 - acc: 0.8940 - val_loss: 0.2606 - val_acc: 0.9059
Epoch 10/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2795 - acc: 0.8974 - val_loss: 0.2608 - val_acc: 0.9048
Epoch 11/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2768 - acc: 0.8970 - val_loss: 0.2661 - val_acc: 0.9025
Epoch 12/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2607 - acc: 0.9033 - val_loss: 0.2498 - val_acc: 0.9096
Epoch 13/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2628 - acc: 0.9039 - val_loss: 0.2418 - val_acc: 0.9116
Epoch 14/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2509 - acc: 0.9073 - val_loss: 0.2443 - val_acc: 0.9102
Epoch 15/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2412 - acc: 0.9120 - val_loss: 0.2269 - val_acc: 0.9183
Epoch 16/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2372 - acc: 0.9133 - val_loss: 0.2412 - val_acc: 0.9125
Epoch 17/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2391 - acc: 0.9112 - val_loss: 0.2172 - val_acc: 0.9212
Epoch 18/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2262 - acc: 0.9171 - val_loss: 0.2078 - val_acc: 0.9261
Epoch 19/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2258 - acc: 0.9153 - val_loss: 0.2031 - val_acc: 0.9273
Epoch 20/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2245 - acc: 0.9169 - val_loss: 0.2010 - val_acc: 0.9285
Epoch 21/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2173 - acc: 0.9191 - val_loss: 0.2006 - val_acc: 0.9281
Epoch 22/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2162 - acc: 0.9197 - val_loss: 0.2037 - val_acc: 0.9268
Epoch 23/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2087 - acc: 0.9242 - val_loss: 0.1952 - val_acc: 0.9304
Epoch 24/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2038 - acc: 0.9250 - val_loss: 0.1937 - val_acc: 0.9313
Epoch 25/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2038 - acc: 0.9245 - val_loss: 0.2027 - val_acc: 0.9263
Epoch 26/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2032 - acc: 0.9229 - val_loss: 0.2045 - val_acc: 0.9246
Epoch 27/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1944 - acc: 0.9291 - val_loss: 0.1839 - val_acc: 0.9336
Epoch 28/30
469/469 [==============================] - 1s 2ms/step - loss: 0.1925 - acc: 0.9286 - val_loss: 0.1966 - val_acc: 0.9277
Epoch 29/30
469/469 [==============================] - 1s 2ms/step - loss: 0.1818 - acc: 0.9326 - val_loss: 0.1905 - val_acc: 0.9293
Epoch 30/30
469/469 [==============================] - 1s 2ms/step - loss: 0.1872 - acc: 0.9304 - val_loss: 0.1950 - val_acc: 0.9287

 

model1.evaluate(x_test,y_test)

313/313 [==============================] - 0s 1ms/step - loss: 0.3881 - acc: 0.8763
[0.3880555033683777, 0.8762999773025513]

 

results = model1.predict(x_test)
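Each row of results is a 10-way probability vector, so np.argmax gives the predicted class. A minimal sketch for eyeballing a single prediction:

import numpy as np
pred = np.argmax(results[0])   # predicted class index for the first test image
true = np.argmax(y_test[0])    # true class index (y_test is one-hot)
print('predicted:', class_names[pred], '| true:', class_names[true])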

 

# confusion matrix
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
# np.argmax(y_test, axis = -1) returns the index of the row-wise maximum, i.e. the integer class label
cm = confusion_matrix(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1))
cm

array([[891,   2,  14,  11,   4,   2,  72,   0,   4,   0],
       [ 10, 971,   1,  10,   4,   0,   4,   0,   0,   0],
       [ 22,   1, 787,  12,  91,   0,  87,   0,   0,   0],
       [ 37,   8,  13, 869,  37,   1,  31,   0,   4,   0],
       [  4,   0,  99,  23, 815,   1,  57,   0,   1,   0],
       [  0,   0,   0,   1,   0, 952,   0,  20,   1,  26],
       [159,   1,  77,  25,  77,   0, 657,   0,   4,   0],
       [  0,   0,   0,   0,   0,  20,   0, 884,   2,  94],
       [ 10,   1,   4,   4,  12,   3,  11,   4, 951,   0],
       [  0,   0,   0,   0,   0,   5,   1,   8,   0, 986]], dtype=int64)

 

import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize = (7,7))
sns.heatmap(cm, annot = True, fmt = 'd', cmap = 'Blues')
plt.xlabel('predicted label', fontsize = 15)
plt.ylabel('true label', fontsize = 15)
plt.show()

 

# label distribution of the test set: perfectly balanced, 1,000 images per class
import pandas as pd
pd.Series(np.argmax(y_test, axis = -1)).value_counts().sort_index()

0    1000
1    1000
2    1000
3    1000
4    1000
5    1000
6    1000
7    1000
8    1000
9    1000
dtype: int64

 

print(classification_report(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1)))

              precision    recall  f1-score   support

           0       0.79      0.89      0.84      1000
           1       0.99      0.97      0.98      1000
           2       0.79      0.79      0.79      1000
           3       0.91      0.87      0.89      1000
           4       0.78      0.81      0.80      1000
           5       0.97      0.95      0.96      1000
           6       0.71      0.66      0.68      1000
           7       0.97      0.88      0.92      1000
           8       0.98      0.95      0.97      1000
           9       0.89      0.99      0.94      1000

    accuracy                           0.88     10000
   macro avg       0.88      0.88      0.88     10000
weighted avg       0.88      0.88      0.88     10000

 

Building Model 2: Flatten input layer, three hidden layers (128, 64, and 32 units), output layer of 10

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
model2 = Sequential()
model2.add(Flatten(input_shape=(28, 28))) # flattens the 2D 28x28 input to a 1D vector of 784; serves as the input layer, so no manual .reshape is needed
model2.add(Dense(128, activation = 'relu'))
model2.add(Dense(64, activation = 'relu'))
model2.add(Dense(32, activation = 'relu'))
model2.add(Dense(10, activation = 'softmax'))
model2.summary()

Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
flatten_2 (Flatten)          (None, 784)               0         
_________________________________________________________________
dense_6 (Dense)              (None, 128)               100480    
_________________________________________________________________
dense_7 (Dense)              (None, 64)                8256      
_________________________________________________________________
dense_8 (Dense)              (None, 32)                2080      
_________________________________________________________________
dense_9 (Dense)              (None, 10)                330       
=================================================================
Total params: 111,146
Trainable params: 111,146
Non-trainable params: 0
_________________________________________________________________
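Again the parameter counts follow from weights plus biases per layer: 784*128 + 128 = 100,480; 128*64 + 64 = 8,256; 64*32 + 32 = 2,080; 32*10 + 10 = 330; total 111,146.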

 

model2.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
history2 = model2.fit(X_train, Y_train, epochs=30, batch_size=128, validation_data=(X_val, Y_val))

Epoch 1/30
469/469 [==============================] - 2s 3ms/step - loss: 0.8033 - acc: 0.7328 - val_loss: 0.3923 - val_acc: 0.8634
Epoch 2/30
469/469 [==============================] - 1s 3ms/step - loss: 0.4000 - acc: 0.8577 - val_loss: 0.3466 - val_acc: 0.8776
Epoch 3/30
469/469 [==============================] - 1s 3ms/step - loss: 0.3499 - acc: 0.8721 - val_loss: 0.3355 - val_acc: 0.8779
Epoch 4/30
469/469 [==============================] - 1s 3ms/step - loss: 0.3283 - acc: 0.8814 - val_loss: 0.3091 - val_acc: 0.8868
Epoch 5/30
469/469 [==============================] - 1s 3ms/step - loss: 0.3073 - acc: 0.8877 - val_loss: 0.2801 - val_acc: 0.8997
Epoch 6/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2897 - acc: 0.8933 - val_loss: 0.2657 - val_acc: 0.9041
Epoch 7/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2751 - acc: 0.8974 - val_loss: 0.2663 - val_acc: 0.9022
Epoch 8/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2692 - acc: 0.8995 - val_loss: 0.2529 - val_acc: 0.9076
Epoch 9/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2634 - acc: 0.9024 - val_loss: 0.2363 - val_acc: 0.9133
Epoch 10/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2438 - acc: 0.9089 - val_loss: 0.2339 - val_acc: 0.9136
Epoch 11/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2448 - acc: 0.9083 - val_loss: 0.2393 - val_acc: 0.9090
Epoch 12/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2355 - acc: 0.9112 - val_loss: 0.2355 - val_acc: 0.9114
Epoch 13/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2243 - acc: 0.9148 - val_loss: 0.2158 - val_acc: 0.9202
Epoch 14/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2173 - acc: 0.9186 - val_loss: 0.2080 - val_acc: 0.9226
Epoch 15/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2168 - acc: 0.9197 - val_loss: 0.1917 - val_acc: 0.9295
Epoch 16/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2055 - acc: 0.9223 - val_loss: 0.1866 - val_acc: 0.9332
Epoch 17/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1978 - acc: 0.9242 - val_loss: 0.1891 - val_acc: 0.9302
Epoch 18/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2004 - acc: 0.9248 - val_loss: 0.1708 - val_acc: 0.9376
Epoch 19/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1906 - acc: 0.9285 - val_loss: 0.1878 - val_acc: 0.9294
Epoch 20/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1884 - acc: 0.9303 - val_loss: 0.1774 - val_acc: 0.9341
Epoch 21/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1841 - acc: 0.9300 - val_loss: 0.1714 - val_acc: 0.9340
Epoch 22/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1739 - acc: 0.9330 - val_loss: 0.1576 - val_acc: 0.9419
Epoch 23/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1723 - acc: 0.9344 - val_loss: 0.1664 - val_acc: 0.9353
Epoch 24/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1663 - acc: 0.9377 - val_loss: 0.1593 - val_acc: 0.9411
Epoch 25/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1593 - acc: 0.9401 - val_loss: 0.1602 - val_acc: 0.9388
Epoch 26/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1598 - acc: 0.9394 - val_loss: 0.1670 - val_acc: 0.9378
Epoch 27/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1606 - acc: 0.9389 - val_loss: 0.1579 - val_acc: 0.9382
Epoch 28/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1464 - acc: 0.9441 - val_loss: 0.1777 - val_acc: 0.9341
Epoch 29/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1471 - acc: 0.9452 - val_loss: 0.1376 - val_acc: 0.9475
Epoch 30/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1513 - acc: 0.9430 - val_loss: 0.1364 - val_acc: 0.9483

 

model2.evaluate(x_test,y_test)

313/313 [==============================] - 0s 1ms/step - loss: 0.3984 - acc: 0.8906
[0.398380845785141, 0.8906000256538391]

 

results = model2.predict(x_test)

 

# confusion matrix for model 2
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
# np.argmax(y_test, axis = -1) returns the index of the row-wise maximum, i.e. the integer class label
cm = confusion_matrix(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1))
cm

array([[860,   1,  11,  26,   5,   1,  89,   0,   7,   0],
       [  6, 973,   0,  15,   4,   0,   2,   0,   0,   0],
       [ 21,   1, 793,  14,  89,   1,  78,   0,   3,   0],
       [ 21,   3,   7, 900,  40,   0,  22,   0,   7,   0],
       [  1,   0,  78,  21, 856,   1,  41,   0,   2,   0],
       [  0,   0,   0,   1,   0, 979,   0,  14,   1,   5],
       [131,   1,  60,  42,  76,   0, 677,   0,  13,   0],
       [  0,   0,   0,   0,   0,  27,   0, 930,   0,  43],
       [  6,   0,   4,   4,   2,   2,   4,   3, 974,   1],
       [  0,   0,   0,   0,   0,  15,   1,  20,   0, 964]], dtype=int64)

 

import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize = (7,7))
sns.heatmap(cm, annot = True, fmt = 'd', cmap = 'Blues')
plt.xlabel('predicted label', fontsize = 15)
plt.ylabel('true label', fontsize = 15)
plt.show()

 

print(classification_report(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1)))

              precision    recall  f1-score   support

           0       0.82      0.86      0.84      1000
           1       0.99      0.97      0.98      1000
           2       0.83      0.79      0.81      1000
           3       0.88      0.90      0.89      1000
           4       0.80      0.86      0.83      1000
           5       0.95      0.98      0.97      1000
           6       0.74      0.68      0.71      1000
           7       0.96      0.93      0.95      1000
           8       0.97      0.97      0.97      1000
           9       0.95      0.96      0.96      1000

    accuracy                           0.89     10000
   macro avg       0.89      0.89      0.89     10000
weighted avg       0.89      0.89      0.89     10000

 

Comparing the two models (model 2 reaches 89.1% test accuracy vs. 87.6% for model 1)

import numpy as np
import matplotlib.pyplot as plt
def draw_loss_acc(history_1, history_2, epochs) :
    his_dict_1 = history_1.history
    his_dict_2 = history_2.history
    keys = list(his_dict_1.keys()) # ['loss', 'acc', 'val_loss', 'val_acc']
    epochs = range(1, epochs)
    fig = plt.figure(figsize = (10,10))
    ax = fig.add_subplot(1,1,1) # invisible outer axes, used only for the shared axis labels
    ax.spines['top'].set_color('none') # hide the outer frame (pass a color to show it)
    ax.spines['bottom'].set_color('none')
    ax.spines['left'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)
    for i in range(len(his_dict_1)) :
        temp_ax = fig.add_subplot(2,2, i + 1)
        temp = keys[i%2]                # 'loss' or 'acc'
        val_temp = keys[(i+2)%2 +2]     # 'val_loss' or 'val_acc'
        temp_history = his_dict_1 if i < 2 else his_dict_2  # top row: model 1, bottom row: model 2
        temp_ax.plot(epochs, temp_history[temp][1:], color = 'blue', label ='train_'+temp) # training curve
        temp_ax.plot(epochs, temp_history[val_temp][1:], color = 'orange', label = val_temp) # validation curve
        if(i==1 or i==3) : # accuracy subplots get fine-grained y ticks
            start, end = temp_ax.get_ylim()
            temp_ax.yaxis.set_ticks(np.arange(np.round(start, 2), end, 0.01))
        temp_ax.legend()
    ax.set_ylabel('Loss / Accuracy', size = 20)
    ax.set_xlabel('Epochs', size = 20)
    plt.tight_layout()
    plt.show()
    
draw_loss_acc(history1, history2, 30)

 

 

history1.history.keys()
history2.history.keys()

# dict_keys(['loss', 'acc', 'val_loss', 'val_acc'])

 
