
# From TensorFlow 2.0 onward, Keras is built in, so it does not need to be installed or imported separately

Keras Deep Learning

1. Define the data => training data, validation data, test data
2. Define the model => stack the layers (Dense)
3. Configure training => compile: loss function, optimizer, metric (acc)
    Loss functions: mse (mean squared error) for regression
                    binary_crossentropy for binary classification
                    categorical_crossentropy for multi-class classification
4. Train the model => fit()
    epochs: number of passes over the training data (e.g. 100 passes)
    batch_size: the training data is split into batches of this size and run batch by batch
    validation_data: validation set evaluated at the end of each epoch
5. Evaluate the model => evaluate(), predict()
(A minimal end-to-end sketch of these five steps follows below.)
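
Before the MNIST walkthrough, here is a minimal sketch of the five steps on random stand-in data (the shapes, layer sizes, and variable names here are illustrative assumptions, not part of the MNIST example):

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# 1. define the data (random stand-in: 100 samples, 4 features, 3 classes)
x = np.random.rand(100, 4)
y = np.eye(3)[np.random.randint(0, 3, size=100)] # one-hot labels

# 2. define the model (stacked Dense layers)
model = Sequential([Dense(8, activation='relu', input_shape=(4,)),
                    Dense(3, activation='softmax')])

# 3. configure training: optimizer, loss function, metric
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])

# 4. train: epochs, batch_size, validation data (here split off automatically)
model.fit(x, y, epochs=5, batch_size=16, validation_split=0.2)

# 5. evaluate and predict
print(model.evaluate(x, y))
print(model.predict(x[:3]))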

 

MNIST Digit Recognition

# mnist : handwritten-digit recognition dataset
from tensorflow.keras.datasets.mnist import load_data
(x_train, y_train),(x_test, y_test) = load_data(path='mnist.npz')
# data shapes
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)

(60000, 28, 28) (60000,)
(10000, 28, 28) (10000,)

 

import matplotlib.pyplot as plt
import numpy as np
random_idx = np.random.randint(60000, size = 3)
for idx in random_idx :
    img = x_train[idx, :]
    label = y_train[idx]
    plt.figure()
    plt.imshow(img) # display the image
    plt.title('%d-th data, label is %d'%(idx,label), fontsize=15)

 

# split off validation data
from sklearn.model_selection import train_test_split
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size=0.3, random_state = 777)
print('Training data :',X_train.shape,', labels :',Y_train.shape)
print('Validation data :',X_val.shape,', labels :',Y_val.shape)

Training data : (42000, 28, 28) , labels : (42000,)
Validation data : (18000, 28, 28) , labels : (18000,)

 

# Choosing the activation function: softmax => used for multi-class classification
x = np.arange(-5.0, 5.0, 0.1)
y = np.exp(x) / np.sum(np.exp(x))
# each exponentiated value is divided by the sum of all exponentiated values
plt.plot(x, y)
plt.title("softmax Function")
plt.show()
# no output value ever exceeds 1
# the output values sum to 1
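
As a quick check of those two properties (a sketch; the input vector is an arbitrary example):

import numpy as np
logits = np.array([2.0, 1.0, 0.1]) # arbitrary scores
probs = np.exp(logits) / np.sum(np.exp(logits))
print(probs)                        # e.g. [0.659 0.242 0.099]
print(probs.max() <= 1)             # True : no value exceeds 1
print(np.isclose(probs.sum(), 1.0)) # True : the values sum to 1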

 

# preprocessing
num_x_train = X_train.shape[0]
num_x_val = X_val.shape[0]
num_x_test = x_test.shape[0]

 

# reshape x_train from (42000, 28, 28)
# into a 2-D array of shape (42000, 784)
x_train = (X_train.reshape((num_x_train, 28*28))) / 255 # normalization
x_val = (X_val.reshape((num_x_val, 28*28))) / 255
x_test = (x_test.reshape((num_x_test, 28*28))) / 255
print(x_train.shape)
# min-max normalization : x = (x - min) / (max - min) => here just x / max, since min = 0
# robust normalization  : x = (x - median) / (Q3 - Q1)
# standardization       : x = (x - mean) / std

# (42000, 784)
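
The three formulas in those comments, sketched directly in NumPy (illustrative only; sklearn's MinMaxScaler, RobustScaler, and StandardScaler implement the same ideas column-wise):

import numpy as np
data = np.array([1.0, 2.0, 5.0, 8.0, 100.0]) # arbitrary example values

min_max = (data - data.min()) / (data.max() - data.min()) # min-max normalization
q1, med, q3 = np.percentile(data, [25, 50, 75])
robust = (data - med) / (q3 - q1)                         # robust normalization
standard = (data - data.mean()) / data.std()              # standardization
print(min_max, robust, standard, sep='\n')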

 

# count how many of each digit 0-9 appear in y_test
type(y_test)

# numpy.ndarray

y_test[:10]

# array([7, 2, 1, 0, 4, 1, 4, 9, 5, 9], dtype=uint8)

 

import pandas as pd
df = pd.Series(y_test)
df.value_counts().sort_index()

0     980
1    1135
2    1032
3    1010
4     982
5     892
6     958
7    1028
8     974
9    1009
dtype: int64

 

# label preprocessing: one-hot encoding
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(Y_train)
y_val = to_categorical(Y_val)
y_test = to_categorical(y_test)
print(y_train[0])

# [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
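
What to_categorical does, sketched by hand with NumPy (an illustrative equivalent, not the Keras implementation itself):

import numpy as np
labels = np.array([2, 0, 9]) # arbitrary example labels
one_hot = np.eye(10)[labels] # row i of the 10x10 identity matrix encodes digit i
print(one_hot[0])            # [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] - same format as y_train[0] above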

 

# Building the model
# stacking the layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential() # multilayer perceptron
# first hidden layer: 64 units
model.add(Dense(64, activation = 'relu', input_shape = (784, ))) 
# second hidden layer: 32 units
model.add(Dense(32, activation = 'relu')) # relu is widely used in hidden layers
# output layer: 10 units
model.add(Dense(10, activation = 'softmax')) # the output layer classifies into digits 0-9

 

# print the model structure
model.summary()
# layer names are assigned automatically
# 3 layers; output size and weight-parameter count per layer:
#   64 units => 50240 params // 64*(784+1)
#   32 units =>  2080 params // 32*(64+1)
#   10 units =>   330 params // 10*(32+1)


Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_2 (Dense)              (None, 64)                50240     
_________________________________________________________________
dense_3 (Dense)              (None, 32)                2080      
_________________________________________________________________
dense_4 (Dense)              (None, 10)                330       
=================================================================
Total params: 52,650
Trainable params: 52,650
Non-trainable params: 0
_________________________________________________________________
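
The Param # column can be recomputed by hand: a Dense layer has units * (inputs + 1) weights, the +1 being one bias per unit (a quick check, not Keras internals):

# recompute the Param # column: units * (inputs + 1)
layer_sizes = [(784, 64), (64, 32), (32, 10)] # (inputs, units) per layer
params = [units * (inputs + 1) for inputs, units in layer_sizes]
print(params)      # [50240, 2080, 330]
print(sum(params)) # 52650, matching Total params: 52,650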

 

# implementing the softmax function
import numpy as np
# np.exp(x) computes the exponential e**x
def softmax(arr) : # relative: the scores compete against each other
    m = np.max(arr)
    arr = arr - m # subtract the max for numerical stability
    arr = np.exp(arr)
    return arr / np.sum(arr)
def sigmoid(x) : # absolute: each score is squashed independently
    return 1 / (1+np.exp(-x))
case_1 = np.array([3.1, 3.0, 2.9])
case_2 = np.array([2.0, 1.0, 0.7])
np.set_printoptions(precision=3)
print("sigmoid:",sigmoid(case_1),",softmax :",softmax(case_1)) # similar scores => similar probabilities
print("sigmoid:",sigmoid(case_2),",softmax :",softmax(case_2)) # softmax sharpens the gap between scores

# sigmoid: [0.957 0.953 0.948] ,softmax : [0.367 0.332 0.301]
# sigmoid: [0.881 0.731 0.668] ,softmax : [0.61  0.224 0.166]

 

# configure the training process
model.compile(optimizer='adam', # optimizer: adam
             loss = 'categorical_crossentropy', # loss function
             metrics = ['acc']) # metric to monitor: accuracy

 

# train the model
history = model.fit(x_train, y_train,
                   epochs = 30, # each epoch, backpropagation moves the weights toward the optimum
                   batch_size = 128, # number of samples per gradient update
                   validation_data = (x_val, y_val)) # validation data
                   

Epoch 1/30
329/329 [==============================] - 2s 5ms/step - loss: 0.8727 - acc: 0.7424 - val_loss: 0.2460 - val_acc: 0.9320
Epoch 2/30
329/329 [==============================] - 1s 3ms/step - loss: 0.2213 - acc: 0.9367 - val_loss: 0.1820 - val_acc: 0.9483
Epoch 3/30
329/329 [==============================] - 1s 3ms/step - loss: 0.1598 - acc: 0.9545 - val_loss: 0.1591 - val_acc: 0.9536
Epoch 4/30
329/329 [==============================] - 1s 3ms/step - loss: 0.1280 - acc: 0.9624 - val_loss: 0.1331 - val_acc: 0.9613
Epoch 5/30
329/329 [==============================] - 1s 3ms/step - loss: 0.1030 - acc: 0.9708 - val_loss: 0.1261 - val_acc: 0.9631
Epoch 6/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0922 - acc: 0.9746 - val_loss: 0.1256 - val_acc: 0.9618
Epoch 7/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0733 - acc: 0.9787 - val_loss: 0.1165 - val_acc: 0.9654
Epoch 8/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0734 - acc: 0.9785 - val_loss: 0.1167 - val_acc: 0.9652
Epoch 9/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0607 - acc: 0.9823 - val_loss: 0.1165 - val_acc: 0.9659
Epoch 10/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0476 - acc: 0.9864 - val_loss: 0.1121 - val_acc: 0.9668
Epoch 11/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0435 - acc: 0.9878 - val_loss: 0.1068 - val_acc: 0.9691
Epoch 12/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0429 - acc: 0.9868 - val_loss: 0.1087 - val_acc: 0.9688
Epoch 13/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0345 - acc: 0.9901 - val_loss: 0.1074 - val_acc: 0.9691
Epoch 14/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0303 - acc: 0.9915 - val_loss: 0.1146 - val_acc: 0.9682
Epoch 15/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0254 - acc: 0.9934 - val_loss: 0.1143 - val_acc: 0.9690
Epoch 16/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0258 - acc: 0.9932 - val_loss: 0.1271 - val_acc: 0.9660
Epoch 17/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0214 - acc: 0.9941 - val_loss: 0.1138 - val_acc: 0.9694
Epoch 18/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0156 - acc: 0.9966 - val_loss: 0.1177 - val_acc: 0.9705
Epoch 19/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0163 - acc: 0.9960 - val_loss: 0.1228 - val_acc: 0.9692
Epoch 20/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0197 - acc: 0.9940 - val_loss: 0.1201 - val_acc: 0.9703
Epoch 21/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0138 - acc: 0.9962 - val_loss: 0.1259 - val_acc: 0.9690
Epoch 22/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0089 - acc: 0.9985 - val_loss: 0.1308 - val_acc: 0.9684
Epoch 23/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0090 - acc: 0.9978 - val_loss: 0.1376 - val_acc: 0.9672
Epoch 24/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0104 - acc: 0.9973 - val_loss: 0.1387 - val_acc: 0.9688
Epoch 25/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0091 - acc: 0.9977 - val_loss: 0.1484 - val_acc: 0.9666
Epoch 26/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0097 - acc: 0.9971 - val_loss: 0.1367 - val_acc: 0.9697
Epoch 27/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0049 - acc: 0.9992 - val_loss: 0.1401 - val_acc: 0.9702
Epoch 28/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0041 - acc: 0.9991 - val_loss: 0.1416 - val_acc: 0.9697
Epoch 29/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0170 - acc: 0.9949 - val_loss: 0.1521 - val_acc: 0.9678
Epoch 30/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0070 - acc: 0.9976 - val_loss: 0.1527 - val_acc: 0.9689

 

history.history.keys()

# dict_keys(['loss', 'acc', 'val_loss', 'val_acc'])

 

type(history)

# tensorflow.python.keras.callbacks.History

 

type(history.history)

# dict

 

history.history

{'loss': [0.49928054213523865,
  0.2102358192205429,
  0.1598261296749115,
  0.1318138986825943,
  0.11270429939031601,
  0.09517224133014679,
  0.08382865786552429,
  0.0735907107591629,
  0.0659095048904419,
  0.05770876258611679,
  0.05159643664956093,
  0.04378071427345276,
  0.038674868643283844,
  0.034976404160261154,
  0.03131331503391266,
  0.02853371389210224,
  0.02386757917702198,
  0.02131393365561962,
  0.019856156781315804,
  0.016329089179635048,
  0.016619833186268806,
  0.016199814155697823,
  0.011386237107217312,
  0.010206134989857674,
  0.009236675687134266,
  0.00816024374216795,
  0.011310409754514694,
  0.007149739656597376,
  0.0060667796060442924,
  0.004311054944992065],
 'acc': [0.8610952496528625,
  0.9389761686325073,
  0.9542142748832703,
  0.9617618918418884,
  0.9669285416603088,
  0.9709523916244507,
  0.9750000238418579,
  0.9781190752983093,
  0.9802619218826294,
  0.9830714464187622,
  0.9849047660827637,
  0.9872142672538757,
  0.9891190528869629,
  0.9903333187103271,
  0.991428554058075,
  0.9916666746139526,
  0.9935476183891296,
  0.9939047694206238,
  0.994523823261261,
  0.9958571195602417,
  0.9951428771018982,
  0.9955000281333923,
  0.9975237846374512,
  0.9977142810821533,
  0.9980952143669128,
  0.9980238080024719,
  0.9968809485435486,
  0.9981904625892639,
  0.9988333582878113,
  0.9992856979370117],
 'val_loss': [0.24861222505569458,
  0.18751391768455505,
  0.16067922115325928,
  0.14771240949630737,
  0.1354956030845642,
  0.12186738103628159,
  0.12513096630573273,
  0.11414627730846405,
  0.10838081687688828,
  0.10804055631160736,
  0.10687518864870071,
  0.10763053596019745,
  0.10694220662117004,
  0.10424619913101196,
  0.10935620963573456,
  0.112459696829319,
  0.1159299984574318,
  0.11536701023578644,
  0.11584317684173584,
  0.12000786513090134,
  0.12605518102645874,
  0.12743493914604187,
  0.12300366163253784,
  0.13212160766124725,
  0.12954148650169373,
  0.13898710906505585,
  0.1431078463792801,
  0.1479557752609253,
  0.14380276203155518,
  0.14053674042224884],
 'val_acc': [0.9315555691719055,
  0.9463333487510681,
  0.9527222514152527,
  0.957111120223999,
  0.9597222208976746,
  0.9630555510520935,
  0.9627222418785095,
  0.9657777547836304,
  0.9682222008705139,
  0.9683333039283752,
  0.9681110978126526,
  0.9685555696487427,
  0.9692777991294861,
  0.9705555438995361,
  0.9693889021873474,
  0.9695000052452087,
  0.9695555567741394,
  0.968999981880188,
  0.9695000052452087,
  0.9689444303512573,
  0.9692222476005554,
  0.9681110978126526,
  0.9696666598320007,
  0.9697222113609314,
  0.9701666831970215,
  0.9692777991294861,
  0.9678888916969299,
  0.9691666960716248,
  0.9702222347259521,
  0.9712222218513489]}

 

# Checking the training results
# plot loss, acc, val_loss, val_acc
# plot the loss on the training and validation data
import matplotlib.pyplot as plt
his_dict = history.history # dict: loss/acc are training values, the val_ prefix marks validation values

loss = his_dict['loss'] # training loss
val_loss = his_dict['val_loss'] # validation loss

epochs = range(1, len(loss)+1)
fig = plt.figure(figsize = (10, 5))

# plot training and validation loss
ax1 = fig.add_subplot(1,2,1)
ax1.plot(epochs, loss, color = 'blue', label = 'train_loss')
ax1.plot(epochs, val_loss, color = 'orange', label = 'val_loss')
ax1.set_title('train and val loss')
ax1.set_xlabel('epochs')
ax1.set_ylabel('loss')
ax1.legend()

acc = his_dict['acc'] # training accuracy
val_acc = his_dict['val_acc'] # validation accuracy

# plot training and validation accuracy
ax2 = fig.add_subplot(1,2,2)
ax2.plot(epochs, acc, color = 'blue', label = 'train_acc')
ax2.plot(epochs, val_acc, color = 'orange', label = 'val_acc')
ax2.set_title('train and val acc')
ax2.set_xlabel('epochs')
ax2.set_ylabel('acc')
ax2.legend()
plt.show()

 

# Model evaluation: evaluate on the test data
# Current picture: performance on the training data is high, but performance on the
# validation and test data is lower than on the training data
# => overfitting => the loss curves diverge around epoch 5, so training should have
#    stopped around epoch 5
# => reduce epochs, or adjust random_state / the size of the test split
model.evaluate(x_test, y_test)
# [0.12191680818796158, 0.9718999862670898] # close to the validation score

313/313 [==============================] - 0s 1ms/step - loss: 0.1437 - acc: 0.9696
[0.14366137981414795, 0.9696000218391418]
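
Instead of manually cutting epochs back to 5, Keras's EarlyStopping callback can stop training automatically once val_loss stops improving (a sketch of an alternative not used in this run; the patience value is an assumption):

from tensorflow.keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor='val_loss',        # watch the validation loss
                           patience=3,                # tolerate 3 epochs without improvement
                           restore_best_weights=True) # roll back to the best epoch's weights
history = model.fit(x_train, y_train,
                    epochs=30, batch_size=128,
                    validation_data=(x_val, y_val),
                    callbacks=[early_stop])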

 

# check the predictions
np.set_printoptions(precision=7)
results = model.predict(x_test) # predicted probabilities
import matplotlib.pyplot as plt
# argmax: the index of the largest value in each row of results
arg_results = np.argmax(results, axis = -1)
idx = 6
plt.imshow(x_test[idx].reshape(28, 28))
plt.title('predicted value of image %d : %d'%(idx, arg_results[idx]), fontsize=15)
plt.show()

# confusion matrix
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
# np.argmax(y_test, axis = -1) returns the index of the max in each row of y_test, i.e. the digit label
cm = confusion_matrix(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1))
cm

array([[ 958,    0,    3,    2,    0,    3,    6,    3,    2,    3],
       [   1, 1118,    2,    4,    0,    0,    4,    1,    5,    0],
       [   3,    0, 1008,    5,    2,    0,    1,    7,    6,    0],
       [   0,    1,    8,  982,    1,    2,    0,    1,    7,    8],
       [   2,    0,    5,    1,  929,    0,    7,    3,    1,   34],
       [   1,    0,    0,   13,    2,  853,    9,    3,    6,    5],
       [   2,    3,    0,    1,    3,    4,  941,    2,    1,    1],
       [   0,    4,   11,    3,    3,    0,    0,  992,    2,   13],
       [   4,    0,    3,    7,    5,    5,    5,    4,  933,    8],
       [   1,    3,    0,    1,    8,    2,    1,    4,    7,  982]],
      dtype=int64)

 

import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize = (7,7))
sns.heatmap(cm, annot = True, fmt = 'd', cmap = 'Blues')
plt.xlabel('predicted label', fontsize = 15)
plt.ylabel('true label', fontsize = 15)
plt.show()

# per-class counts again, for comparison with the confusion-matrix row sums
df.value_counts().sort_index()

0     980
1    1135
2    1032
3    1010
4     982
5     892
6     958
7    1028
8     974
9    1009
dtype: int64

 

 

print(classification_report(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1)))

              precision    recall  f1-score   support

           0       0.99      0.97      0.98       980
           1       0.99      0.98      0.99      1135
           2       0.97      0.97      0.97      1032
           3       0.96      0.97      0.97      1010
           4       0.97      0.97      0.97       982
           5       0.97      0.97      0.97       892
           6       0.97      0.98      0.97       958
           7       0.97      0.97      0.97      1028
           8       0.96      0.96      0.96       974
           9       0.96      0.96      0.96      1009

    accuracy                           0.97     10000
   macro avg       0.97      0.97      0.97     10000
weighted avg       0.97      0.97      0.97     10000

 

 
