import tensorflow as tf
import matplotlib.pyplot as plt
image_path = tf.keras.utils.get_file('cat.jpg', 'http://bit.ly/33U6mH9')
# imread : loads the image and returns its pixel values as a NumPy array
image = plt.imread(image_path)
print(image.shape)

# (241, 320, 3)
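A quick sanity check (a small sketch, not part of the original code) is to display the loaded array and look at a single pixel; `image` here is the array loaded above.

# sketch: display the image and inspect one RGB pixel (values are in 0~255)
plt.imshow(image)
plt.axis('off')
plt.show()
print(image[0, 0])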

 

print(image[0])

[[223 220 203]
 [224 221 204]
 [224 221 204]
 ...
 [218 213 194]
 [218 213 194]
 [216 213 194]]

 

titles = ['RGB Image', 'Red channel', 'Green channel', 'Blue channel']
from numpy import array, zeros_like
def channel(image, color) :
    # color : 0 = R, 1 = G, 2 = B; anything else returns the original image
    if color not in (0, 1, 2) : return image
    c = image[..., color]
    print(c)
    z = zeros_like(c)
    return array([(c,z,z),(z,c,z),(z,z,c)][color]).transpose(1,2,0) # reorder the axes back to (height, width, channel)
colors = range(-1, 3)
fig, axes = plt.subplots(1, 4, figsize = (13, 3))
objs = zip(axes, titles, colors)
for ax, title, color in objs :
    ax.imshow(channel(image, color))
    ax.set_title(title)
    ax.set_xticks(())
    ax.set_yticks(())
plt.show()




[[223 224 224 ... 218 218 216]
 [224 224 224 ... 218 218 216]
 [224 224 224 ... 218 216 216]
 ...
 [138 138 144 ... 198 196 194]
 [144 134 135 ... 200 200 200]
 [148 145 143 ... 200 198 196]]
[[220 221 221 ... 213 213 213]
 [221 221 221 ... 213 213 213]
 [221 221 221 ... 213 213 213]
 ...
 [ 96  96 101 ... 191 189 187]
 [102  92  92 ... 193 193 193]
 [101  98  97 ... 193 191 189]]
[[203 204 204 ... 194 194 194]
 [204 204 204 ... 194 194 194]
 [204 204 204 ... 194 194 196]
 ...
 [ 38  38  46 ... 172 170 168]
 [ 44  34  37 ... 174 174 174]
 [ 49  46  47 ... 174 172 170]]

# Conv2D : convolution layer
# kernel_size : size of the filter used to scan the image (here 3x3)
# strides : number of pixels the filter moves at each step (default 1)
# padding : whether to pad so the output does not shrink
#         # 'valid' : no padding // 'same' : pad to keep the size
# filters : number of filters (output channels)

 

conv1 = tf.keras.layers.Conv2D(kernel_size=(3,3),strides=(2,2), padding='valid',filters=16)

# MaxPool2D : max pooling layer
# pool_size : size of the pooling window

pool1 = tf.keras.layers.MaxPool2D(pool_size=(2,2),strides=(2,2))

# Dropout layer
# an option to reduce overfitting
# randomly drops the given rate of units during training

dropout1 = tf.keras.layers.Dropout(rate=0.3)
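The layers above are defined individually; here is a minimal sketch (an illustration, not code from the original post) of stacking the same layer types into a small CNN, assuming 28x28 single-channel input.

# sketch: a small CNN built from the layer types above (assumed 28x28x1 input)
cnn = tf.keras.Sequential([
    tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same',
                           activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
    tf.keras.layers.Dropout(rate=0.3),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation='softmax')
])
cnn.summary()  # check how padding and pooling change the output shapes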

 

 

 

import pandas as pd
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/"
red = pd.read_csv(url+'winequality-red.csv',sep=';')
white = pd.read_csv(url+'winequality-white.csv',sep=';')

 

print(red.head())

   fixed acidity  volatile acidity  citric acid  residual sugar  chlorides  \
0            7.4              0.70         0.00             1.9      0.076   
1            7.8              0.88         0.00             2.6      0.098   
2            7.8              0.76         0.04             2.3      0.092   
3           11.2              0.28         0.56             1.9      0.075   
4            7.4              0.70         0.00             1.9      0.076   

   free sulfur dioxide  total sulfur dioxide  density    pH  sulphates  \
0                 11.0                  34.0   0.9978  3.51       0.56   
1                 25.0                  67.0   0.9968  3.20       0.68   
2                 15.0                  54.0   0.9970  3.26       0.65   
3                 17.0                  60.0   0.9980  3.16       0.58   
4                 11.0                  34.0   0.9978  3.51       0.56   

   alcohol  quality  
0      9.4        5  
1      9.8        5  
2      9.8        5  
3      9.8        6  
4      9.4        5

 

red.info()

<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1599 entries, 0 to 1598
Data columns (total 12 columns):
 #   Column                Non-Null Count  Dtype  
---  ------                --------------  -----  
 0   fixed acidity         1599 non-null   float64
 1   volatile acidity      1599 non-null   float64
 2   citric acid           1599 non-null   float64
 3   residual sugar        1599 non-null   float64
 4   chlorides             1599 non-null   float64
 5   free sulfur dioxide   1599 non-null   float64
 6   total sulfur dioxide  1599 non-null   float64
 7   density               1599 non-null   float64
 8   pH                    1599 non-null   float64
 9   sulphates             1599 non-null   float64
 10  alcohol               1599 non-null   float64
 11  quality               1599 non-null   int64  
dtypes: float64(11), int64(1)
memory usage: 150.0 KB

 

print(white.head())

   fixed acidity  volatile acidity  citric acid  residual sugar  chlorides  \
0            7.0              0.27         0.36            20.7      0.045   
1            6.3              0.30         0.34             1.6      0.049   
2            8.1              0.28         0.40             6.9      0.050   
3            7.2              0.23         0.32             8.5      0.058   
4            7.2              0.23         0.32             8.5      0.058   

   free sulfur dioxide  total sulfur dioxide  density    pH  sulphates  \
0                 45.0                 170.0   1.0010  3.00       0.45   
1                 14.0                 132.0   0.9940  3.30       0.49   
2                 30.0                  97.0   0.9951  3.26       0.44   
3                 47.0                 186.0   0.9956  3.19       0.40   
4                 47.0                 186.0   0.9956  3.19       0.40   

   alcohol  quality  
0      8.8        6  
1      9.5        6  
2     10.1        6  
3      9.9        6  
4      9.9        6

 

white.info()

<class 'pandas.core.frame.DataFrame'>
RangeIndex: 4898 entries, 0 to 4897
Data columns (total 12 columns):
 #   Column                Non-Null Count  Dtype  
---  ------                --------------  -----  
 0   fixed acidity         4898 non-null   float64
 1   volatile acidity      4898 non-null   float64
 2   citric acid           4898 non-null   float64
 3   residual sugar        4898 non-null   float64
 4   chlorides             4898 non-null   float64
 5   free sulfur dioxide   4898 non-null   float64
 6   total sulfur dioxide  4898 non-null   float64
 7   density               4898 non-null   float64
 8   pH                    4898 non-null   float64
 9   sulphates             4898 non-null   float64
 10  alcohol               4898 non-null   float64
 11  quality               4898 non-null   int64  
dtypes: float64(11), int64(1)
memory usage: 459.3 KB

 

red['type'] = 0
white['type'] = 1
wine = pd.concat([red, white])
wine.info()

<class 'pandas.core.frame.DataFrame'>
Int64Index: 6497 entries, 0 to 4897
Data columns (total 13 columns):
 #   Column                Non-Null Count  Dtype  
---  ------                --------------  -----  
 0   fixed acidity         6497 non-null   float64
 1   volatile acidity      6497 non-null   float64
 2   citric acid           6497 non-null   float64
 3   residual sugar        6497 non-null   float64
 4   chlorides             6497 non-null   float64
 5   free sulfur dioxide   6497 non-null   float64
 6   total sulfur dioxide  6497 non-null   float64
 7   density               6497 non-null   float64
 8   pH                    6497 non-null   float64
 9   sulphates             6497 non-null   float64
 10  alcohol               6497 non-null   float64
 11  quality               6497 non-null   int64  
 12  type                  6497 non-null   int64  
dtypes: float64(11), int64(2)
memory usage: 710.6 KB

 

# dataset summary statistics
wine.describe()

	fixed acidity	volatile acidity	citric acid	residual sugar	chlorides	free sulfur dioxide	total sulfur dioxide	density	pH	sulphates	alcohol	quality	type
count	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000
mean	7.215307	0.339666	0.318633	5.443235	0.056034	30.525319	115.744574	0.994697	3.218501	0.531268	10.491801	5.818378	0.753886
std	1.296434	0.164636	0.145318	4.757804	0.035034	17.749400	56.521855	0.002999	0.160787	0.148806	1.192712	0.873255	0.430779
min	3.800000	0.080000	0.000000	0.600000	0.009000	1.000000	6.000000	0.987110	2.720000	0.220000	8.000000	3.000000	0.000000
25%	6.400000	0.230000	0.250000	1.800000	0.038000	17.000000	77.000000	0.992340	3.110000	0.430000	9.500000	5.000000	1.000000
50%	7.000000	0.290000	0.310000	3.000000	0.047000	29.000000	118.000000	0.994890	3.210000	0.510000	10.300000	6.000000	1.000000
75%	7.700000	0.400000	0.390000	8.100000	0.065000	41.000000	156.000000	0.996990	3.320000	0.600000	11.300000	6.000000	1.000000
max	15.900000	1.580000	1.660000	65.800000	0.611000	289.000000	440.000000	1.038980	4.010000	2.000000	14.900000	9.000000	1.000000

 

wine.type.value_counts()

1    4898
0    1599
Name: type, dtype: int64

 

wine_norm = (wine - wine.min()) / (wine.max() - wine.min())

wine_norm.describe()
	fixed acidity	volatile acidity	citric acid	residual sugar	chlorides	free sulfur dioxide	total sulfur dioxide	density	pH	sulphates	alcohol	quality	type
count	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000	6497.000000
mean	0.282257	0.173111	0.191948	0.074283	0.078129	0.102518	0.252868	0.146262	0.386435	0.174870	0.361131	0.469730	0.753886
std	0.107143	0.109758	0.087541	0.072972	0.058195	0.061630	0.130235	0.057811	0.124641	0.083599	0.172857	0.145543	0.430779
min	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000
25%	0.214876	0.100000	0.150602	0.018405	0.048173	0.055556	0.163594	0.100829	0.302326	0.117978	0.217391	0.333333	1.000000
50%	0.264463	0.140000	0.186747	0.036810	0.063123	0.097222	0.258065	0.149990	0.379845	0.162921	0.333333	0.500000	1.000000
75%	0.322314	0.213333	0.234940	0.115031	0.093023	0.138889	0.345622	0.190476	0.465116	0.213483	0.478261	0.500000	1.000000
max	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000	1.000000

 

wine_norm.head()

	fixed acidity	volatile acidity	citric acid	residual sugar	chlorides	free sulfur dioxide	total sulfur dioxide	density	pH	sulphates	alcohol	quality	type
0	0.297521	0.413333	0.000000	0.019939	0.111296	0.034722	0.064516	0.206092	0.612403	0.191011	0.202899	0.333333	0.0
1	0.330579	0.533333	0.000000	0.030675	0.147841	0.083333	0.140553	0.186813	0.372093	0.258427	0.260870	0.333333	0.0
2	0.330579	0.453333	0.024096	0.026074	0.137874	0.048611	0.110599	0.190669	0.418605	0.241573	0.260870	0.333333	0.0
3	0.611570	0.133333	0.337349	0.019939	0.109635	0.055556	0.124424	0.209948	0.341085	0.202247	0.260870	0.500000	0.0
4	0.297521	0.413333	0.000000	0.019939	0.111296	0.034722	0.064516	0.206092	0.612403	0.191011	0.202899	0.333333	0.0

 

# shuffle the rows of wine_norm
import numpy as np
wine_shuffle = wine_norm.sample(frac=1)
print(wine_shuffle.head())

     fixed acidity  volatile acidity  citric acid  residual sugar  chlorides  \
2593       0.206612          0.086667     0.174699        0.170245   0.064784   
1273       0.305785          0.333333     0.120482        0.021472   0.106312   
1348       0.280992          0.383333     0.018072        0.018405   0.114618   
665        0.330579          0.133333     0.132530        0.012270   0.078073   
1040       0.363636          0.400000     0.180723        0.023006   0.063123   

      free sulfur dioxide  total sulfur dioxide   density        pH  \
2593             0.166667              0.324885  0.148641  0.387597   
1273             0.114583              0.087558  0.150954  0.294574   
1348             0.020833              0.013825  0.168884  0.480620   
665              0.079861              0.285714  0.140544  0.434109   
1040             0.055556              0.304147  0.152111  0.387597   

      sulphates   alcohol   quality  type  
2593   0.089888  0.405797  0.333333   1.0  
1273   0.117978  0.188406  0.333333   0.0  
1348   0.095506  0.217391  0.333333   0.0  
665    0.146067  0.217391  0.333333   1.0  
1040   0.275281  0.405797  0.166667   1.0

 

wine_np = wine_shuffle.to_numpy()
print(wine_np[:5])

[[0.20661157 0.08666667 0.1746988  0.1702454  0.06478405 0.16666667
  0.32488479 0.14864083 0.3875969  0.08988764 0.4057971  0.33333333
  1.        ]
 [0.30578512 0.33333333 0.12048193 0.02147239 0.10631229 0.11458333
  0.0875576  0.15095431 0.29457364 0.11797753 0.1884058  0.33333333
  0.        ]
 [0.28099174 0.38333333 0.01807229 0.01840491 0.11461794 0.02083333
  0.01382488 0.16888375 0.48062016 0.09550562 0.2173913  0.33333333
  0.        ]
 [0.33057851 0.13333333 0.13253012 0.01226994 0.07807309 0.07986111
  0.28571429 0.14054367 0.43410853 0.14606742 0.2173913  0.33333333
  1.        ]
 [0.36363636 0.4        0.18072289 0.02300613 0.06312292 0.05555556
  0.30414747 0.15211105 0.3875969  0.2752809  0.4057971  0.16666667
  1.        ]]

 

import tensorflow as tf
len(wine_np)

# 6497

train_idx = int(len(wine_np)*0.8) 
print(train_idx)

train_X, train_Y = wine_np[:train_idx, : -1], wine_np[:train_idx, -1]
test_X, test_Y = wine_np[train_idx:, : -1], wine_np[train_idx:, -1]
# 5197

 

print(train_X[0])
print(train_Y[0])
print(test_X[0])
print(test_Y[0])

[0.15702479 0.09333333 0.12048193 0.23619632 0.05813953 0.13888889
 0.24654378 0.22190091 0.3875969  0.13483146 0.13043478 0.5       ]
1.0
[0.23966942 0.10666667 0.21084337 0.19171779 0.06810631 0.21875
 0.45852535 0.19066898 0.33333333 0.15730337 0.2173913  0.33333333]
1.0

 

# one-hot encoding
import tensorflow as tf
train_Y = tf.keras.utils.to_categorical(train_Y, num_classes=2)
test_Y = tf.keras.utils.to_categorical(test_Y, num_classes=2)
train_Y[0]

# array([0., 1.], dtype=float32)
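For reference, to_categorical simply turns each integer label into a one-hot row; a tiny sketch (illustration only, not part of the original code):

print(tf.keras.utils.to_categorical([0, 1, 1], num_classes=2))
# [[1. 0.]
#  [0. 1.]
#  [0. 1.]]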

 

# classification model
# note: a 2-unit sigmoid layer stacked on top of a 2-unit softmax layer is redundant; a single output layer is the usual choice
model = tf.keras.Sequential([tf.keras.layers.Dense(units=48, activation = 'relu', input_shape=(12,)),
                             tf.keras.layers.Dense(units=24, activation = 'relu'),
                             tf.keras.layers.Dense(units=12, activation = 'relu'),
                             tf.keras.layers.Dense(units=2, activation = 'softmax'),
                             tf.keras.layers.Dense(units=2, activation = 'sigmoid')
                            ])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.07),
#              loss='categorical_crossentropy', metrics = ['accuracy'])
             loss='binary_crossentropy', metrics = ['accuracy'])

 

model.summary()
# Dense : fully connected layer; it learns from 1-D input, so relationships between neighbouring pixels are ignored
#         a 2-D image has to be flattened to 1-D, which loses the spatial structure
# for images, convolution (Conv2D) layers are used instead

Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense (Dense)                (None, 48)                624       
_________________________________________________________________
dense_1 (Dense)              (None, 24)                1176      
_________________________________________________________________
dense_2 (Dense)              (None, 12)                300       
_________________________________________________________________
dense_3 (Dense)              (None, 2)                 26        
_________________________________________________________________
dense_4 (Dense)              (None, 2)                 6         
=================================================================
Total params: 2,132
Trainable params: 2,132
Non-trainable params: 0
_________________________________________________________________

 

# training
history = model.fit(train_X, train_Y, epochs = 25, batch_size = 32, validation_split=0.25)

Epoch 1/25
122/122 [==============================] - 2s 8ms/step - loss: 0.5704 - accuracy: 0.7708 - val_loss: 0.5771 - val_accuracy: 0.7362
Epoch 2/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5489 - accuracy: 0.7645 - val_loss: 0.5807 - val_accuracy: 0.7362
Epoch 3/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5497 - accuracy: 0.7649 - val_loss: 0.5775 - val_accuracy: 0.7362
Epoch 4/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5431 - accuracy: 0.7682 - val_loss: 0.5825 - val_accuracy: 0.7362
Epoch 5/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5570 - accuracy: 0.7586 - val_loss: 0.5770 - val_accuracy: 0.7362
Epoch 6/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5663 - accuracy: 0.7477 - val_loss: 0.5775 - val_accuracy: 0.7362
Epoch 7/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5632 - accuracy: 0.7504 - val_loss: 0.5870 - val_accuracy: 0.7362
Epoch 8/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5582 - accuracy: 0.7556 - val_loss: 0.5773 - val_accuracy: 0.7362
Epoch 9/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5576 - accuracy: 0.7564 - val_loss: 0.5783 - val_accuracy: 0.7362
Epoch 10/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5713 - accuracy: 0.7444 - val_loss: 0.5772 - val_accuracy: 0.7362
Epoch 11/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5606 - accuracy: 0.7534 - val_loss: 0.5799 - val_accuracy: 0.7362
Epoch 12/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5498 - accuracy: 0.7637 - val_loss: 0.5787 - val_accuracy: 0.7362
Epoch 13/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5517 - accuracy: 0.7620 - val_loss: 0.5854 - val_accuracy: 0.7362
Epoch 14/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5663 - accuracy: 0.7488 - val_loss: 0.5780 - val_accuracy: 0.7362
Epoch 15/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5583 - accuracy: 0.7560 - val_loss: 0.5772 - val_accuracy: 0.7362
Epoch 16/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5645 - accuracy: 0.7501 - val_loss: 0.5770 - val_accuracy: 0.7362
Epoch 17/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5686 - accuracy: 0.7456 - val_loss: 0.5860 - val_accuracy: 0.7362
Epoch 18/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5499 - accuracy: 0.7620 - val_loss: 0.5788 - val_accuracy: 0.7362
Epoch 19/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5738 - accuracy: 0.7478 - val_loss: 0.5802 - val_accuracy: 0.7362
Epoch 20/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5537 - accuracy: 0.7598 - val_loss: 0.5800 - val_accuracy: 0.7362
Epoch 21/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5449 - accuracy: 0.7661 - val_loss: 0.5911 - val_accuracy: 0.7362
Epoch 22/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5615 - accuracy: 0.7542 - val_loss: 0.5809 - val_accuracy: 0.7362
Epoch 23/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5647 - accuracy: 0.7507 - val_loss: 0.5771 - val_accuracy: 0.7362
Epoch 24/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5590 - accuracy: 0.7544 - val_loss: 0.5774 - val_accuracy: 0.7362
Epoch 25/25
122/122 [==============================] - 0s 4ms/step - loss: 0.5519 - accuracy: 0.7623 - val_loss: 0.5882 - val_accuracy: 0.7362

 

# visualize the training results : loss and accuracy
import matplotlib.pyplot as plt
plt.figure(figsize = (12,4))

plt.subplot(1,2,1)
plt.plot(history.history['loss'], 'b-', label = 'loss')
plt.plot(history.history['val_loss'], 'r--', label = 'val_loss')
plt.xlabel('Epoch')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['accuracy'], 'g-', label = 'accuracy')
plt.plot(history.history['val_accuracy'], 'k--', label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylim(0.7, 1)
plt.legend()
plt.show()

# evaluate the model
model.evaluate(test_X, test_Y)

41/41 [==============================] - 0s 2ms/step - loss: 0.5416 - accuracy: 0.7700
[0.5416469573974609, 0.7699999809265137]
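Beyond evaluate(), the trained model can be applied to individual samples. A small sketch (not from the original post), assuming the model, test_X and test_Y defined above:

# sketch: predict the first 5 test wines and compare with the true labels
import numpy as np
probs = model.predict(test_X[:5])        # per-class outputs
pred = np.argmax(probs, axis=1)          # 0 = red, 1 = white
true = np.argmax(test_Y[:5], axis=1)     # test_Y is one-hot encoded above
print('predicted:', pred)
print('true     :', true)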

 

from tensorflow.keras.datasets import boston_housing
(train_x, train_y), (test_x, test_y) = boston_housing.load_data()

 

mean = train_x.mean(axis=0)
std = train_x.std(axis=0)
train_x = (train_x - mean) / std
test_x = (test_x - mean) / std

 

# regression model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential([
    Dense(units = 52, activation ='relu', input_shape=(13,)),
    Dense(39, activation ='relu'),
    Dense(26, activation ='relu'),
    Dense(1, activation ='relu'), # single output; a linear activation (the default) is more typical for regression
])

 

model.summary()

Model: "sequential_6"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_18 (Dense)             (None, 52)                728       
_________________________________________________________________
dense_19 (Dense)             (None, 39)                2067      
_________________________________________________________________
dense_20 (Dense)             (None, 26)                1040      
_________________________________________________________________
dense_21 (Dense)             (None, 1)                 27        
=================================================================
Total params: 3,862
Trainable params: 3,862
Non-trainable params: 0
_________________________________________________________________

 

model.compile(optimizer='adam', loss='mse', metrics = 'mae')

 

# training; the loss curves for the training and validation data are plotted below
history = model.fit(train_x, train_y, epochs = 25, batch_size = 32, validation_split = 0.25)

Epoch 1/25
10/10 [==============================] - 0s 19ms/step - loss: 530.4370 - mae: 21.3062 - val_loss: 646.2498 - val_mae: 23.3830
Epoch 2/25
10/10 [==============================] - 0s 5ms/step - loss: 511.7708 - mae: 20.8918 - val_loss: 605.8097 - val_mae: 22.5224
Epoch 3/25
10/10 [==============================] - 0s 5ms/step - loss: 452.3462 - mae: 19.4829 - val_loss: 543.3400 - val_mae: 21.1280
Epoch 4/25
10/10 [==============================] - 0s 5ms/step - loss: 421.9097 - mae: 18.4410 - val_loss: 448.8548 - val_mae: 18.8896
Epoch 5/25
10/10 [==============================] - 0s 5ms/step - loss: 328.2960 - mae: 15.9189 - val_loss: 322.6113 - val_mae: 15.4995
Epoch 6/25
10/10 [==============================] - 0s 5ms/step - loss: 206.3778 - mae: 12.3063 - val_loss: 190.2625 - val_mae: 10.9874
Epoch 7/25
10/10 [==============================] - 0s 5ms/step - loss: 102.5738 - mae: 8.2045 - val_loss: 108.8373 - val_mae: 7.7611
Epoch 8/25
10/10 [==============================] - 0s 5ms/step - loss: 62.3362 - mae: 6.0779 - val_loss: 83.7476 - val_mae: 6.9045
Epoch 9/25
10/10 [==============================] - 0s 5ms/step - loss: 54.6748 - mae: 5.5581 - val_loss: 64.6556 - val_mae: 6.0057
Epoch 10/25
10/10 [==============================] - 0s 5ms/step - loss: 38.2626 - mae: 4.5089 - val_loss: 52.2166 - val_mae: 5.2636
Epoch 11/25
10/10 [==============================] - 0s 5ms/step - loss: 29.6534 - mae: 3.8594 - val_loss: 42.0955 - val_mae: 4.6582
Epoch 12/25
10/10 [==============================] - 0s 5ms/step - loss: 23.6841 - mae: 3.5540 - val_loss: 35.9563 - val_mae: 4.2582
Epoch 13/25
10/10 [==============================] - 0s 6ms/step - loss: 22.4709 - mae: 3.3830 - val_loss: 32.3157 - val_mae: 3.9782
Epoch 14/25
10/10 [==============================] - 0s 5ms/step - loss: 29.0002 - mae: 3.3399 - val_loss: 29.0226 - val_mae: 3.7494
Epoch 15/25
10/10 [==============================] - 0s 5ms/step - loss: 16.4506 - mae: 2.8439 - val_loss: 28.3905 - val_mae: 3.6493
Epoch 16/25
10/10 [==============================] - 0s 5ms/step - loss: 14.7652 - mae: 2.7855 - val_loss: 26.5145 - val_mae: 3.5460
Epoch 17/25
10/10 [==============================] - 0s 6ms/step - loss: 17.2251 - mae: 2.8775 - val_loss: 24.6995 - val_mae: 3.4769
Epoch 18/25
10/10 [==============================] - 0s 5ms/step - loss: 18.4610 - mae: 2.8323 - val_loss: 23.6749 - val_mae: 3.3981
Epoch 19/25
10/10 [==============================] - 0s 5ms/step - loss: 17.8006 - mae: 2.9075 - val_loss: 23.2420 - val_mae: 3.3467
Epoch 20/25
10/10 [==============================] - 0s 5ms/step - loss: 14.6954 - mae: 2.6773 - val_loss: 22.6341 - val_mae: 3.2986
Epoch 21/25
10/10 [==============================] - 0s 5ms/step - loss: 15.6623 - mae: 2.7753 - val_loss: 21.0724 - val_mae: 3.2230
Epoch 22/25
10/10 [==============================] - 0s 5ms/step - loss: 12.5014 - mae: 2.5566 - val_loss: 20.8122 - val_mae: 3.1781
Epoch 23/25
10/10 [==============================] - 0s 5ms/step - loss: 18.1167 - mae: 2.7763 - val_loss: 19.5292 - val_mae: 3.1146
Epoch 24/25
10/10 [==============================] - 0s 5ms/step - loss: 14.6604 - mae: 2.8073 - val_loss: 19.0061 - val_mae: 3.0980
Epoch 25/25
10/10 [==============================] - 0s 5ms/step - loss: 13.0764 - mae: 2.5586 - val_loss: 18.5477 - val_mae: 3.0422

 

import matplotlib.pyplot as plt
fig = plt.figure(figsize = (10,5))

ax1 = fig.add_subplot(1,2,1)
ax1.plot(history.history['loss'], 'b-', label='loss')
ax1.plot(history.history['val_loss'], 'r--', label='val_loss')
ax1.set_xlabel('epochs')
ax1.set_ylabel('loss')
ax1.legend()

ax2 = fig.add_subplot(1,2,2)
ax2.plot(history.history['mae'], 'b-', label='mae')
ax2.plot(history.history['val_mae'], 'r--', label='val_mae')
ax2.set_xlabel('epochs')
ax2.set_ylabel('mae')
ax2.legend()

ax2.set_title('train and val mae')
plt.xlabel('Epoch')
plt.show()

 

 


# cross-validation // helps guard against overfitting
# hold-out validation : a randomly selected subset of the data is held out for validation
# k-fold validation : the data is split into k groups; one group is used for validation and the rest for training, rotating through all k

from sklearn.model_selection import KFold
from tensorflow.keras.datasets.boston_housing import load_data
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import numpy as np
(x_train, y_train), (x_test, y_test) = load_data(path='boston_housing.npz', test_split=0.2, seed=777)

 

# standardize the data
import numpy as np
# mean
mean = np.mean(x_train, axis = 0)
# standard deviation
std = np.std(x_train, axis = 0)
# standardized values
x_train = (x_train - mean) / std
x_test = (x_test - mean) / std

 

k = 3
kfold = KFold(n_splits = k)
def get_model() :
    model = Sequential()
    model.add(Dense(64, activation = 'relu', input_shape = (13,)))
    model.add(Dense(32, activation = 'relu'))
    model.add(Dense(1)) # single output value; no sigmoid here, it would squash the output to the 0~1 range
    model.compile(optimizer = 'adam', loss = 'mse', metrics = ['mae'])
    return model

 

mae_list = []
for train_index, val_index in kfold.split(x_train) :
    x_train_fold, x_val_fold = x_train[train_index], x_train[val_index]
    y_train_fold, y_val_fold = y_train[train_index], y_train[val_index]
    model = get_model()
    model.fit(x_train_fold, y_train_fold, epochs = 300, validation_data = (x_val_fold, y_val_fold))
    # evaluation : loss value and the metric (mae)
    _, test_mae = model.evaluate(x_test, y_test)
    mae_list.append(test_mae)

 

model.evaluate(x_test, y_test)

4/4 [==============================] - 0s 1ms/step - loss: 9.3774 - mae: 2.1469
[9.377416610717773, 2.14687442779541]

 

mae_list

# [2.11413836479187, 2.008519411087036, 2.14687442779541]

 

# final result : average MAE over the k folds
print('Result :', np.mean(mae_list))

# Result : 2.089844067891439

 

# scatter plot of actual vs. predicted house prices
results = model.predict(x_test)
results[:10]

array([[21.987972],
       [21.056334],
       [47.57793 ],
       [22.31767 ],
       [12.485436],
       [33.702785],
       [26.14967 ],
       [29.55312 ],
       [26.651386],
       [24.07104 ]], dtype=float32)

 

import matplotlib.pyplot as plt
plt.figure(figsize = (5,5))
plt.plot(y_test, results, 'b.')
plt.plot([min(y_test), max(y_test)], [min(results), max(results)], ls='--', c='.3')
plt.xlabel('y_test')
plt.ylabel('results')
plt.show()

 

# Boston house price prediction with deep learning
from tensorflow.keras.datasets.boston_housing import load_data
(x_train, y_train), (x_test, y_test) = load_data(path='boston_housing.npz', test_split=0.2, seed=777)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)

# (404, 13) (404,)
# (102, 13) (102,)

 

# preprocessing : normalization / standardization
# min-max normalization : x = (x - min) / (max - min) => x / max (when min is 0)
# robust normalization : x = (x - median) / (Q3 - Q1)
# standardization : x = (x - mean) / std

import numpy as np
# mean
mean = np.mean(x_train, axis = 0)
# standard deviation
std = np.std(x_train, axis = 0)
# standardized values
x_train = (x_train - mean) / std
x_test = (x_test - mean) / std
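The code above uses standardization; the other two schemes listed in the comments could be written as follows (a sketch under the assumption that x is a NumPy feature matrix, not code from the original post):

# sketch: the alternative normalizations mentioned above, applied column-wise to a NumPy array x
def minmax_normalize(x):
    return (x - x.min(axis=0)) / (x.max(axis=0) - x.min(axis=0))

def robust_normalize(x):
    q1, q2, q3 = np.percentile(x, [25, 50, 75], axis=0)  # quartiles per column
    return (x - q2) / (q3 - q1)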

 

# split off a validation set
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.33, random_state = 777)

 

# model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(64, activation = 'relu', input_shape = (13,)))
model.add(Dense(32, activation = 'relu'))
model.add(Dense(1)) # single output value; no sigmoid here, it would squash the output to the 0~1 range

 

# mse : mean squared error
# mae : mean absolute error
model.compile(optimizer = 'adam', loss = 'mse', metrics = ['mae'])

 

# training
history = model.fit(x_train, y_train, epochs = 10, validation_data = (x_val, y_val))

Epoch 1/10
9/9 [==============================] - 1s 18ms/step - loss: 573.3483 - mae: 22.1287 - val_loss: 590.5661 - val_mae: 22.0131
Epoch 2/10
9/9 [==============================] - 0s 6ms/step - loss: 539.8865 - mae: 21.2907 - val_loss: 566.5182 - val_mae: 21.4469
Epoch 3/10
9/9 [==============================] - 0s 6ms/step - loss: 477.1085 - mae: 19.9834 - val_loss: 535.4792 - val_mae: 20.6974
Epoch 4/10
9/9 [==============================] - 0s 6ms/step - loss: 466.8590 - mae: 19.5715 - val_loss: 493.8533 - val_mae: 19.6793
Epoch 5/10
9/9 [==============================] - 0s 6ms/step - loss: 424.9529 - mae: 18.5560 - val_loss: 438.8377 - val_mae: 18.3004
Epoch 6/10
9/9 [==============================] - 0s 5ms/step - loss: 379.3619 - mae: 17.4473 - val_loss: 369.3901 - val_mae: 16.5165
Epoch 7/10
9/9 [==============================] - 0s 6ms/step - loss: 306.1354 - mae: 15.6274 - val_loss: 289.7295 - val_mae: 14.3575
Epoch 8/10
9/9 [==============================] - 0s 6ms/step - loss: 242.9725 - mae: 13.7859 - val_loss: 208.9959 - val_mae: 11.8795
Epoch 9/10
9/9 [==============================] - 0s 6ms/step - loss: 151.2137 - mae: 10.6995 - val_loss: 139.9005 - val_mae: 9.3653
Epoch 10/10
9/9 [==============================] - 0s 5ms/step - loss: 101.8669 - mae: 8.2810 - val_loss: 93.3427 - val_mae: 7.3553

 

model.evaluate(x_test, y_test)

4/4 [==============================] - 0s 2ms/step - loss: 88.2695 - mae: 7.8024
[88.26952362060547, 7.802405834197998]

 

results = model.predict(x_test)
xval = range(len(x_test))

 

# check the results
# plot the predicted prices (left) and the actual prices (right)
import matplotlib.pyplot as plt

fig = plt.figure(figsize = (10,5))
ax1 = fig.add_subplot(1,2,1)
ax1.plot(xval, results, color='blue', label='predict_value')
ax1.set_title('predict value')
ax1.set_xlabel('data')
ax1.set_ylabel('price')
ax2 = fig.add_subplot(1,2,2)
ax2.plot(xval, y_test, color='orange', label='real_value')
ax2.set_title('real value')
ax2.set_xlabel('data')
ax2.set_ylabel('price')
plt.show()

 

fig = plt.figure(figsize = (10,5))
ax1 = fig.add_subplot(1,1,1)
ax1.plot(xval, results, color = 'blue', label = 'predict_value')
ax1.plot(xval, y_test, color = 'orange', label = 'real_value')
ax1.set_title('predict and real value')
ax1.set_xlabel('data')
ax1.set_ylabel('price')
ax1.legend()
plt.show()

 

 

# fashionmnist
from tensorflow.keras.datasets.fashion_mnist import load_data
(x_train, y_train), (x_test, y_test) = load_data()
print(x_train.shape)

# (60000, 28, 28)

y_train[:10]

# array([9, 0, 0, 3, 0, 2, 7, 2, 5, 5], dtype=uint8)

 

class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

 

# display a few random samples as images
import matplotlib.pyplot as plt
import numpy as np
sample_size = 9
# pick 9 random indices from 0 ~ 59999
random_idx = np.random.randint(60000, size = sample_size)
plt.figure(figsize = (5, 5))
for i, idx in enumerate(random_idx) :
    plt.subplot(3,3, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(x_train[idx], cmap = 'gray')
    plt.xlabel(class_names[y_train[idx]])
plt.show()

 

# normalize the training and test images to the 0~1 range
x_train[:10]
x_train = x_train / 255
x_test = x_test / 255

 

from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

 

from sklearn.model_selection import train_test_split
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size=0.3, random_state = 777)
print('training data :',X_train.shape,', labels :',Y_train.shape)
print('validation data :',X_val.shape,', labels :',Y_val.shape)

# training data : (42000, 28, 28) , labels : (42000, 10)
# validation data : (18000, 28, 28) , labels : (18000, 10)

 

Model 1 : hidden layers of 64 and 32 units, output layer of 10 units

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
model1 = Sequential()
model1.add(Flatten(input_shape=(28, 28))) # input layer: flattens the 2-D image to 1-D, so a separate .reshape is not needed
model1.add(Dense(64, activation = 'relu'))
model1.add(Dense(32, activation = 'relu'))
model1.add(Dense(10, activation = 'softmax'))

 

model1.summary()              # input layer (Flatten)
                                # first hidden layer : 64 units
                                # second hidden layer : 32 units
                                # output layer : 10 units
                                
Model: "sequential_3"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
flatten_3 (Flatten)          (None, 784)               0         
_________________________________________________________________
dense_9 (Dense)              (None, 64)                50240     
_________________________________________________________________
dense_10 (Dense)             (None, 32)                2080      
_________________________________________________________________
dense_11 (Dense)             (None, 10)                330       
=================================================================
Total params: 52,650
Trainable params: 52,650
Non-trainable params: 0
_________________________________________________________________

 

model1.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
# note: x_train here is the full 60000-sample set, so the validation data (X_val, Y_val) overlaps with it;
# fitting on X_train, Y_train would keep training and validation disjoint
history1 = model1.fit(x_train, y_train, epochs=30, batch_size=128, validation_data=(X_val, Y_val))


Epoch 1/30
469/469 [==============================] - 1s 3ms/step - loss: 0.8711 - acc: 0.6973 - val_loss: 0.4393 - val_acc: 0.8425
Epoch 2/30
469/469 [==============================] - 1s 2ms/step - loss: 0.4209 - acc: 0.8495 - val_loss: 0.3694 - val_acc: 0.8717
Epoch 3/30
469/469 [==============================] - 1s 3ms/step - loss: 0.3815 - acc: 0.8622 - val_loss: 0.3443 - val_acc: 0.8754
Epoch 4/30
469/469 [==============================] - 1s 2ms/step - loss: 0.3505 - acc: 0.8727 - val_loss: 0.3208 - val_acc: 0.8841
Epoch 5/30
469/469 [==============================] - 1s 2ms/step - loss: 0.3327 - acc: 0.8785 - val_loss: 0.3102 - val_acc: 0.8882
Epoch 6/30
469/469 [==============================] - 1s 2ms/step - loss: 0.3199 - acc: 0.8824 - val_loss: 0.2903 - val_acc: 0.8952
Epoch 7/30
469/469 [==============================] - 1s 2ms/step - loss: 0.3057 - acc: 0.8892 - val_loss: 0.2904 - val_acc: 0.8952
Epoch 8/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2986 - acc: 0.8901 - val_loss: 0.2779 - val_acc: 0.8995
Epoch 9/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2860 - acc: 0.8940 - val_loss: 0.2606 - val_acc: 0.9059
Epoch 10/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2795 - acc: 0.8974 - val_loss: 0.2608 - val_acc: 0.9048
Epoch 11/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2768 - acc: 0.8970 - val_loss: 0.2661 - val_acc: 0.9025
Epoch 12/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2607 - acc: 0.9033 - val_loss: 0.2498 - val_acc: 0.9096
Epoch 13/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2628 - acc: 0.9039 - val_loss: 0.2418 - val_acc: 0.9116
Epoch 14/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2509 - acc: 0.9073 - val_loss: 0.2443 - val_acc: 0.9102
Epoch 15/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2412 - acc: 0.9120 - val_loss: 0.2269 - val_acc: 0.9183
Epoch 16/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2372 - acc: 0.9133 - val_loss: 0.2412 - val_acc: 0.9125
Epoch 17/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2391 - acc: 0.9112 - val_loss: 0.2172 - val_acc: 0.9212
Epoch 18/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2262 - acc: 0.9171 - val_loss: 0.2078 - val_acc: 0.9261
Epoch 19/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2258 - acc: 0.9153 - val_loss: 0.2031 - val_acc: 0.9273
Epoch 20/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2245 - acc: 0.9169 - val_loss: 0.2010 - val_acc: 0.9285
Epoch 21/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2173 - acc: 0.9191 - val_loss: 0.2006 - val_acc: 0.9281
Epoch 22/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2162 - acc: 0.9197 - val_loss: 0.2037 - val_acc: 0.9268
Epoch 23/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2087 - acc: 0.9242 - val_loss: 0.1952 - val_acc: 0.9304
Epoch 24/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2038 - acc: 0.9250 - val_loss: 0.1937 - val_acc: 0.9313
Epoch 25/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2038 - acc: 0.9245 - val_loss: 0.2027 - val_acc: 0.9263
Epoch 26/30
469/469 [==============================] - 1s 2ms/step - loss: 0.2032 - acc: 0.9229 - val_loss: 0.2045 - val_acc: 0.9246
Epoch 27/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1944 - acc: 0.9291 - val_loss: 0.1839 - val_acc: 0.9336
Epoch 28/30
469/469 [==============================] - 1s 2ms/step - loss: 0.1925 - acc: 0.9286 - val_loss: 0.1966 - val_acc: 0.9277
Epoch 29/30
469/469 [==============================] - 1s 2ms/step - loss: 0.1818 - acc: 0.9326 - val_loss: 0.1905 - val_acc: 0.9293
Epoch 30/30
469/469 [==============================] - 1s 2ms/step - loss: 0.1872 - acc: 0.9304 - val_loss: 0.1950 - val_acc: 0.9287

 

model1.evaluate(x_test,y_test)

313/313 [==============================] - 0s 1ms/step - loss: 0.3881 - acc: 0.8763
[0.3880555033683777, 0.8762999773025513]

 

results = model1.predict(x_test)
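The raw predictions are class probabilities; a small sketch (an illustration, not from the original post) of mapping them back to the class_names defined earlier for a few test images:

# sketch: predicted vs. true class names for the first few test samples
pred_labels = np.argmax(results, axis=1)
true_labels = np.argmax(y_test, axis=1)   # y_test was one-hot encoded above
for i in range(5):
    print(f'sample {i}: predicted={class_names[pred_labels[i]]}, true={class_names[true_labels[i]]}')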

 

# confusion matrix
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
# np.argmax(y_test, axis = -1) returns the index of the largest value in each row, i.e. the class label
cm = confusion_matrix(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1))
cm

array([[891,   2,  14,  11,   4,   2,  72,   0,   4,   0],
       [ 10, 971,   1,  10,   4,   0,   4,   0,   0,   0],
       [ 22,   1, 787,  12,  91,   0,  87,   0,   0,   0],
       [ 37,   8,  13, 869,  37,   1,  31,   0,   4,   0],
       [  4,   0,  99,  23, 815,   1,  57,   0,   1,   0],
       [  0,   0,   0,   1,   0, 952,   0,  20,   1,  26],
       [159,   1,  77,  25,  77,   0, 657,   0,   4,   0],
       [  0,   0,   0,   0,   0,  20,   0, 884,   2,  94],
       [ 10,   1,   4,   4,  12,   3,  11,   4, 951,   0],
       [  0,   0,   0,   0,   0,   5,   1,   8,   0, 986]], dtype=int64)

 

import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize = (7,7))
sns.heatmap(cm, annot = True, fmt = 'd', cmap = 'Blues')
plt.xlabel('predicted label', fontsize = 15)
plt.ylabel('true label', fontsize = 15)
plt.show()

 

# class distribution of the test labels
import pandas as pd
pd.Series(np.argmax(y_test, axis = -1)).value_counts().sort_index()

0    1000
1    1000
2    1000
3    1000
4    1000
5    1000
6    1000
7    1000
8    1000
9    1000
dtype: int64

 

print(classification_report(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1)))

              precision    recall  f1-score   support

           0       0.79      0.89      0.84      1000
           1       0.99      0.97      0.98      1000
           2       0.79      0.79      0.79      1000
           3       0.91      0.87      0.89      1000
           4       0.78      0.81      0.80      1000
           5       0.97      0.95      0.96      1000
           6       0.71      0.66      0.68      1000
           7       0.97      0.88      0.92      1000
           8       0.98      0.95      0.97      1000
           9       0.89      0.99      0.94      1000

    accuracy                           0.88     10000
   macro avg       0.88      0.88      0.88     10000
weighted avg       0.88      0.88      0.88     10000

 

Model 2 : hidden layers of 128, 64 and 32 units, output layer of 10 units

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
model2 = Sequential()
model2.add(Flatten(input_shape=(28, 28))) # input layer: flattens the 2-D image to 1-D, so a separate .reshape is not needed
model2.add(Dense(128, activation = 'relu'))
model2.add(Dense(64, activation = 'relu'))
model2.add(Dense(32, activation = 'relu'))
model2.add(Dense(10, activation = 'softmax'))
model2.summary()

Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
flatten_2 (Flatten)          (None, 784)               0         
_________________________________________________________________
dense_6 (Dense)              (None, 128)               100480    
_________________________________________________________________
dense_7 (Dense)              (None, 64)                8256      
_________________________________________________________________
dense_8 (Dense)              (None, 32)                2080      
_________________________________________________________________
dense_9 (Dense)              (None, 10)                330       
=================================================================
Total params: 111,146
Trainable params: 111,146
Non-trainable params: 0
_________________________________________________________________

 

model2.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
history2 = model2.fit(x_train, y_train, epochs=30, batch_size=128, validation_data=(X_val, Y_val))

Epoch 1/30
469/469 [==============================] - 2s 3ms/step - loss: 0.8033 - acc: 0.7328 - val_loss: 0.3923 - val_acc: 0.8634
Epoch 2/30
469/469 [==============================] - 1s 3ms/step - loss: 0.4000 - acc: 0.8577 - val_loss: 0.3466 - val_acc: 0.8776
Epoch 3/30
469/469 [==============================] - 1s 3ms/step - loss: 0.3499 - acc: 0.8721 - val_loss: 0.3355 - val_acc: 0.8779
Epoch 4/30
469/469 [==============================] - 1s 3ms/step - loss: 0.3283 - acc: 0.8814 - val_loss: 0.3091 - val_acc: 0.8868
Epoch 5/30
469/469 [==============================] - 1s 3ms/step - loss: 0.3073 - acc: 0.8877 - val_loss: 0.2801 - val_acc: 0.8997
Epoch 6/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2897 - acc: 0.8933 - val_loss: 0.2657 - val_acc: 0.9041
Epoch 7/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2751 - acc: 0.8974 - val_loss: 0.2663 - val_acc: 0.9022
Epoch 8/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2692 - acc: 0.8995 - val_loss: 0.2529 - val_acc: 0.9076
Epoch 9/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2634 - acc: 0.9024 - val_loss: 0.2363 - val_acc: 0.9133
Epoch 10/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2438 - acc: 0.9089 - val_loss: 0.2339 - val_acc: 0.9136
Epoch 11/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2448 - acc: 0.9083 - val_loss: 0.2393 - val_acc: 0.9090
Epoch 12/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2355 - acc: 0.9112 - val_loss: 0.2355 - val_acc: 0.9114
Epoch 13/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2243 - acc: 0.9148 - val_loss: 0.2158 - val_acc: 0.9202
Epoch 14/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2173 - acc: 0.9186 - val_loss: 0.2080 - val_acc: 0.9226
Epoch 15/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2168 - acc: 0.9197 - val_loss: 0.1917 - val_acc: 0.9295
Epoch 16/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2055 - acc: 0.9223 - val_loss: 0.1866 - val_acc: 0.9332
Epoch 17/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1978 - acc: 0.9242 - val_loss: 0.1891 - val_acc: 0.9302
Epoch 18/30
469/469 [==============================] - 1s 3ms/step - loss: 0.2004 - acc: 0.9248 - val_loss: 0.1708 - val_acc: 0.9376
Epoch 19/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1906 - acc: 0.9285 - val_loss: 0.1878 - val_acc: 0.9294
Epoch 20/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1884 - acc: 0.9303 - val_loss: 0.1774 - val_acc: 0.9341
Epoch 21/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1841 - acc: 0.9300 - val_loss: 0.1714 - val_acc: 0.9340
Epoch 22/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1739 - acc: 0.9330 - val_loss: 0.1576 - val_acc: 0.9419
Epoch 23/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1723 - acc: 0.9344 - val_loss: 0.1664 - val_acc: 0.9353
Epoch 24/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1663 - acc: 0.9377 - val_loss: 0.1593 - val_acc: 0.9411
Epoch 25/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1593 - acc: 0.9401 - val_loss: 0.1602 - val_acc: 0.9388
Epoch 26/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1598 - acc: 0.9394 - val_loss: 0.1670 - val_acc: 0.9378
Epoch 27/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1606 - acc: 0.9389 - val_loss: 0.1579 - val_acc: 0.9382
Epoch 28/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1464 - acc: 0.9441 - val_loss: 0.1777 - val_acc: 0.9341
Epoch 29/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1471 - acc: 0.9452 - val_loss: 0.1376 - val_acc: 0.9475
Epoch 30/30
469/469 [==============================] - 1s 3ms/step - loss: 0.1513 - acc: 0.9430 - val_loss: 0.1364 - val_acc: 0.9483

 

model2.evaluate(x_test,y_test)

313/313 [==============================] - 0s 1ms/step - loss: 0.3984 - acc: 0.8906
[0.398380845785141, 0.8906000256538391]

 

results = model2.predict(x_test)

 

# confusion matrix
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
# np.argmax(y_test, axis = -1) returns the index of the largest value in each row, i.e. the class label
cm = confusion_matrix(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1))
cm

array([[860,   1,  11,  26,   5,   1,  89,   0,   7,   0],
       [  6, 973,   0,  15,   4,   0,   2,   0,   0,   0],
       [ 21,   1, 793,  14,  89,   1,  78,   0,   3,   0],
       [ 21,   3,   7, 900,  40,   0,  22,   0,   7,   0],
       [  1,   0,  78,  21, 856,   1,  41,   0,   2,   0],
       [  0,   0,   0,   1,   0, 979,   0,  14,   1,   5],
       [131,   1,  60,  42,  76,   0, 677,   0,  13,   0],
       [  0,   0,   0,   0,   0,  27,   0, 930,   0,  43],
       [  6,   0,   4,   4,   2,   2,   4,   3, 974,   1],
       [  0,   0,   0,   0,   0,  15,   1,  20,   0, 964]], dtype=int64)

 

import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize = (7,7))
sns.heatmap(cm, annot = True, fmt = 'd', cmap = 'Blues')
plt.xlabel('predicted label', fontsize = 15)
plt.ylabel('true label', fontsize = 15)
plt.show()

 

print(classification_report(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1)))

              precision    recall  f1-score   support

           0       0.82      0.86      0.84      1000
           1       0.99      0.97      0.98      1000
           2       0.83      0.79      0.81      1000
           3       0.88      0.90      0.89      1000
           4       0.80      0.86      0.83      1000
           5       0.95      0.98      0.97      1000
           6       0.74      0.68      0.71      1000
           7       0.96      0.93      0.95      1000
           8       0.97      0.97      0.97      1000
           9       0.95      0.96      0.96      1000

    accuracy                           0.89     10000
   macro avg       0.89      0.89      0.89     10000
weighted avg       0.89      0.89      0.89     10000

 

Comparing the two models

import numpy as np
import matplotlib.pyplot as plt
def draw_loss_acc(history_1, history_2, epochs) :
    his_dict_1 = history_1.history
    his_dict_2 = history_2.history
    keys = list(his_dict_1.keys()) # ['loss', 'acc', 'val_loss', 'val_acc']
    epochs = range(1, epochs)
    fig = plt.figure(figsize = (10,10))
    ax = fig.add_subplot(1,1,1) # one outer axes used only for the shared x/y labels
    ax.spines['top'].set_color('none') # hide the outer frame (setting a colour here would draw the spine)
    ax.spines['bottom'].set_color('none')
    ax.spines['left'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)
    for i in range(len(his_dict_1)) :
        temp_ax = fig.add_subplot(2,2, i + 1)
        temp = keys[i%2]
        val_temp = keys[(i+2)%2 +2]
        temp_history = his_dict_1 if i < 2 else his_dict_2
        temp_ax.plot(epochs, temp_history[temp][1:], color = 'blue', label ='train_'+temp) # training curve
        temp_ax.plot(epochs, temp_history[val_temp][1:], color = 'orange', label = val_temp)
        if(i==1 or i==3) : # accuracy panels
            start, end = temp_ax.get_ylim()
            temp_ax.yaxis.set_ticks(np.arange(np.round(start, 2), end, 0.01))
        temp_ax.legend()
    ax.set_ylabel('loss', size = 20)
    ax.set_xlabel('Epochs', size = 20)
    plt.tight_layout()
    plt.show()
    
draw_loss_acc(history1, history2, 30)

 

 

history1.history.keys()
history2.history.keys()

# dict_keys(['loss', 'acc', 'val_loss', 'val_acc'])

 


# from TensorFlow 2.0 Keras is built in, so it does not need to be imported as a separate package

Deep learning with Keras

1. Define the data => training data, validation data, test data
2. Define the model => stack the layers (Dense)
3. Configure training => compile : loss function, optimizer, evaluation metric (acc)
    loss functions : mse : mean squared error, for regression
                     binary_crossentropy : for binary classification
                     categorical_crossentropy : for multi-class classification
4. Train the model => fit()
    epochs : how many times to iterate over the training data (e.g. 100 passes)
    batch_size : the training data is split into batches of this size and fed through in turn
    validation_data : the validation set monitored during training
5. Evaluate the model => evaluate(), predict() (a minimal sketch of these five steps follows below)
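A minimal end-to-end sketch of the five steps, using randomly generated dummy data purely for illustration (an assumption, not code from the original post):

# sketch: the five Keras steps on random dummy data
import numpy as np
import tensorflow as tf

# 1. define the data
x = np.random.rand(1000, 4)
y = (x.sum(axis=1) > 2).astype(int)

# 2. define the model (layer stack)
demo_model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# 3. configure training : loss, optimizer, metric
demo_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])

# 4. train
demo_model.fit(x, y, epochs=5, batch_size=32, validation_split=0.2)

# 5. evaluate / predict
demo_model.evaluate(x, y)
demo_model.predict(x[:3])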

 

MNIST digit recognition

# mnist : handwritten digit recognition
from tensorflow.keras.datasets.mnist import load_data
(x_train, y_train),(x_test, y_test) = load_data(path='mnist.npz')
# data shapes
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)

(60000, 28, 28) (60000,)
(10000, 28, 28) (10000,)

 

import matplotlib.pyplot as plt
import numpy as np
random_idx = np.random.randint(60000, size = 3)
for idx in random_idx :
    img = x_train[idx, :]
    label = y_train[idx]
    plt.figure()
    plt.imshow(img) # display the image
    plt.title('%d-th data, label is %d'%(idx,label), fontsize=15)

 

# split off a validation set
from sklearn.model_selection import train_test_split
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size=0.3, random_state = 777)
print('training data :',X_train.shape,', labels :',Y_train.shape)
print('validation data :',X_val.shape,', labels :',Y_val.shape)

training data : (42000, 28, 28) , labels : (42000,)
validation data : (18000, 28, 28) , labels : (18000,)

 

# choosing the activation function : SOFTMAX => used for multi-class classification
x = np.arange(-5.0, 5.0, 0.1)
y = np.exp(x) / np.sum(np.exp(x))
# the exponential of x divided by the sum of the exponentials
plt.plot(x, y)
plt.title("softmax Function")
plt.show()
# no output ever exceeds 1
# the outputs sum to 1

 

# preprocessing
num_x_train = X_train.shape[0]
num_x_val = X_val.shape[0]
num_x_test = x_test.shape[0]

 

# reshape x_train from shape (42000, 28, 28)
# to a 2-D array of shape (42000, 784)
x_train = (X_train.reshape((num_x_train, 28*28))) / 255 # normalize to 0~1
x_val = (X_val.reshape((num_x_val, 28*28))) / 255
x_test = (x_test.reshape((num_x_test, 28*28))) / 255
print(x_train.shape)
# min-max normalization : x = (x - min) / (max - min) => x / max (when min is 0)
# robust normalization : x = (x - median) / (Q3 - Q1)
# standardization : x = (x - mean) / std

# (42000, 784)

 

# count how many of each digit 0~9 appears in y_test
type(y_test)

# numpy.ndarray

y_test[:10]

# array([7, 2, 1, 0, 4, 1, 4, 9, 5, 9], dtype=uint8)

 

import pandas as pd
df = pd.Series(y_test)
df.value_counts().sort_index()

0     980
1    1135
2    1032
3    1010
4     982
5     892
6     958
7    1028
8     974
9    1009
dtype: int64

 

# preprocess the labels with one-hot encoding
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(Y_train)
y_val = to_categorical(Y_val)
y_test = to_categorical(y_test)
print(y_train[0])

# [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]

 

# build the model
# stack the layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential() # multilayer perceptron built by stacking Dense layers
# first hidden layer with 64 units
model.add(Dense(64, activation = 'relu', input_shape = (784, )))
# second hidden layer with 32 units
model.add(Dense(32, activation = 'relu')) # relu is a common choice for hidden layers
# output layer with 10 units
model.add(Dense(10, activation = 'softmax')) # classifies the input into digits 0~9

 

# print the model structure
model.summary()
# layer names are generated automatically
# 3 layers; output size and weight-parameter count per layer:
#   64 units -> 50240 params // 64 * (784 + 1)
#   32 units ->  2080 params // 32 * (64 + 1)
#   10 units ->   330 params // 10 * (32 + 1)


Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_2 (Dense)              (None, 64)                50240     
_________________________________________________________________
dense_3 (Dense)              (None, 32)                2080      
_________________________________________________________________
dense_4 (Dense)              (None, 10)                330       
=================================================================
Total params: 52,650
Trainable params: 52,650
Non-trainable params: 0
_________________________________________________________________
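
The parameter counts in the summary follow the rule units * (inputs + 1), where the +1 is the bias of each unit; a quick arithmetic check:

# Dense layer parameters = units * (inputs + 1); the +1 is the bias term
layers = [(784, 64), (64, 32), (32, 10)]  # (inputs, units) for the three layers above
params = [units * (inputs + 1) for inputs, units in layers]
print(params)      # [50240, 2080, 330]
print(sum(params)) # 52650, matching "Total params" in the summary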

 

# softmax implementation
import numpy as np
# np.exp computes the exponential function e**x
def softmax(arr) : # relative: each output depends on the whole input vector
    m = np.max(arr)
    arr = arr - m # subtract the max for numerical stability
    arr = np.exp(arr)
    return arr / np.sum(arr)
def sigmoid(x) : # absolute: each element is squashed independently
    return 1 / (1+np.exp(-x))
case_1 = np.array([3.1, 3.0, 2.9])
case_2 = np.array([2.0, 1.0, 0.7])
np.set_printoptions(precision=3)
print("sigmoid:",sigmoid(case_1),",softmax :",softmax(case_1)) # similar inputs give similar sigmoid values
print("sigmoid:",sigmoid(case_2),",softmax :",softmax(case_2))

# sigmoid: [0.957 0.953 0.948] ,softmax : [0.367 0.332 0.301]
# sigmoid: [0.881 0.731 0.668] ,softmax : [0.61  0.224 0.166]
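
Subtracting the maximum before exponentiating does not change the softmax result, it only prevents overflow for large inputs; a quick check with made-up values, using the softmax defined above:

import numpy as np
big = np.array([1000.0, 1001.0, 1002.0])
print(softmax(big))                      # stable: [0.09  0.245 0.665]
print(np.exp(big) / np.sum(np.exp(big))) # the naive version overflows and returns nan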

 

# configure the learning process
model.compile(optimizer='adam', # optimizer: adam
             loss = 'categorical_crossentropy', # loss function
             metrics = ['acc']) # metric to monitor: accuracy

 

# train the model
history = model.fit(x_train, y_train,
                   epochs = 30, # number of passes; backpropagation drives the weights toward the optimum
                   batch_size = 128, # mini-batch size
                   validation_data = (x_val, y_val)) # validation data
                   

Epoch 1/30
329/329 [==============================] - 2s 5ms/step - loss: 0.8727 - acc: 0.7424 - val_loss: 0.2460 - val_acc: 0.9320
Epoch 2/30
329/329 [==============================] - 1s 3ms/step - loss: 0.2213 - acc: 0.9367 - val_loss: 0.1820 - val_acc: 0.9483
Epoch 3/30
329/329 [==============================] - 1s 3ms/step - loss: 0.1598 - acc: 0.9545 - val_loss: 0.1591 - val_acc: 0.9536
Epoch 4/30
329/329 [==============================] - 1s 3ms/step - loss: 0.1280 - acc: 0.9624 - val_loss: 0.1331 - val_acc: 0.9613
Epoch 5/30
329/329 [==============================] - 1s 3ms/step - loss: 0.1030 - acc: 0.9708 - val_loss: 0.1261 - val_acc: 0.9631
Epoch 6/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0922 - acc: 0.9746 - val_loss: 0.1256 - val_acc: 0.9618
Epoch 7/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0733 - acc: 0.9787 - val_loss: 0.1165 - val_acc: 0.9654
Epoch 8/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0734 - acc: 0.9785 - val_loss: 0.1167 - val_acc: 0.9652
Epoch 9/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0607 - acc: 0.9823 - val_loss: 0.1165 - val_acc: 0.9659
Epoch 10/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0476 - acc: 0.9864 - val_loss: 0.1121 - val_acc: 0.9668
Epoch 11/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0435 - acc: 0.9878 - val_loss: 0.1068 - val_acc: 0.9691
Epoch 12/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0429 - acc: 0.9868 - val_loss: 0.1087 - val_acc: 0.9688
Epoch 13/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0345 - acc: 0.9901 - val_loss: 0.1074 - val_acc: 0.9691
Epoch 14/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0303 - acc: 0.9915 - val_loss: 0.1146 - val_acc: 0.9682
Epoch 15/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0254 - acc: 0.9934 - val_loss: 0.1143 - val_acc: 0.9690
Epoch 16/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0258 - acc: 0.9932 - val_loss: 0.1271 - val_acc: 0.9660
Epoch 17/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0214 - acc: 0.9941 - val_loss: 0.1138 - val_acc: 0.9694
Epoch 18/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0156 - acc: 0.9966 - val_loss: 0.1177 - val_acc: 0.9705
Epoch 19/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0163 - acc: 0.9960 - val_loss: 0.1228 - val_acc: 0.9692
Epoch 20/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0197 - acc: 0.9940 - val_loss: 0.1201 - val_acc: 0.9703
Epoch 21/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0138 - acc: 0.9962 - val_loss: 0.1259 - val_acc: 0.9690
Epoch 22/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0089 - acc: 0.9985 - val_loss: 0.1308 - val_acc: 0.9684
Epoch 23/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0090 - acc: 0.9978 - val_loss: 0.1376 - val_acc: 0.9672
Epoch 24/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0104 - acc: 0.9973 - val_loss: 0.1387 - val_acc: 0.9688
Epoch 25/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0091 - acc: 0.9977 - val_loss: 0.1484 - val_acc: 0.9666
Epoch 26/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0097 - acc: 0.9971 - val_loss: 0.1367 - val_acc: 0.9697
Epoch 27/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0049 - acc: 0.9992 - val_loss: 0.1401 - val_acc: 0.9702
Epoch 28/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0041 - acc: 0.9991 - val_loss: 0.1416 - val_acc: 0.9697
Epoch 29/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0170 - acc: 0.9949 - val_loss: 0.1521 - val_acc: 0.9678
Epoch 30/30
329/329 [==============================] - 1s 3ms/step - loss: 0.0070 - acc: 0.9976 - val_loss: 0.1527 - val_acc: 0.9689

 

history.history.keys()

# dict_keys(['loss', 'acc', 'val_loss', 'val_acc'])

 

type(history)

# tensorflow.python.keras.callbacks.History

 

type(history.history)

# dict

 

history.history

{'loss': [0.49928054213523865,
  0.2102358192205429,
  0.1598261296749115,
  0.1318138986825943,
  0.11270429939031601,
  0.09517224133014679,
  0.08382865786552429,
  0.0735907107591629,
  0.0659095048904419,
  0.05770876258611679,
  0.05159643664956093,
  0.04378071427345276,
  0.038674868643283844,
  0.034976404160261154,
  0.03131331503391266,
  0.02853371389210224,
  0.02386757917702198,
  0.02131393365561962,
  0.019856156781315804,
  0.016329089179635048,
  0.016619833186268806,
  0.016199814155697823,
  0.011386237107217312,
  0.010206134989857674,
  0.009236675687134266,
  0.00816024374216795,
  0.011310409754514694,
  0.007149739656597376,
  0.0060667796060442924,
  0.004311054944992065],
 'acc': [0.8610952496528625,
  0.9389761686325073,
  0.9542142748832703,
  0.9617618918418884,
  0.9669285416603088,
  0.9709523916244507,
  0.9750000238418579,
  0.9781190752983093,
  0.9802619218826294,
  0.9830714464187622,
  0.9849047660827637,
  0.9872142672538757,
  0.9891190528869629,
  0.9903333187103271,
  0.991428554058075,
  0.9916666746139526,
  0.9935476183891296,
  0.9939047694206238,
  0.994523823261261,
  0.9958571195602417,
  0.9951428771018982,
  0.9955000281333923,
  0.9975237846374512,
  0.9977142810821533,
  0.9980952143669128,
  0.9980238080024719,
  0.9968809485435486,
  0.9981904625892639,
  0.9988333582878113,
  0.9992856979370117],
 'val_loss': [0.24861222505569458,
  0.18751391768455505,
  0.16067922115325928,
  0.14771240949630737,
  0.1354956030845642,
  0.12186738103628159,
  0.12513096630573273,
  0.11414627730846405,
  0.10838081687688828,
  0.10804055631160736,
  0.10687518864870071,
  0.10763053596019745,
  0.10694220662117004,
  0.10424619913101196,
  0.10935620963573456,
  0.112459696829319,
  0.1159299984574318,
  0.11536701023578644,
  0.11584317684173584,
  0.12000786513090134,
  0.12605518102645874,
  0.12743493914604187,
  0.12300366163253784,
  0.13212160766124725,
  0.12954148650169373,
  0.13898710906505585,
  0.1431078463792801,
  0.1479557752609253,
  0.14380276203155518,
  0.14053674042224884],
 'val_acc': [0.9315555691719055,
  0.9463333487510681,
  0.9527222514152527,
  0.957111120223999,
  0.9597222208976746,
  0.9630555510520935,
  0.9627222418785095,
  0.9657777547836304,
  0.9682222008705139,
  0.9683333039283752,
  0.9681110978126526,
  0.9685555696487427,
  0.9692777991294861,
  0.9705555438995361,
  0.9693889021873474,
  0.9695000052452087,
  0.9695555567741394,
  0.968999981880188,
  0.9695000052452087,
  0.9689444303512573,
  0.9692222476005554,
  0.9681110978126526,
  0.9696666598320007,
  0.9697222113609314,
  0.9701666831970215,
  0.9692777991294861,
  0.9678888916969299,
  0.9691666960716248,
  0.9702222347259521,
  0.9712222218513489]}

 

# inspect the training results
# plot loss, acc, val_loss and val_acc
# training and validation loss curves
import matplotlib.pyplot as plt
his_dict = history.history # dict: loss/acc for training, the val_ prefix marks validation

loss = his_dict['loss'] # training loss
val_loss = his_dict['val_loss'] # validation loss

epochs = range(1, len(loss)+1)
fig = plt.figure(figsize = (10, 5))

# plot the training and validation loss
ax1 = fig.add_subplot(1,2,1)
ax1.plot(epochs, loss, color = 'blue', label = 'train_loss')
ax1.plot(epochs, val_loss, color = 'orange', label = 'val_loss')
ax1.set_title('train and val loss')
ax1.set_xlabel('epochs')
ax1.set_ylabel('loss')
ax1.legend()

acc = his_dict['acc'] # training accuracy
val_acc = his_dict['val_acc'] # validation accuracy

# plot the training and validation accuracy
ax2 = fig.add_subplot(1,2,2)
ax2.plot(epochs, acc, color = 'blue', label = 'train_acc')
ax2.plot(epochs, val_acc, color = 'orange', label = 'val_acc')
ax2.set_title('train and val acc')
ax2.set_xlabel('epochs')
ax2.set_ylabel('acc')
ax2.legend()
plt.show()

 

# evaluate the model on the test data
# observation: performance on the training data is high, but the validation and test data score lower
# => overfitting; the loss curves diverge around epoch 5, so training should stop about there
#    => reduce epochs, or adjust random_state / the test split
model.evaluate(x_test, y_test)
# [0.12191680818796158, 0.9718999862670898]  # close to the validation performance

313/313 [==============================] - 0s 1ms/step - loss: 0.1437 - acc: 0.9696
[0.14366137981414795, 0.9696000218391418]
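
Besides simply lowering epochs, one common way to act on the "stop around epoch 5" observation is an EarlyStopping callback that watches the validation loss; a sketch (the patience value is an arbitrary choice):

from tensorflow.keras.callbacks import EarlyStopping
# stop once val_loss has not improved for 3 consecutive epochs and keep the best weights seen
early_stop = EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)
history = model.fit(x_train, y_train,
                    epochs = 30,
                    batch_size = 128,
                    validation_data = (x_val, y_val),
                    callbacks = [early_stop])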

 

# inspect the predictions
np.set_printoptions(precision=7)
results = model.predict(x_test) # predicted class probabilities
import matplotlib.pyplot as plt
# argmax: index of the largest value in each row of results
arg_results = np.argmax(results, axis = -1)
idx = 6
plt.imshow(x_test[idx].reshape(28, 28))
plt.title('predicted value of the image : '+str(arg_results[idx]), fontsize=15)
plt.show()

# confusion matrix
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
# np.argmax(y_test, axis = -1) returns the index of the largest value, i.e. the class label
cm = confusion_matrix(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1))
cm

array([[ 958,    0,    3,    2,    0,    3,    6,    3,    2,    3],
       [   1, 1118,    2,    4,    0,    0,    4,    1,    5,    0],
       [   3,    0, 1008,    5,    2,    0,    1,    7,    6,    0],
       [   0,    1,    8,  982,    1,    2,    0,    1,    7,    8],
       [   2,    0,    5,    1,  929,    0,    7,    3,    1,   34],
       [   1,    0,    0,   13,    2,  853,    9,    3,    6,    5],
       [   2,    3,    0,    1,    3,    4,  941,    2,    1,    1],
       [   0,    4,   11,    3,    3,    0,    0,  992,    2,   13],
       [   4,    0,    3,    7,    5,    5,    5,    4,  933,    8],
       [   1,    3,    0,    1,    8,    2,    1,    4,    7,  982]],
      dtype=int64)
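
The diagonal of the confusion matrix holds the correctly classified counts, so per-class accuracy (recall) and the overall accuracy can be read off directly:

# correct predictions per class divided by the number of true samples in that class
per_class_acc = cm.diagonal() / cm.sum(axis=1)
print(np.round(per_class_acc, 3))
# overall accuracy: correct predictions (the trace) over all samples
print(cm.trace() / cm.sum()) # ~0.9696, matching model.evaluate above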

 

import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize = (7,7))
sns.heatmap(cm, annot = True, fmt = 'd', cmap = 'Blues')
plt.xlabel('predicted label', fontsize = 15)
plt.ylabel('true label', fontsize = 15)
plt.show()

df.value_counts().sort_index()

0     980
1    1135
2    1032
3    1010
4     982
5     892
6     958
7    1028
8     974
9    1009
dtype: int64

 

 

print(classification_report(np.argmax(y_test, axis = -1), np.argmax(results, axis = -1)))

              precision    recall  f1-score   support

           0       0.99      0.97      0.98       980
           1       0.99      0.98      0.99      1135
           2       0.97      0.97      0.97      1032
           3       0.96      0.97      0.97      1010
           4       0.97      0.97      0.97       982
           5       0.97      0.97      0.97       892
           6       0.97      0.98      0.97       958
           7       0.97      0.97      0.97      1028
           8       0.96      0.96      0.96       974
           9       0.96      0.96      0.96      1009

    accuracy                           0.97     10000
   macro avg       0.97      0.97      0.97     10000
weighted avg       0.97      0.97      0.97     10000

 

 


OR gate with TensorFlow

import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.losses import mse
tf.random.set_seed(777)
# data
data = np.array([[0,0],[1,0],[0,1],[1,1]])
# labels
label = np.array([[0],[1],[1],[1]])
model = Sequential()
model.add(Dense(1, input_shape = (2,), activation = 'linear')) # a single perceptron
# configure the model
# GD: gradient descent; SGD (stochastic): mini-batch gradient descent
# loss: loss (cost) function
model.compile(optimizer = SGD(), loss = mse, metrics = ['acc'])
# with epochs = 100 training stopped before converging, so 200 is used
model.fit(data, label, epochs = 200)
# the learned weights
model.get_weights()
# classic machine learning needs hand-crafted features; deep learning finds them itself once data and labels are given
model.predict(data)
model.evaluate(data, label) # evaluation: loss and accuracy

Epoch 1/200
1/1 [==============================] - 1s 1s/step - loss: 1.4290 - acc: 0.5000
Epoch 2/200
1/1 [==============================] - 0s 3ms/step - loss: 1.3602 - acc: 0.5000
Epoch 3/200
1/1 [==============================] - 0s 2ms/step - loss: 1.2956 - acc: 0.5000
Epoch 4/200
1/1 [==============================] - 0s 2ms/step - loss: 1.2349 - acc: 0.5000
Epoch 5/200
1/1 [==============================] - 0s 2ms/step - loss: 1.1779 - acc: 0.5000
Epoch 6/200
1/1 [==============================] - 0s 3ms/step - loss: 1.1242 - acc: 0.5000
Epoch 7/200
1/1 [==============================] - 0s 2ms/step - loss: 1.0738 - acc: 0.5000
Epoch 8/200
1/1 [==============================] - 0s 3ms/step - loss: 1.0264 - acc: 0.5000
Epoch 9/200
1/1 [==============================] - 0s 3ms/step - loss: 0.9819 - acc: 0.5000
Epoch 10/200
1/1 [==============================] - 0s 2ms/step - loss: 0.9399 - acc: 0.5000
Epoch 11/200
1/1 [==============================] - 0s 3ms/step - loss: 0.9005 - acc: 0.5000
Epoch 12/200
1/1 [==============================] - 0s 2ms/step - loss: 0.8634 - acc: 0.5000
Epoch 13/200
1/1 [==============================] - 0s 2ms/step - loss: 0.8284 - acc: 0.5000
Epoch 14/200
1/1 [==============================] - 0s 3ms/step - loss: 0.7955 - acc: 0.5000
Epoch 15/200
1/1 [==============================] - 0s 2ms/step - loss: 0.7646 - acc: 0.5000
Epoch 16/200
1/1 [==============================] - 0s 2ms/step - loss: 0.7354 - acc: 0.5000
Epoch 17/200
1/1 [==============================] - 0s 1000us/step - loss: 0.7079 - acc: 0.5000
Epoch 18/200
1/1 [==============================] - 0s 2ms/step - loss: 0.6820 - acc: 0.5000
Epoch 19/200
1/1 [==============================] - 0s 2ms/step - loss: 0.6576 - acc: 0.5000
Epoch 20/200
1/1 [==============================] - 0s 3ms/step - loss: 0.6346 - acc: 0.5000
Epoch 21/200
1/1 [==============================] - 0s 2ms/step - loss: 0.6129 - acc: 0.5000
Epoch 22/200
1/1 [==============================] - 0s 2ms/step - loss: 0.5925 - acc: 0.5000
Epoch 23/200
1/1 [==============================] - 0s 2ms/step - loss: 0.5732 - acc: 0.5000
Epoch 24/200
1/1 [==============================] - 0s 2ms/step - loss: 0.5549 - acc: 0.5000
Epoch 25/200
1/1 [==============================] - 0s 2ms/step - loss: 0.5377 - acc: 0.5000
Epoch 26/200
1/1 [==============================] - 0s 2ms/step - loss: 0.5215 - acc: 0.5000
Epoch 27/200
1/1 [==============================] - 0s 2ms/step - loss: 0.5061 - acc: 0.5000
Epoch 28/200
1/1 [==============================] - 0s 3ms/step - loss: 0.4916 - acc: 0.5000
Epoch 29/200
1/1 [==============================] - 0s 2ms/step - loss: 0.4778 - acc: 0.5000
Epoch 30/200
1/1 [==============================] - 0s 3ms/step - loss: 0.4648 - acc: 0.5000
Epoch 31/200
1/1 [==============================] - 0s 2ms/step - loss: 0.4525 - acc: 0.7500
Epoch 32/200
1/1 [==============================] - 0s 2ms/step - loss: 0.4409 - acc: 0.7500
Epoch 33/200
1/1 [==============================] - 0s 2ms/step - loss: 0.4298 - acc: 0.7500
Epoch 34/200
1/1 [==============================] - 0s 2ms/step - loss: 0.4193 - acc: 0.7500
Epoch 35/200
1/1 [==============================] - 0s 2ms/step - loss: 0.4094 - acc: 0.7500
Epoch 36/200
1/1 [==============================] - 0s 3ms/step - loss: 0.4000 - acc: 0.7500
Epoch 37/200
1/1 [==============================] - 0s 2ms/step - loss: 0.3911 - acc: 0.7500
Epoch 38/200
1/1 [==============================] - 0s 3ms/step - loss: 0.3826 - acc: 0.7500
Epoch 39/200
1/1 [==============================] - 0s 2ms/step - loss: 0.3745 - acc: 0.7500
Epoch 40/200
1/1 [==============================] - 0s 2ms/step - loss: 0.3668 - acc: 0.7500
Epoch 41/200
1/1 [==============================] - 0s 2ms/step - loss: 0.3595 - acc: 0.7500
Epoch 42/200
1/1 [==============================] - 0s 2ms/step - loss: 0.3525 - acc: 0.7500
Epoch 43/200
1/1 [==============================] - 0s 3ms/step - loss: 0.3459 - acc: 0.7500
Epoch 44/200
1/1 [==============================] - 0s 2ms/step - loss: 0.3396 - acc: 0.7500
Epoch 45/200
1/1 [==============================] - 0s 3ms/step - loss: 0.3336 - acc: 0.7500
Epoch 46/200
1/1 [==============================] - 0s 2ms/step - loss: 0.3278 - acc: 0.7500
Epoch 47/200
1/1 [==============================] - 0s 2ms/step - loss: 0.3223 - acc: 0.7500
Epoch 48/200
1/1 [==============================] - 0s 2ms/step - loss: 0.3170 - acc: 0.7500
Epoch 49/200
1/1 [==============================] - 0s 2ms/step - loss: 0.3120 - acc: 0.7500
Epoch 50/200
1/1 [==============================] - 0s 2ms/step - loss: 0.3072 - acc: 0.7500
Epoch 51/200
1/1 [==============================] - 0s 2ms/step - loss: 0.3026 - acc: 0.7500
Epoch 52/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2982 - acc: 0.7500
Epoch 53/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2939 - acc: 0.7500
Epoch 54/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2898 - acc: 0.7500
Epoch 55/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2859 - acc: 0.7500
Epoch 56/200
1/1 [==============================] - 0s 1ms/step - loss: 0.2822 - acc: 0.7500
Epoch 57/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2785 - acc: 0.7500
Epoch 58/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2750 - acc: 0.7500
Epoch 59/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2717 - acc: 0.7500
Epoch 60/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2684 - acc: 0.7500
Epoch 61/200
1/1 [==============================] - 0s 3ms/step - loss: 0.2653 - acc: 0.7500
Epoch 62/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2623 - acc: 0.7500
Epoch 63/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2594 - acc: 0.7500
Epoch 64/200
1/1 [==============================] - 0s 1ms/step - loss: 0.2565 - acc: 0.7500
Epoch 65/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2538 - acc: 0.7500
Epoch 66/200
1/1 [==============================] - 0s 3ms/step - loss: 0.2511 - acc: 0.7500
Epoch 67/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2486 - acc: 0.7500
Epoch 68/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2461 - acc: 0.7500
Epoch 69/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2436 - acc: 0.7500
Epoch 70/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2413 - acc: 0.7500
Epoch 71/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2390 - acc: 0.7500
Epoch 72/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2368 - acc: 0.7500
Epoch 73/200
1/1 [==============================] - 0s 3ms/step - loss: 0.2346 - acc: 0.7500
Epoch 74/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2325 - acc: 0.7500
Epoch 75/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2304 - acc: 0.7500
Epoch 76/200
1/1 [==============================] - 0s 3ms/step - loss: 0.2284 - acc: 0.7500
Epoch 77/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2264 - acc: 0.7500
Epoch 78/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2245 - acc: 0.7500
Epoch 79/200
1/1 [==============================] - 0s 1ms/step - loss: 0.2226 - acc: 0.7500
Epoch 80/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2208 - acc: 0.7500
Epoch 81/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2190 - acc: 0.7500
Epoch 82/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2173 - acc: 0.7500
Epoch 83/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2155 - acc: 0.7500
Epoch 84/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2139 - acc: 0.7500
Epoch 85/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2122 - acc: 0.7500
Epoch 86/200
1/1 [==============================] - 0s 3ms/step - loss: 0.2106 - acc: 0.7500
Epoch 87/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2090 - acc: 0.7500
Epoch 88/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2074 - acc: 0.7500
Epoch 89/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2059 - acc: 0.7500
Epoch 90/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2044 - acc: 0.7500
Epoch 91/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2029 - acc: 0.7500
Epoch 92/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2015 - acc: 0.7500
Epoch 93/200
1/1 [==============================] - 0s 2ms/step - loss: 0.2000 - acc: 0.7500
Epoch 94/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1986 - acc: 0.7500
Epoch 95/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1973 - acc: 0.7500
Epoch 96/200
1/1 [==============================] - 0s 3ms/step - loss: 0.1959 - acc: 0.7500
Epoch 97/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1946 - acc: 0.7500
Epoch 98/200
1/1 [==============================] - 0s 1ms/step - loss: 0.1932 - acc: 0.7500
Epoch 99/200
1/1 [==============================] - 0s 1ms/step - loss: 0.1919 - acc: 0.7500
Epoch 100/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1907 - acc: 0.7500
Epoch 101/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1894 - acc: 0.7500
Epoch 102/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1881 - acc: 0.7500
Epoch 103/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1869 - acc: 0.7500
Epoch 104/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1857 - acc: 0.7500
Epoch 105/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1845 - acc: 0.7500
Epoch 106/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1833 - acc: 0.7500
Epoch 107/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1822 - acc: 0.7500
Epoch 108/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1810 - acc: 0.7500
Epoch 109/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1799 - acc: 0.7500
Epoch 110/200
1/1 [==============================] - 0s 3ms/step - loss: 0.1787 - acc: 0.7500
Epoch 111/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1776 - acc: 0.7500
Epoch 112/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1765 - acc: 0.7500
Epoch 113/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1755 - acc: 0.7500
Epoch 114/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1744 - acc: 0.7500
Epoch 115/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1733 - acc: 0.7500
Epoch 116/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1723 - acc: 0.7500
Epoch 117/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1712 - acc: 0.7500
Epoch 118/200
1/1 [==============================] - 0s 1ms/step - loss: 0.1702 - acc: 0.7500
Epoch 119/200
1/1 [==============================] - 0s 1ms/step - loss: 0.1692 - acc: 0.7500
Epoch 120/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1682 - acc: 0.7500
Epoch 121/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1672 - acc: 0.7500
Epoch 122/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1663 - acc: 0.7500
Epoch 123/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1653 - acc: 0.7500
Epoch 124/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1643 - acc: 0.7500
Epoch 125/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1634 - acc: 0.7500
Epoch 126/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1625 - acc: 0.7500
Epoch 127/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1615 - acc: 0.7500
Epoch 128/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1606 - acc: 0.7500
Epoch 129/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1597 - acc: 0.7500
Epoch 130/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1588 - acc: 0.7500
Epoch 131/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1579 - acc: 0.7500
Epoch 132/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1570 - acc: 0.7500
Epoch 133/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1562 - acc: 0.7500
Epoch 134/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1553 - acc: 0.7500
Epoch 135/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1545 - acc: 0.7500
Epoch 136/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1536 - acc: 0.7500
Epoch 137/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1528 - acc: 0.7500
Epoch 138/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1520 - acc: 0.7500
Epoch 139/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1511 - acc: 0.7500
Epoch 140/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1503 - acc: 0.7500
Epoch 141/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1495 - acc: 0.7500
Epoch 142/200
1/1 [==============================] - 0s 3ms/step - loss: 0.1487 - acc: 0.7500
Epoch 143/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1480 - acc: 0.7500
Epoch 144/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1472 - acc: 0.7500
Epoch 145/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1464 - acc: 0.7500
Epoch 146/200
1/1 [==============================] - 0s 3ms/step - loss: 0.1456 - acc: 0.7500
Epoch 147/200
1/1 [==============================] - 0s 3ms/step - loss: 0.1449 - acc: 0.7500
Epoch 148/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1441 - acc: 0.7500
Epoch 149/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1434 - acc: 0.7500
Epoch 150/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1427 - acc: 0.7500
Epoch 151/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1419 - acc: 0.7500
Epoch 152/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1412 - acc: 0.7500
Epoch 153/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1405 - acc: 0.7500
Epoch 154/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1398 - acc: 0.7500
Epoch 155/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1391 - acc: 0.7500
Epoch 156/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1384 - acc: 0.7500
Epoch 157/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1377 - acc: 0.7500
Epoch 158/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1370 - acc: 0.7500
Epoch 159/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1364 - acc: 0.7500
Epoch 160/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1357 - acc: 0.7500
Epoch 161/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1350 - acc: 0.7500
Epoch 162/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1344 - acc: 0.7500
Epoch 163/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1337 - acc: 0.7500
Epoch 164/200
1/1 [==============================] - 0s 1ms/step - loss: 0.1331 - acc: 0.7500
Epoch 165/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1325 - acc: 0.7500
Epoch 166/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1318 - acc: 0.7500
Epoch 167/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1312 - acc: 0.7500
Epoch 168/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1306 - acc: 0.7500
Epoch 169/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1300 - acc: 0.7500
Epoch 170/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1294 - acc: 0.7500
Epoch 171/200
1/1 [==============================] - 0s 3ms/step - loss: 0.1288 - acc: 0.7500
Epoch 172/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1282 - acc: 0.7500
Epoch 173/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1276 - acc: 0.7500
Epoch 174/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1270 - acc: 0.7500
Epoch 175/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1264 - acc: 0.7500
Epoch 176/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1258 - acc: 0.7500
Epoch 177/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1253 - acc: 0.7500
Epoch 178/200
1/1 [==============================] - 0s 3ms/step - loss: 0.1247 - acc: 0.7500
Epoch 179/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1242 - acc: 0.7500
Epoch 180/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1236 - acc: 0.7500
Epoch 181/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1231 - acc: 0.7500
Epoch 182/200
1/1 [==============================] - 0s 3ms/step - loss: 0.1225 - acc: 0.7500
Epoch 183/200
1/1 [==============================] - 0s 3ms/step - loss: 0.1220 - acc: 0.7500
Epoch 184/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1214 - acc: 0.7500
Epoch 185/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1209 - acc: 0.7500
Epoch 186/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1204 - acc: 0.7500
Epoch 187/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1199 - acc: 0.7500
Epoch 188/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1194 - acc: 0.7500
Epoch 189/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1189 - acc: 0.7500
Epoch 190/200
1/1 [==============================] - 0s 1ms/step - loss: 0.1184 - acc: 0.7500
Epoch 191/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1179 - acc: 0.7500
Epoch 192/200
1/1 [==============================] - 0s 3ms/step - loss: 0.1174 - acc: 1.0000
Epoch 193/200
1/1 [==============================] - 0s 1ms/step - loss: 0.1169 - acc: 1.0000
Epoch 194/200
1/1 [==============================] - 0s 1ms/step - loss: 0.1164 - acc: 1.0000
Epoch 195/200
1/1 [==============================] - 0s 1ms/step - loss: 0.1159 - acc: 1.0000
Epoch 196/200
1/1 [==============================] - 0s 1ms/step - loss: 0.1154 - acc: 1.0000
Epoch 197/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1150 - acc: 1.0000
Epoch 198/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1145 - acc: 1.0000
Epoch 199/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1140 - acc: 1.0000
Epoch 200/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1136 - acc: 1.0000
1/1 [==============================] - 0s 78ms/step - loss: 0.1131 - acc: 1.0000


[0.11312820017337799, 1.0]
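
The single linear unit ends up with a weight vector w and a bias b, and its prediction is just the dot product of w with the input plus b; thresholding that value at 0.5 should reproduce the OR table, which is consistent with the accuracy of 1.0 reported above. A sketch using the weights the model returns (the exact numbers depend on the run):

import numpy as np
w, b = model.get_weights()                  # kernel of shape (2, 1) and bias of shape (1,)
scores = data @ w + b                       # raw linear outputs, one per input pair
print(scores.flatten())
print((scores > 0.5).astype(int).flatten()) # expected to give [0 1 1 1]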

 

AND gate with TensorFlow

import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.losses import mse
tf.random.set_seed(777)
# data
data = np.array([[0,0],[1,0],[0,1],[1,1]])
# labels
label = np.array([[0],[0],[0],[1]]) # AND gate
model = Sequential()

model.add(Dense(1, input_shape = (2,), activation = 'linear')) # a single perceptron
model.compile(optimizer = SGD(), loss = mse, metrics = ['acc'])
model.fit(data, label, epochs = 2000, verbose = 0)
model.get_weights()
model.predict(data).flatten()
model.evaluate(data, label) # evaluation: loss and accuracy

1/1 [==============================] - 0s 53ms/step - loss: 0.0625 - acc: 1.0000
[0.06250044703483582, 1.0]

 

from tensorflow import keras
import numpy
x = numpy.array([0,1,2,3,4])
y = x*2+1 # [1,3,5,7,9]
model = keras.models.Sequential()
# a single Dense layer
model.add(keras.layers.Dense(1, input_shape=(1,)))
# loss: mse; the activation defaults to linear
model.compile('SGD','mse')
# verbose = 0 suppresses the training log
model.fit(x[:2], y[:2], epochs=1000, verbose=0)
model.get_weights()
# [array([[1.9739313]], dtype=float32), array([1.0161117], dtype=float32)]
# weight ~ 2 and bias ~ 1, close to the true line y = 2x + 1

# predict the remaining inputs
model.predict(x[2:])

array([[5.000806],
       [7.001389],
       [9.001972]], dtype=float32)

 

model.predict([5])

# array([[11.002556]], dtype=float32)
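
Since the layer is linear, predict is simply weight * x + bias with the values returned by get_weights(); the prediction for 5 can be reproduced by hand:

w, b = model.get_weights()               # kernel of shape (1, 1) and bias of shape (1,)
print(float(w[0, 0]) * 5 + float(b[0]))  # should match model.predict([5]) above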

 

# vector / matrix product
import tensorflow as tf
from tensorflow import keras
import numpy
# random number generator: uniformly distributed floats
x = tf.random.uniform((10,5)) # 10 rows, 5 columns
w = tf.random.uniform((5,3)) # 5 rows, 3 columns
# matrix multiplication: (10, 5) x (5, 3) -> (10, 3)
d = tf.matmul(x, w)
print(d.shape)

# (10, 3)

 

x

<tf.Tensor: shape=(10, 5), dtype=float32, numpy=
array([[0.1357429 , 0.07509017, 0.2639438 , 0.47604764, 0.39591897],
       [0.14548802, 0.17393434, 0.00936472, 0.8090905 , 0.617025  ],
       [0.8713819 , 0.558359  , 0.17226672, 0.50340676, 0.18701088],
       [0.9073597 , 0.717615  , 0.38108468, 0.8958354 , 0.59624827],
       [0.77847326, 0.4488796 , 0.14225698, 0.8686327 , 0.03972971],
       [0.3629743 , 0.55276537, 0.3255931 , 0.5238236 , 0.05080891],
       [0.01347697, 0.3558432 , 0.77311885, 0.48737752, 0.5625943 ],
       [0.02250803, 0.8551339 , 0.36489332, 0.5632981 , 0.09144831],
       [0.25097954, 0.5333061 , 0.426386  , 0.19805324, 0.28281295],
       [0.99601805, 0.4646746 , 0.0783782 , 0.66289246, 0.17973018]],
      dtype=float32)>

 

# cannot be implemented with a single linear layer
# XOR gate with TensorFlow
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.losses import mse
tf.random.set_seed(777)
# data
data = np.array([[0,0],[1,0],[0,1],[1,1]])
# labels
label = np.array([[0],[1],[1],[0]])
model = Sequential()

model.add(Dense(1, input_shape = (2,), activation = 'linear')) # a single perceptron
model.compile(optimizer = SGD(), loss = mse, metrics = ['acc'])
model.fit(data, label, epochs = 2000, verbose = 0)
model.get_weights()
model.predict(data).flatten()
model.evaluate(data, label) # evaluation: loss and accuracy
# 1/1 [==============================] - 0s 52ms/step - loss: 0.2500 - acc: 0.5000
# loss 0.25, accuracy 0.5 => a single linear unit fails: XOR is not linearly separable

1/1 [==============================] - 0s 52ms/step - loss: 0.2500 - acc: 0.5000
[0.25, 0.5]

 

XOR gate with TensorFlow (two-layer network)

import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import RMSprop, SGD
from tensorflow.keras.losses import mse
tf.random.set_seed(777)
# data
data = np.array([[0,0],[1,0],[0,1],[1,1]])
# labels
label = np.array([[0],[1],[1],[0]])
model = Sequential()

# two layers
# hidden layer with 32 units, relu activation
# relu: negative values become 0, positive values pass through unchanged
model.add(Dense(32, input_shape = (2,), activation = 'relu'))
# sigmoid returns values between 0 and 1
model.add(Dense(1, activation = 'sigmoid')) # output perceptron

# optimizer: the strategy for moving the weights toward the optimum
# RMSprop: improves on adagrad by using a decaying average of recent gradients to set the step size
# adagrad: large steps in unexplored directions, small steps where it has already been;
#          the learning rate can shrink too much when the gradients vary a lot

# model.compile(optimizer = RMSprop(), loss = mse, metrics = ['acc'])
model.compile(optimizer = SGD(), loss = mse, metrics = ['acc'])
model.fit(data, label, epochs = 100)
model.get_weights()
predict = model.predict(data).flatten()
model.evaluate(data, label) # evaluation: loss and accuracy
print(predict)
# result recorded from an earlier run (thresholding those outputs at 0.5 gives the XOR table);
# the SGD run logged below stays at acc 0.25:
# 1/1 [==============================] - 0s 56ms/step - loss: 0.2106 - acc: 1.0000
# [0.48657197 0.54643464 0.55219495 0.44657207]

Epoch 1/100
1/1 [==============================] - 0s 163ms/step - loss: 0.2646 - acc: 0.5000
Epoch 2/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2644 - acc: 0.2500
Epoch 3/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2643 - acc: 0.2500
Epoch 4/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2642 - acc: 0.2500
Epoch 5/100
1/1 [==============================] - 0s 4ms/step - loss: 0.2640 - acc: 0.2500
Epoch 6/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2639 - acc: 0.2500
Epoch 7/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2638 - acc: 0.2500
Epoch 8/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2637 - acc: 0.2500
Epoch 9/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2635 - acc: 0.2500
Epoch 10/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2634 - acc: 0.2500
Epoch 11/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2633 - acc: 0.2500
Epoch 12/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2632 - acc: 0.2500
Epoch 13/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2630 - acc: 0.2500
Epoch 14/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2629 - acc: 0.2500
Epoch 15/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2628 - acc: 0.2500
Epoch 16/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2627 - acc: 0.2500
Epoch 17/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2625 - acc: 0.2500
Epoch 18/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2624 - acc: 0.2500
Epoch 19/100
1/1 [==============================] - 0s 4ms/step - loss: 0.2623 - acc: 0.2500
Epoch 20/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2622 - acc: 0.2500
Epoch 21/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2620 - acc: 0.2500
Epoch 22/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2619 - acc: 0.2500
Epoch 23/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2618 - acc: 0.2500
Epoch 24/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2617 - acc: 0.2500
Epoch 25/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2616 - acc: 0.2500
Epoch 26/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2614 - acc: 0.2500
Epoch 27/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2613 - acc: 0.2500
Epoch 28/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2612 - acc: 0.2500
Epoch 29/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2611 - acc: 0.2500
Epoch 30/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2610 - acc: 0.2500
Epoch 31/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2608 - acc: 0.2500
Epoch 32/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2607 - acc: 0.2500
Epoch 33/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2606 - acc: 0.2500
Epoch 34/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2605 - acc: 0.2500
Epoch 35/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2604 - acc: 0.2500
Epoch 36/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2602 - acc: 0.2500
Epoch 37/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2601 - acc: 0.2500
Epoch 38/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2600 - acc: 0.2500
Epoch 39/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2599 - acc: 0.2500
Epoch 40/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2598 - acc: 0.2500
Epoch 41/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2597 - acc: 0.2500
Epoch 42/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2596 - acc: 0.2500
Epoch 43/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2594 - acc: 0.2500
Epoch 44/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2593 - acc: 0.2500
Epoch 45/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2592 - acc: 0.2500
Epoch 46/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2591 - acc: 0.2500
Epoch 47/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2590 - acc: 0.2500
Epoch 48/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2589 - acc: 0.2500
Epoch 49/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2588 - acc: 0.2500
Epoch 50/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2586 - acc: 0.2500
Epoch 51/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2585 - acc: 0.2500
Epoch 52/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2584 - acc: 0.2500
Epoch 53/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2583 - acc: 0.2500
Epoch 54/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2582 - acc: 0.2500
Epoch 55/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2581 - acc: 0.2500
Epoch 56/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2580 - acc: 0.2500
Epoch 57/100
1/1 [==============================] - 0s 5ms/step - loss: 0.2579 - acc: 0.2500
Epoch 58/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2578 - acc: 0.2500
Epoch 59/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2576 - acc: 0.2500
Epoch 60/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2575 - acc: 0.2500
Epoch 61/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2574 - acc: 0.2500
Epoch 62/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2573 - acc: 0.2500
Epoch 63/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2572 - acc: 0.2500
Epoch 64/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2571 - acc: 0.2500
Epoch 65/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2570 - acc: 0.2500
Epoch 66/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2569 - acc: 0.2500
Epoch 67/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2568 - acc: 0.2500
Epoch 68/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2567 - acc: 0.2500
Epoch 69/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2566 - acc: 0.2500
Epoch 70/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2565 - acc: 0.2500
Epoch 71/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2563 - acc: 0.2500
Epoch 72/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2562 - acc: 0.2500
Epoch 73/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2561 - acc: 0.2500
Epoch 74/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2560 - acc: 0.2500
Epoch 75/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2559 - acc: 0.2500
Epoch 76/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2558 - acc: 0.2500
Epoch 77/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2557 - acc: 0.2500
Epoch 78/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2556 - acc: 0.2500
Epoch 79/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2555 - acc: 0.2500
Epoch 80/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2554 - acc: 0.2500
Epoch 81/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2553 - acc: 0.2500
Epoch 82/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2552 - acc: 0.2500
Epoch 83/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2551 - acc: 0.2500
Epoch 84/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2550 - acc: 0.2500
Epoch 85/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2549 - acc: 0.2500
Epoch 86/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2548 - acc: 0.2500
Epoch 87/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2547 - acc: 0.2500
Epoch 88/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2546 - acc: 0.2500
Epoch 89/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2545 - acc: 0.2500
Epoch 90/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2544 - acc: 0.2500
Epoch 91/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2543 - acc: 0.2500
Epoch 92/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2542 - acc: 0.2500
Epoch 93/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2541 - acc: 0.2500
Epoch 94/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2540 - acc: 0.2500
Epoch 95/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2539 - acc: 0.2500
Epoch 96/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2538 - acc: 0.2500
Epoch 97/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2537 - acc: 0.2500
Epoch 98/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2536 - acc: 0.2500
Epoch 99/100
1/1 [==============================] - 0s 2ms/step - loss: 0.2535 - acc: 0.2500
Epoch 100/100
1/1 [==============================] - 0s 3ms/step - loss: 0.2534 - acc: 0.2500
WARNING:tensorflow:8 out of the last 9 calls to <function Model.make_predict_function.<locals>.predict_function at 0x0000026102109700> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for  more details.
WARNING:tensorflow:7 out of the last 7 calls to <function Model.make_test_function.<locals>.test_function at 0x0000026102109B80> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for  more details.
1/1 [==============================] - 0s 56ms/step - loss: 0.2533 - acc: 0.2500
[0.50530165 0.44862053 0.49225846 0.442587  ]
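
With SGD, the mse loss and only 100 epochs the two-layer network is still stuck near acc 0.25, as the log above shows. A configuration that typically does learn XOR is the commented-out RMSprop optimizer combined with binary cross-entropy and more epochs; a sketch (the number of epochs needed varies with the random initialization):

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import RMSprop

data = np.array([[0,0],[1,0],[0,1],[1,1]])
label = np.array([[0],[1],[1],[0]])

xor_model = Sequential()
xor_model.add(Dense(32, input_shape = (2,), activation = 'relu')) # hidden layer
xor_model.add(Dense(1, activation = 'sigmoid'))                   # output in (0, 1)
# binary_crossentropy matches the sigmoid output better than mse for 0/1 labels
xor_model.compile(optimizer = RMSprop(), loss = 'binary_crossentropy', metrics = ['acc'])
xor_model.fit(data, label, epochs = 1000, verbose = 0)
print(xor_model.predict(data).flatten()) # expected to be close to [0, 1, 1, 0]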

 


 

 

 


# Perceptron (1957)

AND truth table
x1 | x2 | y
0  | 0  | 0
0  | 1  | 0
1  | 0  | 0
1  | 1  | 1

x1, x2 : inputs
w1, w2 : weights
b      : bias (threshold)
The perceptron takes the weighted sum of its inputs and compares it with a threshold:
y = 0  if  x1*w1 + x2*w2 <= b
y = 1  if  x1*w1 + x2*w2 >  b
With suitable values of w1, w2 and b this rule reproduces the AND gate.
In the code below the threshold is moved over and written as a bias added to the sum,
i.e. y = 1 when x1*w1 + x2*w2 + b > 0, which is equivalent with b = -threshold.

 

# AND gate

import numpy as np
def AND(x1, x2):
    x = np.array([x1, x2])
    w = np.array([0.5, 0.5])
    b = -0.5  # -0.7 or -0.8 would work just as well
    tmp = np.sum(w*x) + b
    if tmp <= 0:
        return 0
    else:
        return 1
# check the perceptron on every input combination
for xs in [(0,0),(1,0),(0,1),(1,1)]:
    y = AND(xs[0], xs[1])
    print(str(xs) + "=>" + str(y))
# learning means finding weights and a bias like these automatically,
# i.e. searching for the optimal w and b for the task

(0, 0)=>0
(1, 0)=>0
(0, 1)=>0
(1, 1)=>1

 

# OR gate

import numpy as np
def OR(x1, x2):
    x = np.array([x1, x2])
    w = np.array([0.5, 0.5])
    b = -0.2  # with these weights any bias strictly between -0.5 and 0 gives OR
    tmp = np.sum(w*x) + b
    if tmp <= 0:
        return 0
    else:
        return 1
# check the perceptron on every input combination
for xs in [(0,0),(1,0),(0,1),(1,1)]:
    y = OR(xs[0], xs[1])
    print(str(xs) + "=>" + str(y))
# only the bias changed (from -0.5 to -0.2); the same structure now computes OR

(0, 0)=>0
(1, 0)=>1
(0, 1)=>1
(1, 1)=>1

 

# NAND gate

import numpy as np
def NAND(x1, x2):
    x = np.array([x1, x2])
    w = np.array([-0.5, -0.5])
    b = 0.8  # negative weights with a positive bias invert AND (0.7 would also work)
    tmp = np.sum(w*x) + b
    if tmp <= 0:
        return 0
    else:
        return 1
# check the perceptron on every input combination
for xs in [(0,0),(1,0),(0,1),(1,1)]:
    y = NAND(xs[0], xs[1])
    print(str(xs) + "=>" + str(y))


(0, 0)=>1
(1, 0)=>1
(0, 1)=>1
(1, 1)=>0

 

# XOR gate

# a single perceptron cannot express XOR; a two-layer (multi-layer) network can,
# built here by combining the gates defined above
import numpy as np
def XOR(x1, x2):
    s1 = NAND(x1, x2)
    s2 = OR(x1, x2)
    y = AND(s1, s2)  # second layer combines the two first-layer outputs
    return y
# check on every input combination
for xs in [(0,0),(1,0),(0,1),(1,1)]:
    y = XOR(xs[0], xs[1])
    print(str(xs) + "=>" + str(y))


(0, 0)=>0
(1, 0)=>1
(0, 1)=>1
(1, 1)=>0

# XOR outputs 1 when the two inputs differ and 0 when they are the same.
# Plotted in the x1-x2 plane, the 1s and 0s cannot be separated by a single
# straight line (not linearly separable), so one perceptron alone cannot learn it.
# Stacking perceptrons into a multi-layer perceptron solves it, exactly as the
# NAND/OR/AND combination above shows; the Keras sketch below learns the same mapping from data.
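
As a complement to the hand-wired gates, here is a minimal sketch (not from the original post) that lets a small two-layer Keras network learn XOR from the four labeled examples. The hidden-layer size, optimizer and epoch count are illustrative choices and may need adjusting (more epochs or a different seed) to reach 100% accuracy.

import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

tf.random.set_seed(777)
data = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
label = np.array([[0], [1], [1], [0]])   # XOR labels

model = Sequential()
model.add(Dense(8, input_shape=(2,), activation='relu'))   # hidden layer
model.add(Dense(1, activation='sigmoid'))                   # output in (0, 1)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.fit(data, label, epochs=500, verbose=0)
print((model.predict(data) > 0.5).astype(int).ravel())      # expected: [0 1 1 0]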

 

# Activation functions: introduce non-linearity into the network

# Step function: returns 0 or 1

import numpy as np
import matplotlib.pyplot as plt
def step_function(x):
    # returns 1 where x > 0, otherwise 0 (np.int is deprecated, so plain int is used)
    return np.array(x > 0, dtype=int)
x = np.arange(-5.0, 5.0, 0.1)
y = step_function(x)
plt.plot(x, y)
plt.ylim(-0.1, 1.1)
plt.show()

# Sigmoid: squashes any input into the range 0 ~ 1.0

def sigmoid(x):
    return 1 / (1 + np.exp(-x))  # commonly used for classification outputs
x = np.arange(-5.0, 5.0, 0.1)
y = sigmoid(x)
plt.plot(x, y)
plt.ylim(-0.1, 1.1)
plt.show()

# ReLU: 0 for negative inputs, x itself for 0 and above

def relu(x):
    return np.maximum(0, x)  # widely used in hidden layers and for regression outputs
x = np.arange(-5.0, 5.0, 0.1)
y = relu(x)
plt.plot(x, y)
plt.show()

# Cost function (loss function): how do we move toward the optimal weights?
# Follow the gradient of the loss downhill until the derivative is (close to) zero.

Gradient descent variants (a small numeric sketch follows below):

batch GD - uses the whole training set for every update

stochastic GD (SGD) - updates after each individual sample

mini-batch GD - updates on small batches; the usual compromise between the two
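
To make the difference concrete, here is a minimal NumPy sketch (not from the original post) that fits a single weight to y ≈ 2x with mini-batch gradient descent; setting batch_size to the full data size gives batch GD, and setting it to 1 gives SGD. The toy data, learning rate and batch size are illustrative choices.

import numpy as np

rng = np.random.default_rng(0)
X = rng.uniform(-1, 1, size=100)
y = 2 * X + rng.normal(0, 0.05, size=100)   # toy data: y is roughly 2x

w = 0.0            # single weight, no bias, to keep the sketch small
lr = 0.1
batch_size = 10    # 100 -> batch GD, 1 -> stochastic GD

for epoch in range(20):
    idx = rng.permutation(len(X))                 # shuffle each epoch
    for start in range(0, len(X), batch_size):
        b = idx[start:start + batch_size]
        grad = np.mean(2 * (w * X[b] - y[b]) * X[b])   # d(MSE)/dw on the mini-batch
        w -= lr * grad
print(w)   # should end up close to 2.0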

 

# Backpropagation
# Compare the prediction with the target using the loss function, then push the error
# backwards through the network with the chain rule to get each weight's gradient,
# update the weights, and repeat (a one-neuron NumPy sketch follows below).
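
A minimal sketch (not part of the original post) of the idea for a single sigmoid neuron with one input: forward pass, squared-error loss, chain rule backwards, weight update. The initial values and learning rate are arbitrary.

import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

x, t = 1.0, 1.0        # one input and its target
w, b = 0.2, 0.0        # initial weight and bias (arbitrary)
lr = 0.5

for step in range(200):
    z = w * x + b
    y = sigmoid(z)             # forward pass
    loss = (y - t) ** 2        # squared-error loss
    dy = 2 * (y - t)           # dL/dy
    dz = dy * y * (1 - y)      # chain rule through the sigmoid
    w -= lr * dz * x           # dL/dw = dz * x
    b -= lr * dz               # dL/db = dz
print(y, loss)                 # y moves toward the target 1.0, loss toward 0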

 

import numpy as np
import tensorflow as tf
print(tf.__version__)

2.4.1

 

 

a = tf.constant(2)               # declare a scalar as a tensor
b = tf.constant([1, 2])          # a vector
c = tf.constant([[1, 2],[3, 4]]) # a 2x2 matrix
# rank : returns the number of dimensions of a tensor
print(tf.rank(a))
print(tf.rank(b))
print(tf.rank(c))

tf.Tensor(0, shape=(), dtype=int32)
tf.Tensor(1, shape=(), dtype=int32)
tf.Tensor(2, shape=(), dtype=int32)

 

# addition
a = tf.constant(3)
b = tf.constant(2)
print(tf.add(a,b))

# tf.Tensor(5, shape=(), dtype=int32)


# subtraction
print(tf.subtract(a,b))
# multiplication
print(tf.multiply(a,b))
# division
print(tf.divide(a,b))
# .numpy() pulls the result out as a plain Python/NumPy value
print(tf.divide(a,b).numpy())
print(tf.multiply(a,b).numpy())

tf.Tensor(1, shape=(), dtype=int32)
tf.Tensor(6, shape=(), dtype=int32)
tf.Tensor(1.5, shape=(), dtype=float64)
1.5
6

 

c_square = np.square(tf.add(a,b).numpy(), dtype=np.float32)
c_square

# 25.0

 

c_tensor = tf.convert_to_tensor(c_square)
c_tensor

# <tf.Tensor: shape=(), dtype=float32, numpy=25.0>

 

@tf.function  # decorator: compiles this Python function into a TensorFlow graph function
def square_pos1(x) :
    if x > 0 :
        x = x*x
    else :
        x = x*-1
    return x
print(square_pos1(tf.constant(2)))
print(square_pos1.__class__)

# tf.Tensor(4, shape=(), dtype=int32)
# <class 'tensorflow.python.eager.def_function.Function'>

 

def square_pos2(x) :
    if x > 0 :
        x = x*x
    else :
        x = x*-1
    return x
print(square_pos2(tf.constant(2)))
print(square_pos2.__class__)

# tf.Tensor(4, shape=(), dtype=int32)
# <class 'function'>
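
The retracing warning seen in the training log earlier comes from this mechanism: a @tf.function is traced (compiled) once per input signature, and passing plain Python values instead of tensors forces a new trace for every distinct value. A small sketch (not from the original post) makes the tracing visible with a Python-side print, which only runs while tracing:

import tensorflow as tf

@tf.function
def square(x):
    print("tracing with", x)   # Python side effect: executes only during tracing
    return x * x

square(tf.constant(2))   # traced once for int32 scalar tensors
square(tf.constant(3))   # same signature -> the existing trace is reused
square(2)                # Python int -> a new trace
square(3)                # another Python int -> yet another trace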

 

# OR gate with TensorFlow/Keras
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.losses import mse
tf.random.set_seed(777)
# input data
data = np.array([[0,0],[1,0],[0,1],[1,1]])
# labels
label = np.array([[0],[1],[1],[1]])
model = Sequential()
model.add(Dense(1, input_shape = (2,), activation = 'linear')) # a single perceptron (one Dense unit)
model.compile(optimizer = SGD(), loss = mse, metrics = ['acc'])
# 100 epochs was not quite enough to converge, so train for 200
model.fit(data, label, epochs = 200)
# the learned weights and bias
model.get_weights()
# unlike the hand-picked weights above, the network finds suitable values on its own from the labeled data

Epoch 1/200
1/1 [==============================] - 1s 700ms/step - loss: 1.4290 - acc: 0.5000
Epoch 2/200
1/1 [==============================] - 0s 2ms/step - loss: 1.3602 - acc: 0.5000
(... loss falls steadily; acc reaches 0.7500 at epoch 31 ...)
Epoch 31/200
1/1 [==============================] - 0s 2ms/step - loss: 0.4525 - acc: 0.7500
(... acc reaches 1.0000 at epoch 192 ...)
Epoch 192/200
1/1 [==============================] - 0s 2ms/step - loss: 0.1174 - acc: 1.0000
(...)
Epoch 200/200
1/1 [==============================] - 0s 3ms/step - loss: 0.1136 - acc: 1.0000
[array([[0.5995085 ],
        [0.06513146]], dtype=float32),
 array([0.4472612], dtype=float32)]
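
To check what those weights mean in practice, a small follow-up sketch (not from the original post) runs the trained model back over the four inputs and thresholds the linear output at 0.5; with the weights above this reproduces the OR truth table.

preds = model.predict(data)               # raw linear outputs, one per input row
print(preds.ravel())
print((preds > 0.5).astype(int).ravel())  # expected: [0 1 1 1]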

 

 


Data file: academy1.csv (attached to the original post)

import pandas as pd
data = pd.read_csv('academy1.csv')
data.info()

<class 'pandas.core.frame.DataFrame'>
RangeIndex: 32 entries, 0 to 31
Data columns (total 3 columns):
 #   Column  Non-Null Count  Dtype
---  ------  --------------  -----
 0   학번      32 non-null     int64
 1   국어점수    32 non-null     int64
 2   영어점수    32 non-null     int64
dtypes: int64(3)
memory usage: 896.0 bytes

 

from sklearn.cluster import KMeans
km = KMeans(n_clusters = 3)
km.fit(data.iloc[:,1:])
# fit_predict refits the model and returns a cluster label for every row
aaa = km.fit_predict(data.iloc[:,1:])

 

import mglearn
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', family = 'Malgun Gothic')
mglearn.plots.plot_kmeans_algorithm()
plt.show()

 

mglearn.plots.plot_kmeans_boundaries()
plt.show()

 

mglearn.discrete_scatter(data.iloc[:,1], data.iloc[:,2], km.labels_)
plt.legend(["클러스터 0","클러스터 1","클러스터 2"], loc='best')
plt.xlabel("국어점수")
plt.ylabel("영어점수")
plt.show()

# Which cluster does a student with a Korean score of 100 and an English score of 80 belong to?
km.predict([[100, 80]])
# cluster 0

# array([0])
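
To see what each cluster label actually represents, a small sketch (not from the original post) prints the cluster centroids, i.e. the average Korean/English score of each cluster, and the cluster sizes.

import numpy as np
print(km.cluster_centers_)       # centroid (mean Korean, English score) per cluster
print(np.bincount(km.labels_))   # number of students in each cluster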

 

Data file: academy2.csv (attached to the original post)

data = pd.read_csv('academy2.csv')
data.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 18 entries, 0 to 17
Data columns (total 6 columns):
 #   Column  Non-Null Count  Dtype
---  ------  --------------  -----
 0   학번      18 non-null     int64
 1   국어점수    18 non-null     int64
 2   영어점수    18 non-null     int64
 3   수학점수    18 non-null     int64
 4   과학점수    18 non-null     int64
 5   학업성취도   18 non-null     int64
dtypes: int64(6)
memory usage: 992.0 bytes

 

km = KMeans(n_clusters = 3)
km.fit(data.iloc[:,1:])
aaa = km.fit_predict(data.iloc[:,1:])
mglearn.plots.plot_kmeans_algorithm()
plt.show()

 

mglearn.plots.plot_kmeans_boundaries()
plt.show()

 

mglearn.discrete_scatter(data.iloc[:,1], data.iloc[:,2], km.labels_)
plt.legend(["클러스터 0","클러스터 1","클러스터 2"], loc='best')
plt.xlabel("국어점수")
plt.ylabel("영어점수")
plt.show()

 

km.predict([[100, 80, 70, 70, 70]])
# array([0])

km.labels_
# array([0, 0, 2, 0, 0, 2, 0, 0, 2, 1, 0, 2, 1, 0, 2, 1, 0, 2])

 

for no, cla in enumerate(km.labels_) :
    print(data.iloc[no].tolist(), cla)
    
[1, 90, 80, 80, 80, 80] 0
[2, 90, 75, 75, 75, 75] 0
[3, 65, 90, 90, 90, 90] 2
[4, 90, 80, 80, 80, 80] 0
[5, 90, 75, 75, 75, 75] 0
[6, 65, 90, 90, 90, 90] 2
[7, 90, 80, 80, 80, 80] 0
[8, 90, 75, 75, 75, 75] 0
[9, 65, 90, 60, 88, 80] 2
[10, 90, 80, 60, 30, 40] 1
[11, 90, 75, 85, 60, 70] 0
[12, 65, 90, 60, 88, 80] 2
[13, 90, 30, 40, 30, 40] 1
[14, 90, 60, 70, 60, 70] 0
[15, 65, 88, 80, 88, 80] 2
[16, 90, 30, 40, 30, 40] 1
[17, 90, 60, 70, 60, 70] 0
[18, 65, 88, 80, 88, 80] 2
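
Instead of printing row by row, the labels can also be attached to the DataFrame and summarised per cluster; a small sketch (not from the original post), assuming data and km from above:

data_labeled = data.copy()
data_labeled['cluster'] = km.labels_
print(data_labeled.groupby('cluster').mean())   # average score profile of each cluster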

 

from sklearn.cluster import DBSCAN
model = DBSCAN()
model.fit(data.iloc[:,1:])
clusters = model.fit_predict(data.iloc[:,1:])
mglearn.discrete_scatter(data.iloc[:,1], data.iloc[:,2], model.labels_)
plt.legend(['클러스터 0','클러스터 1','클러스터 2'], loc = 'best')
plt.xlabel('국어점수')
plt.ylabel('영어점수')
plt.show()

# KMeans
# - fast and widely used for clustering
# - starts from random centroids and refines them one step at a time
# - works best when the clusters are roughly circular/spherical
# - the number of clusters has to be set by hand

# DBSCAN
# - groups points by density
# - finds the number of clusters automatically (a parameter sketch follows below)
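
DBSCAN does not take a cluster count, but its result is controlled by eps (neighbourhood radius) and min_samples (points needed to form a dense region); points that belong to no dense region get the label -1. A small sketch (not from the original post) with illustrative, untuned values:

from sklearn.cluster import DBSCAN
model = DBSCAN(eps = 10, min_samples = 2)        # illustrative values, not tuned for this data
clusters = model.fit_predict(data.iloc[:,1:])
print(clusters)                                  # -1 marks points treated as noise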

 
