import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, LSTM, Embedding, Bidirectional

 

 

vocab_size = 15000

def create_model() :
    model = Sequential([
        Embedding(vocab_size, 32),
        Bidirectional(LSTM(32, return_sequences=True)),
        Dense(32, activation='relu'),
        Dense(1, activation='sigmoid')
    ])
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

 

 

import pandas as pd
test_file = tf.keras.utils.get_file('ratings_test.txt',\
            origin='https://raw.githubusercontent.com/e9t/nsmc/master/ratings_test.txt', extract=True)
test = pd.read_csv(test_file, sep='\t')
test.head()


id	document	label
0	6270596	굳 ㅋ	1
1	9274899	GDNTOPCLASSINTHECLUB	0
2	8544678	뭐야 이 평점들은.... 나쁘진 않지만 10점 짜리는 더더욱 아니잖아	0
3	6825595	지루하지는 않은데 완전 막장임... 돈주고 보기에는....	0
4	6723715	3D만 아니었어도 별 다섯 개 줬을텐데.. 왜 3D로 나와서 제 심기를 불편하게 하죠??	0

 

 

test.shape 
# (50000, 3)

 

 

import konlpy
from konlpy.tag import Okt
okt = Okt()

train_file = tf.keras.utils.get_file('ratings_train.txt',\
            origin='https://raw.githubusercontent.com/e9t/nsmc/master/ratings_train.txt', extract=True)
train = pd.read_csv(train_file, sep='\t')
train.head()

id	document	label
0	9976970	아 더빙.. 진짜 짜증나네요 목소리	0
1	3819312	흠...포스터보고 초딩영화줄....오버연기조차 가볍지 않구나	1
2	10265843	너무재밓었다그래서보는것을추천한다	0
3	9045019	교도소 이야기구먼 ..솔직히 재미는 없다..평점 조정	0
4	6483659	사이몬페그의 익살스런 연기가 돋보였던 영화!스파이더맨에서 늙어보이기만 했던 커스틴 ...	1

 

 

train['document'] = train['document'].str.replace("[^A-Za-z가-힣ㄱ-ㅎㅏ-ㅣ]", "", regex=True)
train = train.dropna()

 

 

def word_tokenization(text) :
    stop_words = ['는','을','를','이','가','의','던','고','하','다','은','에','들','지','게','도']
    return [word for word in okt.morphs(text) if word not in stop_words]

 

 

data = train['document'].apply((lambda x : word_tokenization(x)))
data.head()

0                              [아더, 빙, 진짜, 짜증나네요, 목소리]
1        [흠, 포스터, 보고, 초딩, 영화, 줄, 오버, 연기, 조차, 가볍지, 않구나]
2                     [너, 무재, 밓었, 다그, 래서, 보는것을, 추천, 한]
3                  [교도소, 이야기, 구먼, 솔직히, 재미, 없다, 평점, 조정]
4    [사이, 몬페, 그, 익살스런, 연기, 돋보였던, 영화, 스파이더맨, 에서, 늙어,...
Name: document, dtype: object

 

 

from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

 

 

oov_tok = "<OOV>"
vocab_size = 15000
tokenizer = Tokenizer(oov_token = oov_tok, num_words=vocab_size)
tokenizer.fit_on_texts(data)

 


Test data preprocessing
1. Remove every character except Korean and English letters
2. Drop missing values
3. Separate the data to test from the label data
4. Remove stopwords from the test data
5. Use the tokenizer to convert the data into sequences the model can analyze
6. Pad the sequences

import numpy as np
def preprocessing(df) :
    df['document'] = df['document'].str.replace("[^A-Za-z가-힣ㄱ-ㅎㅏ-ㅣ]", "", regex=True)
    df = df.dropna()
    test_label = np.asarray(df['label'])
    test_data = df['document'].apply((lambda x : word_tokenization(x)))
    test_data = tokenizer.texts_to_sequences(test_data)
    test_data = pad_sequences(test_data, padding='post', maxlen=69)
    return test_data, test_label

 

 

test_data, test_label = preprocessing(test)
test_data[2:3]
test_label[2:3]


# array([0], dtype=int64)

 

 

# Evaluation
model2 = create_model()
model2.evaluate(test_data, test_label)



1563/1563 [==============================] - 6s 3ms/step - loss: 0.6931 - accuracy: 0.5039
[0.6931077837944031, 0.5039322972297668]

 

 

# Load the saved model weights and evaluate
checkpoint_path = 'best_performed_model.ckpt'
model2.load_weights(checkpoint_path)
model2.evaluate(test_data, test_label)



1563/1563 [==============================] - 4s 3ms/step - loss: 1.1676 - accuracy: 0.4926
[1.1676305532455444, 0.49262505769729614]

 

print("감동 ==>>", tokenizer.word_index['감동'])
print("영화 ==>>", tokenizer.word_index['영화'])
print("나나 ==>>", tokenizer.word_index['나나'])



감동 ==>> 28
영화 ==>> 2
나나 ==>> 3533
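
With the tokenizer and the reloaded weights in place, a new review can be scored end to end. A minimal sketch (the helper below is hypothetical, not part of the original post); since this model returns one output per timestep, the per-step probabilities are averaged into a single score:

# hypothetical helper: tokenize, pad and score one raw review with model2
def predict_sentiment(sentence):
    tokens = word_tokenization(sentence)              # morphs minus stopwords
    seq = tokenizer.texts_to_sequences([tokens])      # words -> indices
    padded = pad_sequences(seq, padding='post', maxlen=69)
    prob = model2.predict(padded)                     # shape (1, 69, 1): one score per timestep
    prob = float(prob.mean())                         # collapse timesteps to one score
    return ("positive" if prob > 0.5 else "negative"), prob

print(predict_sentiment("정말 재미있고 감동적인 영화에요"))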

 


Naver movie review data

import pandas as pd 
import numpy as np 
import seaborn as sns 
import matplotlib.pyplot as plt 
import tensorflow as tf

 

train_file = tf.keras.utils.get_file('ratings_train.txt',
            origin='https://raw.githubusercontent.com/e9t/nsmc/master/ratings_train.txt',
            extract=True)








Downloading data from https://raw.githubusercontent.com/e9t/nsmc/master/ratings_train.txt 
14630912/14628807 [==============================] - 0s 0us/step

 

train = pd.read_csv(train_file, sep='\t') 
train.head() 
print("train shape:", train.shape) 


# train shape: (150000, 3)

Count the number of samples per label


train['label'].value_counts() 
sns.countplot(x='label', data=train)

Missing values


train.isnull().sum()
train[train['document'].isnull()]
# drop missing values
train = train.dropna()
print("train shape:", train.shape)




# train shape: (149995, 3)


Plot the distribution of character counts per label as histograms

# distribution of character counts per label
# character counts of positive reviews
postive_len = train[train['label']==1]['document'].str.len()
# character counts of negative reviews
negative_len = train[train['label']==0]['document'].str.len()
postive_len.iloc[:10]




1     33
4     61
8     22
9     45
10    16
11    43
13    51
15    16
16    64
18    45
Name: document, dtype: int64

 

fig, (ax1, ax2) = plt.subplots(1,2,figsize=(10,5)) 
ax1.hist(postive_len) 
ax1.set_title("positive") 
ax2.hist(negative_len) 
ax2.set_title("negative") 
fig.suptitle("Number of characters") 
plt.show()

Morphological analysis

from konlpy.tag import Okt
okt = Okt()

text = '한글 자연어 처리는 재밌다. 이제부터 열심히 해야지ㅎㅎㅎㅎ'
okt.morphs(text)            # split into morphemes
okt.morphs(text, stem=True) # split into morphemes with stemming
okt.nouns(text)             # extract nouns only
okt.phrases(text)           # extract phrases
okt.pos(text)               # morpheme analysis with POS tags



[('한글', 'Noun'), ('자연어', 'Noun'), ('처리', 'Noun'), ('는', 'Josa'), ('재밌다', 'Adjective'), ('.', 'Punctuation'), ('이제', 'Noun'), ('부터', 'Josa'), ('열심히', 'Adverb'), ('해야지', 'Verb'), ('ㅎㅎㅎㅎ', 'KoreanParticle')]

 

# Data preprocessing

# Remove every character other than Korean and English letters

train['document'] = train['document'].str.replace("[^A-Za-z가-힣ㄱ-ㅎㅏ-ㅣ]", "", regex=True)
train['document'].head()


0 아더빙진짜짜증나네요목소리 
1 흠포스터보고초딩영화줄오버연기조차가볍지않구나 
2 너무재밓었다그래서보는것을추천한다 
3 교도소이야기구먼솔직히재미는없다평점조정 
4 사이몬페그의익살스런연기가돋보였던영화스파이더맨에서늙어보이기만했던커스틴던스트가너무나도이... 
Name: document, dtype: object


Morphological analysis: tokenize after removing stopwords

def word_tokenization(text):
    # Korean stopwords
    stop_words = ['는','을','를','이','가','의','던','고','하','다','은','에','들','지','게','도']
    return [word for word in okt.morphs(text) if word not in stop_words]

 

import time

start = time.time()
data = train['document'].apply(lambda x : word_tokenization(x))
print("elapsed time:", time.time()-start)
data.head()



elapsed time: 1631.960827589035 
0 [아더, 빙, 진짜, 짜증나네요, 목소리] 
1 [흠, 포스터, 보고, 초딩, 영화, 줄, 오버, 연기, 조차, 가볍지, 않구나] 
2 [너, 무재, 밓었, 다그, 래서, 보는것을, 추천, 한] 
3 [교도소, 이야기, 구먼, 솔직히, 재미, 없다, 평점, 조정] 
4 [사이, 몬페, 그, 익살스런, 연기, 돋보였던, 영화, 스파이더맨, 에서, 늙어,... 
Name: document, dtype: object

 

from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

tokenizer = Tokenizer()
tokenizer.fit_on_texts(data)
print("total word count:", len(tokenizer.word_index))
# total word count: 122402


Only words that appear at least 5 times are counted toward vocab_size

def get_vocab_size(threshold):
    cnt = 0
    for x in tokenizer.word_counts.values():
        if x >= threshold:
            cnt += 1
    return cnt

 

vocab_size = get_vocab_size(5)

# words appearing at least 5 times

print("vocab_size:", vocab_size)
# vocab_size: 23384

 

Split into training and validation data

training_size = 120000
# split the sentences
train_sentences = data[:training_size]
valid_sentences = data[training_size:]
# split the labels
train_labels = train['label'][:training_size]
valid_labels = train['label'][training_size:]

 

print(train_sentences.shape) 
print(valid_sentences.shape) 


(120000,) 

(29995,)


Tokenizing

oov_tok = "<OOV>"
tokenizer = Tokenizer(oov_token = oov_tok, num_words=vocab_size)
tokenizer.fit_on_texts(data)
print("vocabulary size:", len(tokenizer.word_counts))
# vocabulary size: 122402


Convert the text to integer sequences

print(train_sentences[:2]) 
train_sequences = tokenizer.texts_to_sequences(train_sentences) 
valid_sequences = tokenizer.texts_to_sequences(valid_sentences) 
print(train_sequences[:2])
print(valid_sequences[:2])

0 [아더, 빙, 진짜, 짜증나네요, 목소리] 
1 [흠, 포스터, 보고, 초딩, 영화, 줄, 오버, 연기, 조차, 가볍지, 않구나] 
Name: document, dtype: object 
[[13657, 16287, 8, 6835, 615], [1005, 423, 33, 554, 2, 354, 1539, 20, 1044, 6416, 1]] [[277, 9, 299, 208, 3401, 23332, 860, 9, 908, 178, 11017, 877, 3, 156, 48], [381, 158, 2, 487, 193, 2437, 38, 57, 259, 10630, 1, 33, 13153, 2, 72]]


Maximum sequence length

max_length = max(len(x) for x in train_sequences)
print("max sequence length:", max_length)

# max sequence length: 69

 

train_padded = pad_sequences(train_sequences, padding = 'post', maxlen=max_length) 
valid_padded = pad_sequences(valid_sequences, padding = 'post', maxlen=max_length) 
train_padded[:1] 


array([[13657, 16287, 8, 6835, 615, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

 

Building the model

import tensorflow as tf 
from tensorflow.keras import Sequential 
from tensorflow.keras.layers import Dense, LSTM, Embedding, Bidirectional

 

def create_model():
    # Bidirectional: wrap the LSTM so it processes the sequence in both directions
    # return_sequences=True: return the hidden state at every timestep
    model = Sequential([
        Embedding(vocab_size, 32),
        Bidirectional(LSTM(32, return_sequences=True)),
        Dense(32, activation='relu'),
        Dense(1, activation='sigmoid')
    ])

    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

 

model = create_model()
model.summary()


Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding (Embedding)        (None, None, 32)          748288    
_________________________________________________________________
bidirectional_1 (Bidirection (None, None, 64)          16640     
_________________________________________________________________
dense_2 (Dense)              (None, None, 32)          2080      
_________________________________________________________________
dense_3 (Dense)              (None, None, 1)           33        
=================================================================
Total params: 767,041
Trainable params: 767,041
Non-trainable params: 0
_________________________________________________________________


The more complex the model and the larger the data, the longer training takes.
=> A model trained for a long time should be saved; use the ModelCheckpoint callback.
save_weights_only=True : save only the weights
save_weights_only=False : save both the model layers and the weights
save_best_only=True : save only the best weights
save_best_only=False : save the weights at every epoch

checkpoint_path = 'best_performed_model.ckpt'
checkpoint = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                save_weights_only=True,
                                                save_best_only=True,
                                                monitor='val_loss', verbose=1)
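
The checkpoint above stores only the weights. A rough sketch of the save_weights_only=False variant (the filename below is hypothetical), which keeps the full model so it can be reloaded later without calling create_model() first:

full_model_path = 'best_full_model.h5'  # hypothetical filename
full_checkpoint = tf.keras.callbacks.ModelCheckpoint(full_model_path,
                                                     save_weights_only=False,  # save layers + weights
                                                     save_best_only=True,
                                                     monitor='val_loss',
                                                     verbose=1)
# later: restored = tf.keras.models.load_model(full_model_path)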


EarlyStopping callback : stop training when the model stops improving
monitor = 'val_loss' : the metric used to judge improvement
patience=2 : stop if there is no improvement for 2 epochs
callback : a function that fit() invokes during training

early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',patience=2)

 

Training

history = model.fit(train_padded, train_labels, validation_data=(valid_padded, valid_labels),
                   callbacks=[early_stop, checkpoint],
                   batch_size=64, epochs=10, verbose=2)
                   
                   
Epoch 1/10
1875/1875 - 31s - loss: 0.4138 - accuracy: 0.8022 - val_loss: 0.3530 - val_accuracy: 0.8437

Epoch 00001: val_loss improved from inf to 0.35297, saving model to best_performed_model.ckpt
Epoch 2/10
1875/1875 - 24s - loss: 0.3111 - accuracy: 0.8652 - val_loss: 0.3561 - val_accuracy: 0.8424

Epoch 00002: val_loss did not improve from 0.35297
Epoch 3/10
1875/1875 - 24s - loss: 0.2710 - accuracy: 0.8824 - val_loss: 0.3645 - val_accuracy: 0.8445

Epoch 00003: val_loss did not improve from 0.35297

 

def plot_graphs(history, metric) :
    plt.plot(history.history[metric])
    plt.plot(history.history['val_'+metric], '')
    plt.xlabel('Epochs')
    plt.ylabel(metric)
    plt.legend([metric, 'val_'+metric])
    plt.show()

 

plot_graphs(history, 'accuracy') 
plot_graphs(history, 'loss')


Long Short Term Memory
Analyzing Korean text
Korean morphological analyzers:
Kkma
Komoran
Okt
Hannanum

 

 

import konlpy
from konlpy.tag import Kkma, Komoran, Okt, Hannanum
import time
kkma = Kkma()
komoran = Komoran()
okt = Okt()
hannanum = Hannanum()

 

def sample_ko_pos(text) :
    print(f"==={text}===")
    start = time.time()
    print("kkma:",kkma.pos(text),",실행시간:",time.time()-start)
    start = time.time()
    print("komoran:",komoran.pos(text),",실행시간:",time.time()-start)
    start = time.time()
    print("okt:",okt.pos(text),",실행시간:",time.time()-start)
    start = time.time()
    print("hannanum:",hannanum.pos(text),",실행시간:",time.time()-start)
    print('\n')

 

# a sentence with incorrect spacing
text1 = '영실아안녕오늘날씨어때?'
sample_ko_pos(text1)

===영실아안녕오늘날씨어때?===
kkma: [('영', 'MAG'), ('싣', 'VV'), ('아', 'ECD'), ('안녕', 'NNG'), ('오늘날', 'NNG'), ('씨', 'VV'), ('어', 'ECD'), ('때', 'NNG'), ('?', 'SF')] ,실행시간: 3.219935178756714
komoran: [('영', 'NNP'), ('실', 'NNP'), ('아', 'NNP'), ('안녕', 'NNP'), ('오늘날', 'NNP'), ('씨', 'NNB'), ('어떻', 'VA'), ('어', 'EF'), ('?', 'SF')] ,실행시간: 0.004001617431640625
okt: [('영', 'Modifier'), ('실아', 'Noun'), ('안녕', 'Noun'), ('오늘날', 'Noun'), ('씨', 'Suffix'), ('어때', 'Adjective'), ('?', 'Punctuation')] ,실행시간: 2.025106191635132
hannanum: [('영실아안녕오늘날씨어때', 'N'), ('?', 'S')] ,실행시간: 0.5730204582214355

 

# a sentence containing typos
text2 = '안녕ㅎㅏㅅㅔ여 ㅈㅓ는ㄷㅐ학생 입니다.'
sample_ko_pos(text2)


===안녕ㅎㅏㅅㅔ여 ㅈㅓ는ㄷㅐ학생 입니다.===
kkma: [('안녕ㅎㅏㅅㅔ', 'UN'), ('여', 'JKI'), ('ㅈ', 'NNG'), ('ㅓ', 'UN'), ('는', 'JX'), ('ㄷ', 'NNG'), ('ㅐ', 'UN'), ('학생', 'NNG'), ('이', 'VCP'), ('ㅂ니다', 'EFN'), ('.', 'SF')] ,실행시간: 0.0388941764831543
komoran: [('안녕', 'NNP'), ('하', 'NNP'), ('세', 'NNB'), ('이', 'VCP'), ('어', 'EC'), ('저', 'NP'), ('는', 'JX'), ('대학생', 'NNG'), ('이', 'VCP'), ('ㅂ니다', 'EF'), ('.', 'SF')] ,실행시간: 0.0010001659393310547
okt: [('안녕', 'Noun'), ('ㅎㅏㅅㅔ', 'KoreanParticle'), ('여', 'Noun'), ('ㅈㅓ', 'KoreanParticle'), ('는', 'Verb'), ('ㄷㅐ', 'KoreanParticle'), ('학생', 'Noun'), ('입니다', 'Adjective'), ('.', 'Punctuation')] ,실행시간: 0.00400090217590332
hannanum: [('안녕ㅎㅏㅅㅔ', 'N'), ('이', 'J'), ('어', 'E'), ('ㅈㅓ는ㄷㅐ학생', 'N'), ('일', 'P'), ('ㅂ니다', 'E'), ('.', 'S')] ,실행시간: 0.0020003318786621094

 

text3 = "정말 재미있고 매력적인 영화에요 추천합니다."
sample_ko_pos(text3)

===정말 재미있고 매력적인 영화에요 추천합니다.===
kkma: [('정말', 'MAG'), ('재미있', 'VA'), ('고', 'ECE'), ('매력적', 'NNG'), ('이', 'VCP'), ('ㄴ', 'ETD'), ('영화', 'NNG'), ('에', 'JKM'), ('요', 'JX'), ('추천', 'NNG'), ('하', 'XSV'), ('ㅂ니다', 'EFN'), ('.', 'SF')] ,실행시간: 0.01355123519897461
komoran: [('정말', 'MAG'), ('재미있', 'VA'), ('고', 'EC'), ('매력', 'NNG'), ('적', 'XSN'), ('이', 'VCP'), ('ㄴ', 'ETM'), ('영화', 'NNG'), ('에', 'JKB'), ('요', 'JX'), ('추천', 'NNG'), ('하', 'XSV'), ('ㅂ니다', 'EF'), ('.', 'SF')] ,실행시간: 0.002000093460083008
okt: [('정말', 'Noun'), ('재미있고', 'Adjective'), ('매력', 'Noun'), ('적', 'Suffix'), ('인', 'Josa'), ('영화', 'Noun'), ('에요', 'Josa'), ('추천', 'Noun'), ('합니다', 'Verb'), ('.', 'Punctuation')] ,실행시간: 0.009005308151245117
hannanum: [('정말', 'M'), ('재미있', 'P'), ('고', 'E'), ('매력적', 'N'), ('이', 'J'), ('ㄴ', 'E'), ('영화', 'N'), ('이', 'J'), ('에요', 'E'), ('추천', 'N'), ('하', 'X'), ('ㅂ니다', 'E'), ('.', 'S')] ,실행시간: 0.003000497817993164

 


# Analyzing the IMDB dataset with a SimpleRNN

from tensorflow.keras.datasets import imdb
num_words = 10000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words = num_words)
X_train.shape

# (25000,)

 

# padding
from tensorflow.keras.preprocessing.sequence import pad_sequences
max_len = 500
pad_X_train = pad_sequences(X_train, maxlen = max_len)
pad_X_test = pad_sequences(X_test, maxlen = max_len)

 

# building the model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense, Embedding

return_sequences : defaults to False; whether to return the output at every timestep (the recurrent outputs)
dropout : fraction of the input units to drop
recurrent_dropout : fraction of the recurrent state to drop

 

model = Sequential()
model.add(Embedding(input_dim = num_words, output_dim = 32))
model.add(SimpleRNN(32, return_sequences = True, dropout = 0.15, recurrent_dropout = 0.15))
model.add(SimpleRNN(32))
model.add(Dense(1, activation = 'sigmoid'))

 

model.compile(optimizer = 'adam', loss='binary_crossentropy', metrics = ['acc'])
model.summary()

Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding (Embedding)        (None, None, 32)          320000    
_________________________________________________________________
simple_rnn (SimpleRNN)       (None, None, 32)          2080      
_________________________________________________________________
simple_rnn_1 (SimpleRNN)     (None, 32)                2080      
_________________________________________________________________
dense (Dense)                (None, 1)                 33        
=================================================================
Total params: 324,193
Trainable params: 324,193
Non-trainable params: 0
_________________________________________________________________

 

 

history = model.fit(pad_X_train, y_train, batch_size = 32, epochs = 15, validation_split = 0.2)



Epoch 1/15
625/625 [==============================] - 43s 67ms/step - loss: 0.6911 - acc: 0.5356 - val_loss: 0.6146 - val_acc: 0.6862
Epoch 2/15
625/625 [==============================] - 41s 66ms/step - loss: 0.5349 - acc: 0.7323 - val_loss: 0.4432 - val_acc: 0.7986
Epoch 3/15
625/625 [==============================] - 42s 66ms/step - loss: 0.4389 - acc: 0.8065 - val_loss: 0.6516 - val_acc: 0.6824
Epoch 4/15
625/625 [==============================] - 42s 68ms/step - loss: 0.5697 - acc: 0.7071 - val_loss: 0.5844 - val_acc: 0.6994
Epoch 5/15
625/625 [==============================] - 42s 67ms/step - loss: 0.5356 - acc: 0.7357 - val_loss: 0.6082 - val_acc: 0.6658
Epoch 6/15
625/625 [==============================] - 42s 67ms/step - loss: 0.5133 - acc: 0.7520 - val_loss: 0.8289 - val_acc: 0.5406
Epoch 7/15
625/625 [==============================] - 41s 66ms/step - loss: 0.5330 - acc: 0.7317 - val_loss: 0.5149 - val_acc: 0.7470
Epoch 8/15
625/625 [==============================] - 41s 65ms/step - loss: 0.5198 - acc: 0.7435 - val_loss: 0.5873 - val_acc: 0.6998
Epoch 9/15
625/625 [==============================] - 41s 66ms/step - loss: 0.4812 - acc: 0.7720 - val_loss: 0.5361 - val_acc: 0.7388
Epoch 10/15
625/625 [==============================] - 41s 65ms/step - loss: 0.4684 - acc: 0.7818 - val_loss: 0.5729 - val_acc: 0.6942
Epoch 11/15
625/625 [==============================] - 42s 67ms/step - loss: 0.4367 - acc: 0.8001 - val_loss: 0.5292 - val_acc: 0.7416
Epoch 12/15
625/625 [==============================] - 42s 68ms/step - loss: 0.4064 - acc: 0.8243 - val_loss: 0.5065 - val_acc: 0.7660
Epoch 13/15
625/625 [==============================] - 42s 66ms/step - loss: 0.3777 - acc: 0.8399 - val_loss: 0.5160 - val_acc: 0.7320
Epoch 14/15
625/625 [==============================] - 42s 67ms/step - loss: 0.4170 - acc: 0.8135 - val_loss: 0.6287 - val_acc: 0.6220
Epoch 15/15
625/625 [==============================] - 43s 68ms/step - loss: 0.4658 - acc: 0.7863 - val_loss: 0.5528 - val_acc: 0.7462

 

import matplotlib.pyplot as plt
his_dict = history.history
loss = his_dict['loss']
val_loss = his_dict['val_loss']
epochs = range(1, len(loss)+1)
fig = plt.figure(figsize=(10,5))
# plot the training loss
ax1 = fig.add_subplot(1,2,1)
ax1.plot(epochs, loss, color='blue', label='train_loss')
ax1.plot(epochs, val_loss, color='orange', label='val_loss')
ax1.set_title('train and val loss')
ax1.set_xlabel('epochs')
ax1.set_ylabel('loss')
ax1.legend()
# accuracy
acc = his_dict['acc']
val_acc = his_dict['val_acc']

ax2 = fig.add_subplot(1,2,2)
ax2.plot(epochs, acc, color='blue', label='train_acc')
ax2.plot(epochs, val_acc, color='orange', label='val_acc')
ax2.set_title('train and val acc')
ax2.set_xlabel('epochs')
ax2.set_ylabel('acc')
ax2.legend()
plt.show()

import matplotlib.pyplot as plt
plt.figure(figsize = (12, 4))
plt.subplot(1,2,1)
plt.plot(history.history['loss'], 'b-', label='loss')
plt.plot(history.history['val_loss'], 'r--', label='val_loss')
plt.xlabel('Epoch')
plt.legend()
plt.subplot(1,2,2)
plt.plot(history.history['acc'], 'g-', label='acc')
plt.plot(history.history['val_acc'], 'k--', label='val_acc')
plt.xlabel('Epoch')
plt.legend()
plt.show()

SimpleRNN conclusions
Accuracy is no better than the model that used only an Embedding layer.
A SimpleRNN layer has trouble with long sequences:
its state remembers the start of the sequence, but that memory fades as the timesteps go on.
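
This fading memory is exactly what LSTM layers (used in the Naver review posts above) are meant to fix. A minimal sketch, assuming the same pad_X_train / y_train / num_words from this post, would simply swap the SimpleRNN layers for an LSTM:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

# hypothetical comparison model (not in the original post): the LSTM cell's gates
# let it carry state across the full 500-step padded reviews
lstm_model = Sequential()
lstm_model.add(Embedding(input_dim = num_words, output_dim = 32))
lstm_model.add(LSTM(32, dropout = 0.15, recurrent_dropout = 0.15))
lstm_model.add(Dense(1, activation = 'sigmoid'))
lstm_model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['acc'])
# lstm_model.fit(pad_X_train, y_train, batch_size = 32, epochs = 5, validation_split = 0.2)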


RNN
An Embedding layer only learns a representation of the data, roughly building a dictionary of it.
Word order and context also have to be taken into account => an Embedding layer alone is not enough.
Use a SimpleRNN.

 

# build a dataset from a cosine function
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(2020)
time = np.arange(30*12+1)
month_time = (time%30)/30
time_series = 20*np.where(month_time < 0.5,
                         np.cos(2*np.pi*month_time),
                         np.cos(2*np.pi*month_time) +np.random.random(361))
plt.figure(figsize = (10, 5))
plt.title('timeseries data')
plt.xlabel('time')
plt.ylabel('value')
plt.plot(np.arange(0, 30*11+1), time_series[:30*11+1], color = 'black', alpha = 0.7, label = 'train')
plt.plot(np.arange(30*11, 30*12+1), time_series[30*11:], color = 'orange', label = 'test')
plt.legend()
plt.show()

 

print(time_series.shape)

# (361,)

# make_sequence : feed the array in windows of 10 values; the target is the value right after each window

def make_sequence(time_series, n) :
    x_train, y_train = list(), list()
    for i in range(len(time_series)) :
        x = time_series[i:(i+n)]
        if ( i + n ) < len(time_series) :
            x_train.append(x)
            y_train.append(time_series[i+n])
        else :
            break
    return np.array(x_train), np.array(y_train)

 

n = 10
x_train, y_train = make_sequence(time_series, n)
x_train = x_train.reshape(-1, n, 1)
y_train = y_train.reshape(-1, 1)
partial_x_train = x_train[:30*11]
partial_y_train = y_train[:30*11]
x_test = x_train[30*11:]
y_test = y_train[30*11:]
print("train data", partial_x_train.shape, ",", partial_y_train.shape)
print("test data", x_test.shape, ",", y_test.shape)

# train data (330, 10, 1) , (330, 1)
# test data (21, 10, 1) , (21, 1)

 

test_arr = np.arange(100) # integers 0 through 99
a, b = make_sequence(test_arr, 10)
for i in range(1, 4) :
    print(a[i], '|', b[i])
    
# [ 1  2  3  4  5  6  7  8  9 10] | 11
# [ 2  3  4  5  6  7  8  9 10 11] | 12
# [ 3  4  5  6  7  8  9 10 11 12] | 13

 

# simpleRNN
from tensorflow.keras.layers import SimpleRNN, Flatten, Dense
from tensorflow.keras.models import Sequential
model = Sequential()
model.add(SimpleRNN(units = 32, activation = 'tanh', input_shape = (n, 1)))
model.add(Dense(1, activation = 'linear'))
model.compile(optimizer = 'adam', loss='mse')
model.summary()

Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
simple_rnn_1 (SimpleRNN)     (None, 32)                1088      
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 33        
=================================================================
Total params: 1,121
Trainable params: 1,121
Non-trainable params: 0
_________________________________________________________________

 

history = model.fit(partial_x_train, partial_y_train, epochs = 100, batch_size = 12)



Epoch 1/100
28/28 [==============================] - 0s 852us/step - loss: 182.2196
Epoch 2/100
28/28 [==============================] - 0s 830us/step - loss: 153.3062
Epoch 3/100
28/28 [==============================] - 0s 811us/step - loss: 131.4881
Epoch 4/100
28/28 [==============================] - 0s 860us/step - loss: 115.1706
Epoch 5/100
28/28 [==============================] - 0s 868us/step - loss: 103.9888
Epoch 6/100
28/28 [==============================] - 0s 833us/step - loss: 96.0756
Epoch 7/100
28/28 [==============================] - 0s 870us/step - loss: 89.4670
Epoch 8/100
28/28 [==============================] - 0s 839us/step - loss: 83.7613
Epoch 9/100
28/28 [==============================] - 0s 854us/step - loss: 79.0363
Epoch 10/100
28/28 [==============================] - 0s 883us/step - loss: 74.6907
Epoch 11/100
28/28 [==============================] - 0s 822us/step - loss: 71.2030
Epoch 12/100
28/28 [==============================] - 0s 838us/step - loss: 67.9915
Epoch 13/100
28/28 [==============================] - 0s 852us/step - loss: 65.1543
Epoch 14/100
28/28 [==============================] - 0s 815us/step - loss: 62.7451
Epoch 15/100
28/28 [==============================] - 0s 776us/step - loss: 60.4991
Epoch 16/100
28/28 [==============================] - 0s 864us/step - loss: 58.4392
Epoch 17/100
28/28 [==============================] - 0s 846us/step - loss: 56.7270
Epoch 18/100
28/28 [==============================] - 0s 815us/step - loss: 55.2900
Epoch 19/100
28/28 [==============================] - 0s 839us/step - loss: 53.6978
Epoch 20/100
28/28 [==============================] - 0s 847us/step - loss: 52.3777
Epoch 21/100
28/28 [==============================] - 0s 813us/step - loss: 50.8633
Epoch 22/100
28/28 [==============================] - 0s 855us/step - loss: 49.2698
Epoch 23/100
28/28 [==============================] - 0s 855us/step - loss: 47.7046
Epoch 24/100
28/28 [==============================] - 0s 816us/step - loss: 46.1788
Epoch 25/100
28/28 [==============================] - 0s 886us/step - loss: 44.7304
Epoch 26/100
28/28 [==============================] - 0s 884us/step - loss: 43.1118
Epoch 27/100
28/28 [==============================] - 0s 863us/step - loss: 41.3034
Epoch 28/100
28/28 [==============================] - 0s 839us/step - loss: 39.7947
Epoch 29/100
28/28 [==============================] - 0s 837us/step - loss: 38.8360
Epoch 30/100
28/28 [==============================] - 0s 872us/step - loss: 37.7363
Epoch 31/100
28/28 [==============================] - 0s 823us/step - loss: 36.1102
Epoch 32/100
28/28 [==============================] - 0s 815us/step - loss: 34.8467
Epoch 33/100
28/28 [==============================] - 0s 820us/step - loss: 34.1782
Epoch 34/100
28/28 [==============================] - 0s 877us/step - loss: 33.5438
Epoch 35/100
28/28 [==============================] - 0s 820us/step - loss: 33.0707
Epoch 36/100
28/28 [==============================] - 0s 858us/step - loss: 31.4599
Epoch 37/100
28/28 [==============================] - 0s 820us/step - loss: 30.7163
Epoch 38/100
28/28 [==============================] - 0s 812us/step - loss: 30.3554
Epoch 39/100
28/28 [==============================] - 0s 848us/step - loss: 28.9213
Epoch 40/100
28/28 [==============================] - 0s 815us/step - loss: 28.3957
Epoch 41/100
28/28 [==============================] - 0s 894us/step - loss: 28.0743
Epoch 42/100
28/28 [==============================] - 0s 848us/step - loss: 26.7403
Epoch 43/100
28/28 [==============================] - 0s 815us/step - loss: 26.5320
Epoch 44/100
28/28 [==============================] - 0s 852us/step - loss: 26.0540
Epoch 45/100
28/28 [==============================] - 0s 828us/step - loss: 25.1309
Epoch 46/100
28/28 [==============================] - 0s 816us/step - loss: 24.8194
Epoch 47/100
28/28 [==============================] - 0s 814us/step - loss: 24.5361
Epoch 48/100
28/28 [==============================] - 0s 815us/step - loss: 24.0815
Epoch 49/100
28/28 [==============================] - 0s 852us/step - loss: 23.8737
Epoch 50/100
28/28 [==============================] - 0s 854us/step - loss: 23.3372
Epoch 51/100
28/28 [==============================] - 0s 807us/step - loss: 22.5951
Epoch 52/100
28/28 [==============================] - 0s 820us/step - loss: 23.4359
Epoch 53/100
28/28 [==============================] - 0s 872us/step - loss: 22.5156
Epoch 54/100
28/28 [==============================] - 0s 843us/step - loss: 22.3894
Epoch 55/100
28/28 [==============================] - 0s 841us/step - loss: 23.1800
Epoch 56/100
28/28 [==============================] - 0s 805us/step - loss: 21.9609
Epoch 57/100
28/28 [==============================] - 0s 857us/step - loss: 21.7087
Epoch 58/100
28/28 [==============================] - 0s 813us/step - loss: 21.4336
Epoch 59/100
28/28 [==============================] - 0s 823us/step - loss: 21.2400
Epoch 60/100
28/28 [==============================] - 0s 811us/step - loss: 20.8000
Epoch 61/100
28/28 [==============================] - 0s 835us/step - loss: 20.4084
Epoch 62/100
28/28 [==============================] - 0s 829us/step - loss: 20.7629
Epoch 63/100
28/28 [==============================] - 0s 796us/step - loss: 20.1678
Epoch 64/100
28/28 [==============================] - 0s 842us/step - loss: 20.4220
Epoch 65/100
28/28 [==============================] - 0s 818us/step - loss: 20.3210
Epoch 66/100
28/28 [==============================] - 0s 862us/step - loss: 19.6332
Epoch 67/100
28/28 [==============================] - 0s 856us/step - loss: 19.9926
Epoch 68/100
28/28 [==============================] - 0s 844us/step - loss: 19.3556
Epoch 69/100
28/28 [==============================] - 0s 852us/step - loss: 18.9739
Epoch 70/100
28/28 [==============================] - 0s 814us/step - loss: 19.1395
Epoch 71/100
28/28 [==============================] - 0s 856us/step - loss: 18.9837
Epoch 72/100
28/28 [==============================] - 0s 840us/step - loss: 18.8401
Epoch 73/100
28/28 [==============================] - 0s 798us/step - loss: 18.5658
Epoch 74/100
28/28 [==============================] - 0s 829us/step - loss: 19.0316
Epoch 75/100
28/28 [==============================] - 0s 818us/step - loss: 18.3579
Epoch 76/100
28/28 [==============================] - 0s 807us/step - loss: 17.9954
Epoch 77/100
28/28 [==============================] - 0s 852us/step - loss: 17.8692
Epoch 78/100
28/28 [==============================] - 0s 800us/step - loss: 17.5699
Epoch 79/100
28/28 [==============================] - 0s 848us/step - loss: 17.3456
Epoch 80/100
28/28 [==============================] - 0s 857us/step - loss: 17.2581
Epoch 81/100
28/28 [==============================] - 0s 878us/step - loss: 17.3021
Epoch 82/100
28/28 [==============================] - 0s 828us/step - loss: 17.3987
Epoch 83/100
28/28 [==============================] - 0s 855us/step - loss: 16.8744
Epoch 84/100
28/28 [==============================] - 0s 852us/step - loss: 16.5918
Epoch 85/100
28/28 [==============================] - 0s 822us/step - loss: 16.5154
Epoch 86/100
28/28 [==============================] - 0s 838us/step - loss: 16.2934
Epoch 87/100
28/28 [==============================] - 0s 837us/step - loss: 16.2925
Epoch 88/100
28/28 [==============================] - 0s 854us/step - loss: 16.3128
Epoch 89/100
28/28 [==============================] - 0s 809us/step - loss: 15.9899
Epoch 90/100
28/28 [==============================] - 0s 852us/step - loss: 15.8265
Epoch 91/100
28/28 [==============================] - 0s 838us/step - loss: 16.3135
Epoch 92/100
28/28 [==============================] - 0s 825us/step - loss: 16.3944
Epoch 93/100
28/28 [==============================] - 0s 823us/step - loss: 15.5170
Epoch 94/100
28/28 [==============================] - 0s 804us/step - loss: 15.0679
Epoch 95/100
28/28 [==============================] - 0s 825us/step - loss: 14.9712
Epoch 96/100
28/28 [==============================] - 0s 810us/step - loss: 14.9446
Epoch 97/100
28/28 [==============================] - 0s 820us/step - loss: 14.4544
Epoch 98/100
28/28 [==============================] - 0s 843us/step - loss: 14.6256
Epoch 99/100
28/28 [==============================] - 0s 815us/step - loss: 14.4208
Epoch 100/100
28/28 [==============================] - 0s 820us/step - loss: 14.1855

 

pred = model.predict(x_test)
pred_range = np.arange(len(y_train), len(y_train) + len(pred))
plt.figure(figsize = (15, 5))
plt.title('prediction')
plt.xlabel('time') ; plt.ylabel('value')
plt.plot(pred_range, y_test.reshape(-1,), color='orange', label='ground-truth')
plt.plot(pred_range, pred.reshape(-1,), color='blue', label='prediction')
plt.legend()
plt.show()

 

# IMDB: 50,000 reviews, 50% positive / 50% negative, already preprocessed (text encoded as integers)
from tensorflow.keras.datasets import imdb
num_words = 10000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words = num_words)
print(x_train.shape, x_test.shape)

# (25000,) (25000,)

 

print(x_train[0])

[1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25, 100, 43, 838, 112, 50, 670, 2, 9, 35, 480, 284, 5, 150, 4, 172, 112, 167, 2, 336, 385, 39, 4, 172, 4536, 1111, 17, 546, 38, 13, 447, 4, 192, 50, 16, 6, 147, 2025, 19, 14, 22, 4, 1920, 4613, 469, 4, 22, 71, 87, 12, 16, 43, 530, 38, 76, 15, 13, 1247, 4, 22, 17, 515, 17, 12, 16, 626, 18, 2, 5, 62, 386, 12, 8, 316, 8, 106, 5, 4, 2223, 5244, 16, 480, 66, 3785, 33, 4, 130, 12, 16, 38, 619, 5, 25, 124, 51, 36, 135, 48, 25, 1415, 33, 6, 22, 12, 215, 28, 77, 52, 5, 14, 407, 16, 82, 2, 8, 4, 107, 117, 5952, 15, 256, 4, 2, 7, 3766, 5, 723, 36, 71, 43, 530, 476, 26, 400, 317, 46, 7, 4, 2, 1029, 13, 104, 88, 4, 381, 15, 297, 98, 32, 2071, 56, 26, 141, 6, 194, 7486, 18, 4, 226, 22, 21, 134, 476, 26, 480, 5, 144, 30, 5535, 18, 51, 36, 28, 224, 92, 25, 104, 4, 226, 65, 16, 38, 1334, 88, 12, 16, 283, 5, 16, 4472, 113, 103, 32, 15, 16, 5345, 19, 178, 32]

 

igwi = {}
for key, value in imdb.get_word_index().items() :
    igwi[value] = key
for i in range(1, 6) :
    print('The #{} most frequent word is {}'.format(i, igwi[i]))
    
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb_word_index.json
1646592/1641221 [==============================] - 0s 0us/step
The #1 most frequent word is the
The #2 most frequent word is and
The #3 most frequent word is a
The #4 most frequent word is of
The #5 most frequent word is to
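
igwi can also decode a review back into words. One detail: imdb.load_data reserves the indices 0 (padding), 1 (start) and 2 (unknown) and shifts real word indices up by 3 (index_from=3), so the lookup needs that offset. A minimal sketch:

# decode the first training review back to words (reserved / unknown indices become '?')
decoded = ' '.join(igwi.get(i - 3, '?') for i in x_train[0])
print(decoded[:200])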

 

print(y_train[:10])

# [1 0 0 1 0 0 1 0 1 0]

 

import numpy as np
lengths = np.array([len(x) for x in x_train])
print(np.mean(lengths), np.median(lengths))

# 238.71364 178.0

 

# histogram of review lengths (word counts)
import matplotlib.pyplot as plt
plt.hist(lengths)
plt.xlabel('length')
plt.ylabel('frequency')
plt.show()

# padding
from tensorflow.keras.preprocessing.sequence import pad_sequences
a1 = [[1,2,3]]
a2 = [[1,2,3,4,5,6,7,8]]
a1_post = pad_sequences(a1, maxlen=5, padding = 'post')
a2_post = pad_sequences(a2, maxlen=5, padding = 'post')
print(a1_post)
print(a2_post)

# [[1 2 3 0 0]]
# [[4 5 6 7 8]]

 

# make every input the same length for analysis : padding
# padding : if a sequence is shorter than the target length, fill it with 0
from tensorflow.keras.preprocessing.sequence import pad_sequences
max_len = 500
print('before pad', len(x_train[0])) # 218
# 'pre' pads with zeros at the front
pad_x_train = pad_sequences(x_train, maxlen = max_len, padding = 'pre')
pad_x_test = pad_sequences(x_test, maxlen = max_len, padding = 'pre')
print('after pad', len(pad_x_train[0]))

# before pad 218
# after pad 500

 

print('after pad',pad_x_train[0])

after pad [   0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    1   14   22   16   43  530  973 1622 1385   65  458 4468
   66 3941    4  173   36  256    5   25  100   43  838  112   50  670
    2    9   35  480  284    5  150    4  172  112  167    2  336  385
   39    4  172 4536 1111   17  546   38   13  447    4  192   50   16
    6  147 2025   19   14   22    4 1920 4613  469    4   22   71   87
   12   16   43  530   38   76   15   13 1247    4   22   17  515   17
   12   16  626   18    2    5   62  386   12    8  316    8  106    5
    4 2223 5244   16  480   66 3785   33    4  130   12   16   38  619
    5   25  124   51   36  135   48   25 1415   33    6   22   12  215
   28   77   52    5   14  407   16   82    2    8    4  107  117 5952
   15  256    4    2    7 3766    5  723   36   71   43  530  476   26
  400  317   46    7    4    2 1029   13  104   88    4  381   15  297
   98   32 2071   56   26  141    6  194 7486   18    4  226   22   21
  134  476   26  480    5  144   30 5535   18   51   36   28  224   92
   25  104    4  226   65   16   38 1334   88   12   16  283    5   16
 4472  113  103   32   15   16 5345   19  178   32]

 

# Embedding layer : the most basic layer for RNN models, used as the first layer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, Flatten

model = Sequential()
# input_dim : size of the vocabulary
# output_dim : dimension of the embedding vectors
# input_length : length of each input sequence
model.add(Embedding(input_dim = num_words, output_dim = 32, input_length = max_len)) 
model.add(Flatten())
model.add(Dense(1, activation = 'sigmoid'))

 

model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['acc'])
model.summary()

Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_1 (Embedding)      (None, 500, 32)           320000    
_________________________________________________________________
flatten_1 (Flatten)          (None, 16000)             0         
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 16001     
=================================================================
Total params: 336,001
Trainable params: 336,001
Non-trainable params: 0
_________________________________________________________________

 

history = model.fit(pad_x_train, y_train, batch_size = 32, epochs = 30, validation_split = 0.2)


Epoch 1/30
625/625 [==============================] - 2s 3ms/step - loss: 0.4567 - acc: 0.7761 - val_loss: 0.3132 - val_acc: 0.8670
Epoch 2/30
625/625 [==============================] - 2s 3ms/step - loss: 0.2001 - acc: 0.9254 - val_loss: 0.2965 - val_acc: 0.8762
Epoch 3/30
625/625 [==============================] - 2s 3ms/step - loss: 0.1057 - acc: 0.9703 - val_loss: 0.3016 - val_acc: 0.8780
Epoch 4/30
625/625 [==============================] - 2s 3ms/step - loss: 0.0498 - acc: 0.9912 - val_loss: 0.3101 - val_acc: 0.8838
Epoch 5/30
625/625 [==============================] - 2s 3ms/step - loss: 0.0232 - acc: 0.9979 - val_loss: 0.3377 - val_acc: 0.8838
Epoch 6/30
625/625 [==============================] - 2s 3ms/step - loss: 0.0116 - acc: 0.9995 - val_loss: 0.3689 - val_acc: 0.8798
Epoch 7/30
625/625 [==============================] - 2s 3ms/step - loss: 0.0063 - acc: 0.9999 - val_loss: 0.3901 - val_acc: 0.8820
Epoch 8/30
625/625 [==============================] - 2s 3ms/step - loss: 0.0036 - acc: 0.9999 - val_loss: 0.4229 - val_acc: 0.8776
Epoch 9/30
625/625 [==============================] - 2s 3ms/step - loss: 0.0022 - acc: 1.0000 - val_loss: 0.4352 - val_acc: 0.8800
Epoch 10/30
625/625 [==============================] - 2s 3ms/step - loss: 0.0014 - acc: 1.0000 - val_loss: 0.4565 - val_acc: 0.8798
Epoch 11/30
625/625 [==============================] - 2s 3ms/step - loss: 9.0908e-04 - acc: 1.0000 - val_loss: 0.4762 - val_acc: 0.8786
Epoch 12/30
625/625 [==============================] - 2s 3ms/step - loss: 6.2279e-04 - acc: 1.0000 - val_loss: 0.4971 - val_acc: 0.8778
Epoch 13/30
625/625 [==============================] - 2s 3ms/step - loss: 4.0832e-04 - acc: 1.0000 - val_loss: 0.5179 - val_acc: 0.8774
Epoch 14/30
625/625 [==============================] - 2s 3ms/step - loss: 2.8227e-04 - acc: 1.0000 - val_loss: 0.5364 - val_acc: 0.8758
Epoch 15/30
625/625 [==============================] - 2s 3ms/step - loss: 1.9264e-04 - acc: 1.0000 - val_loss: 0.5530 - val_acc: 0.8768
Epoch 16/30
625/625 [==============================] - 2s 3ms/step - loss: 1.3397e-04 - acc: 1.0000 - val_loss: 0.5750 - val_acc: 0.8752
Epoch 17/30
625/625 [==============================] - 2s 3ms/step - loss: 9.3561e-05 - acc: 1.0000 - val_loss: 0.5911 - val_acc: 0.8766
Epoch 18/30
625/625 [==============================] - 2s 3ms/step - loss: 6.5905e-05 - acc: 1.0000 - val_loss: 0.6112 - val_acc: 0.8768
Epoch 19/30
625/625 [==============================] - 2s 3ms/step - loss: 4.5675e-05 - acc: 1.0000 - val_loss: 0.6288 - val_acc: 0.8758
Epoch 20/30
625/625 [==============================] - 2s 3ms/step - loss: 3.2030e-05 - acc: 1.0000 - val_loss: 0.6464 - val_acc: 0.8760
Epoch 21/30
625/625 [==============================] - 2s 3ms/step - loss: 2.2697e-05 - acc: 1.0000 - val_loss: 0.6652 - val_acc: 0.8746
Epoch 22/30
625/625 [==============================] - 2s 3ms/step - loss: 1.6130e-05 - acc: 1.0000 - val_loss: 0.6839 - val_acc: 0.8756
Epoch 23/30
625/625 [==============================] - 2s 3ms/step - loss: 1.1474e-05 - acc: 1.0000 - val_loss: 0.7017 - val_acc: 0.8754
Epoch 24/30
625/625 [==============================] - 2s 3ms/step - loss: 8.1176e-06 - acc: 1.0000 - val_loss: 0.7189 - val_acc: 0.8746
Epoch 25/30
625/625 [==============================] - 2s 3ms/step - loss: 5.8811e-06 - acc: 1.0000 - val_loss: 0.7372 - val_acc: 0.8748
Epoch 26/30
625/625 [==============================] - 2s 3ms/step - loss: 4.2489e-06 - acc: 1.0000 - val_loss: 0.7547 - val_acc: 0.8740
Epoch 27/30
625/625 [==============================] - 2s 3ms/step - loss: 3.0487e-06 - acc: 1.0000 - val_loss: 0.7724 - val_acc: 0.8750
Epoch 28/30
625/625 [==============================] - 2s 3ms/step - loss: 2.2476e-06 - acc: 1.0000 - val_loss: 0.7881 - val_acc: 0.8748
Epoch 29/30
625/625 [==============================] - 2s 3ms/step - loss: 1.6460e-06 - acc: 1.0000 - val_loss: 0.8056 - val_acc: 0.8746
Epoch 30/30
625/625 [==============================] - 2s 3ms/step - loss: 1.2241e-06 - acc: 1.0000 - val_loss: 0.8222 - val_acc: 0.8748

 

import matplotlib.pyplot as plt
plt.figure(figsize = (12, 4))
plt.subplot(1,2,1)
plt.plot(history.history['loss'], 'b-', label='loss')
plt.plot(history.history['val_loss'], 'r--', label='val_loss')
plt.xlabel('Epoch')
plt.legend()
plt.subplot(1,2,2)
plt.plot(history.history['acc'], 'g-', label='acc')
plt.plot(history.history['val_acc'], 'k--', label='val_acc')
plt.xlabel('Epoch')
plt.legend()
plt.show()


Multi-label classification

import pandas as pd
train_df = pd.read_csv("./clothess/train.csv")
val_df = pd.read_csv("./clothess/val.csv")
test_df = pd.read_csv("./clothess/test.csv")
train_df.head()




image	black	blue	brown	green	red	white	dress	shirt	pants	shorts	shoes
0	./clothess\blue_pants\251.jpg	0.0	1.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0
1	./clothess\green_pants\162.jpg	0.0	0.0	0.0	1.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0
2	./clothess\red_pants\160.jpg	0.0	0.0	0.0	0.0	1.0	0.0	0.0	0.0	0.0	0.0	0.0
3	./clothess\blue_pants\642.jpg	0.0	1.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0
4	./clothess\red_pants\249.jpg	0.0	0.0	0.0	0.0	1.0	0.0	0.0	0.0	0.0	0.0	0.0

 

train_df.info()



<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5721 entries, 0 to 5720
Data columns (total 12 columns):
 #   Column  Non-Null Count  Dtype  
---  ------  --------------  -----  
 0   image   5721 non-null   object 
 1   black   5721 non-null   float64
 2   blue    5721 non-null   float64
 3   brown   5721 non-null   float64
 4   green   5721 non-null   float64
 5   red     5721 non-null   float64
 6   white   5721 non-null   float64
 7   dress   5721 non-null   float64
 8   shirt   5721 non-null   float64
 9   pants   5721 non-null   float64
 10  shorts  5721 non-null   float64
 11  shoes   5721 non-null   float64
dtypes: float64(11), object(1)
memory usage: 536.5+ KB

 

val_df.info()



<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2452 entries, 0 to 2451
Data columns (total 12 columns):
 #   Column  Non-Null Count  Dtype  
---  ------  --------------  -----  
 0   image   2452 non-null   object 
 1   black   2452 non-null   float64
 2   blue    2452 non-null   float64
 3   brown   2452 non-null   float64
 4   green   2452 non-null   float64
 5   red     2452 non-null   float64
 6   white   2452 non-null   float64
 7   dress   2452 non-null   float64
 8   shirt   2452 non-null   float64
 9   pants   2452 non-null   float64
 10  shorts  2452 non-null   float64
 11  shoes   2452 non-null   float64
dtypes: float64(11), object(1)
memory usage: 230.0+ KB

 

from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_dagen = ImageDataGenerator(rescale = 1./255)
val_dagen = ImageDataGenerator(rescale = 1./255)

# import tensorflow.keras.models
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D,MaxPool2D, Dropout

model = Sequential()
model.add(Flatten(input_shape = (112,112,3))) # flatten to a 1-D vector
model.add(Dense(128, activation = 'relu')) # 128 outputs
model.add(Dense(64, activation = 'relu')) # 64 outputs
model.add(Dense(11, activation = 'sigmoid')) # 11 outputs, multi-label
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics=['acc'])

 

batch_size = 32
class_col = ['black', 'blue', 'brown', 'green', 'red', 'white', 'dress', 'shirt', 'pants', 'shorts', 'shoes']
# flow_from_dataframe : read images listed in a DataFrame
# flow_from_directory : read images from a directory tree
train_generator = train_dagen.flow_from_dataframe(
        dataframe = train_df, # DataFrame to use
        directory = None, # image folder (None: x_col already holds full paths)
        x_col = 'image', # column with the image paths
        y_col = class_col, # columns with the labels
        target_size = (112, 112), # image size
        color_mode = 'rgb', # color mode
        class_mode = 'raw', # labels returned as raw arrays
        batch_size = batch_size, # images per batch
        shuffle = True, # shuffle the images
        seed  = 42) # random seed
val_generator = val_dagen.flow_from_dataframe(
        dataframe = val_df, # DataFrame to use
        directory = None, # image folder (None: x_col already holds full paths)
        x_col = 'image', # column with the image paths
        y_col = class_col, # columns with the labels
        target_size = (112, 112), # image size
        color_mode = 'rgb', # color mode
        class_mode = 'raw', # labels returned as raw arrays
        batch_size = batch_size, # images per batch
        shuffle = True, # shuffle the images
        seed  = 42) # random seed
        
        
        
        
Found 5721 validated image filenames.
Found 2452 validated image filenames.

 

import matplotlib.pylab as plt
batch = next(train_generator)
image, level = batch[0], batch[1]
image[0]
level[1]
plt.imshow(image[0])

 

# number of samples divided by the batch size, rounded up
def get_steps(num_samples, batch_size) :
    if (num_samples % batch_size) > 0:
        return (num_samples // batch_size) +1
    else :
        return num_samples // batch_size
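
get_steps is just a ceiling division; an equivalent check (a sketch, not in the original post):

import math
# ceiling division includes the final partial batch, same as get_steps above
assert math.ceil(len(train_df) / batch_size) == get_steps(len(train_df), batch_size)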

 

# steps_per_epoch : number of batches to draw per epoch
history = model.fit(train_generator,
                   steps_per_epoch = get_steps(len(train_df), batch_size),
                   validation_data = val_generator,
                   validation_steps = get_steps(len(val_df), batch_size),
                   epochs = 10)
                   
                   
                   
Epoch 1/10
179/179 [==============================] - 26s 146ms/step - loss: 0.3128 - acc: 0.5761 - val_loss: 0.1889 - val_acc: 0.6741
Epoch 2/10
179/179 [==============================] - 8s 46ms/step - loss: 0.1353 - acc: 0.7312 - val_loss: 0.1264 - val_acc: 0.7504
Epoch 3/10
179/179 [==============================] - 8s 46ms/step - loss: 0.1202 - acc: 0.7651 - val_loss: 0.1059 - val_acc: 0.7802
Epoch 4/10
179/179 [==============================] - 8s 46ms/step - loss: 0.1101 - acc: 0.7838 - val_loss: 0.1486 - val_acc: 0.7259
Epoch 5/10
179/179 [==============================] - 8s 46ms/step - loss: 0.1015 - acc: 0.8032 - val_loss: 0.0931 - val_acc: 0.8120
Epoch 6/10
179/179 [==============================] - 8s 46ms/step - loss: 0.0824 - acc: 0.8369 - val_loss: 0.0868 - val_acc: 0.8308
Epoch 7/10
179/179 [==============================] - 8s 46ms/step - loss: 0.0786 - acc: 0.8371 - val_loss: 0.0801 - val_acc: 0.8389
Epoch 8/10
179/179 [==============================] - 8s 46ms/step - loss: 0.0716 - acc: 0.8525 - val_loss: 0.0760 - val_acc: 0.8548
Epoch 9/10
179/179 [==============================] - 8s 46ms/step - loss: 0.0622 - acc: 0.8782 - val_loss: 0.0748 - val_acc: 0.8442
Epoch 10/10
179/179 [==============================] - 8s 47ms/step - loss: 0.0609 - acc: 0.8797 - val_loss: 0.0879 - val_acc: 0.8418

 

import matplotlib.pyplot as plt
plt.figure(figsize = (12, 4))
plt.subplot(1, 2, 1)
plt.plot(history.history['loss'], 'b-', label='loss')
plt.plot(history.history['val_loss'], 'r--', label='val_loss')
plt.xlabel('Epoch')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(history.history['acc'], 'g-', label='accuracy')
plt.plot(history.history['val_acc'], 'k--', label='val_accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.show()

Predicting with the test data

test_datagen = ImageDataGenerator(rescale = 1./255)
test_generator = test_datagen.flow_from_dataframe(
                dataframe = test_df, 
                directory = None,
                x_col = 'image',
                y_col = None,
                target_size = (112, 112),
                color_mode = 'rgb',
                class_mode = None,
                batch_size = batch_size,
                shuffle = False)
                
                
# Found 3503 validated image filenames.

 

preds = model.predict(test_generator, steps=32)

 

off = 0
do_preds = preds[off:off+8]
do_preds


array([[3.32533419e-02, 7.11781383e-02, 5.13464212e-04, 1.82812870e-01,
        8.54158352e-05, 7.91194558e-01, 1.21990088e-28, 1.47551075e-29,
        1.93555780e-17, 7.12528839e-29, 1.73372027e-16],
       [6.13263249e-03, 2.04395413e-01, 1.71032548e-03, 3.68385911e-02,
        6.75260671e-05, 6.42114520e-01, 1.42349242e-26, 3.59089307e-29,
        4.28250958e-17, 8.94882424e-27, 7.64378215e-17],
       [8.58788490e-01, 9.29567218e-03, 1.12130190e-04, 5.59631884e-02,
        1.80703537e-05, 5.41556078e-10, 2.68478816e-12, 5.24902760e-15,
        3.93610627e-18, 1.41551403e-14, 2.81184884e-11],
       [5.79097867e-03, 3.95524800e-02, 2.32820511e-02, 1.91599131e-04,
        9.70587730e-01, 3.22788954e-04, 5.19957627e-18, 4.32147078e-16,
        3.17167161e-11, 2.16641580e-17, 3.72485665e-09],
       [9.08219516e-02, 2.23988742e-01, 5.21731377e-03, 5.20364940e-02,
        9.54627991e-04, 2.84431517e-01, 7.32842328e-11, 6.32213336e-12,
        2.34831932e-09, 1.10864234e-11, 1.59157683e-08],
       [2.99239159e-02, 1.85096234e-01, 5.08943200e-03, 3.03146243e-03,
        1.25288963e-04, 8.50661397e-01, 6.25681374e-16, 3.09247150e-16,
        5.95982153e-10, 7.95468017e-16, 1.84371973e-09],
       [8.56922865e-01, 1.54545009e-02, 1.15802668e-05, 1.12835467e-02,
        5.59813725e-06, 3.15491855e-10, 8.28860452e-11, 3.35589848e-16,
        1.13912270e-19, 1.78753945e-17, 1.88561420e-13],
       [6.52963221e-02, 8.60731602e-01, 4.45579886e-02, 3.68613005e-03,
        1.31562054e-02, 2.54813671e-01, 2.74723910e-09, 6.26615634e-11,
        1.98719277e-07, 3.27711436e-09, 1.12804935e-06]], dtype=float32)

 

for i, pred in enumerate(do_preds) :
    plt.subplot(2, 4, i+1)
    prob = zip(class_col, list(pred))
    prob = sorted(list(prob), key = lambda z : z[1], reverse = True)[:2]
    image = plt.imread(test_df['image'][i+off])
    plt.imshow(image)
    plt.title(f'{prob[0][0]}:{round(prob[0][1]*100, 2)}% \n{prob[1][0]}: {round(prob[1][1]*100, 2)}%')
    plt.tight_layout()

 

# Analyzing with a CNN
model = Sequential([
    Conv2D(input_shape=(112, 112, 3), kernel_size = (3, 3),
          filters=32, padding = 'same', activation = 'relu'),
    Conv2D(kernel_size = (3, 3),
          filters=64, padding = 'same', activation = 'relu'),
    MaxPool2D(pool_size=(2,2)),
    Dropout(rate=0.5),
    Conv2D(kernel_size = (3, 3),
          filters=128, padding = 'same', activation = 'relu'),
    Conv2D(kernel_size = (3, 3),
          filters=256, padding = 'valid', activation = 'relu'),
    MaxPool2D(pool_size=(2,2)),
    Dropout(rate=0.5),
    Flatten(),
    Dense(units=512, activation='relu'),
    Dropout(rate=0.5),
    Dense(units=256, activation='relu'),
    Dropout(rate=0.5),
    Dense(units=11, activation='sigmoid')
])

 

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(train_generator, 
                    steps_per_epoch=get_steps(len(train_df), batch_size),
                    validation_data = val_generator, 
                    validation_steps=get_steps(len(val_df), batch_size),
                    epochs = 10)
                    
                    
                    
Epoch 1/10
179/179 [==============================] - 250s 1s/step - loss: 0.1824 - acc: 0.5745 - val_loss: 0.0724 - val_acc: 0.8254
Epoch 2/10
179/179 [==============================] - 245s 1s/step - loss: 0.0694 - acc: 0.8743 - val_loss: 0.0446 - val_acc: 0.9156
Epoch 3/10
179/179 [==============================] - 242s 1s/step - loss: 0.0479 - acc: 0.9213 - val_loss: 0.0410 - val_acc: 0.9274
Epoch 4/10
179/179 [==============================] - 237s 1s/step - loss: 0.0363 - acc: 0.9428 - val_loss: 0.0423 - val_acc: 0.9396
Epoch 5/10
179/179 [==============================] - 237s 1s/step - loss: 0.0350 - acc: 0.9432 - val_loss: 0.0274 - val_acc: 0.9531
Epoch 6/10
179/179 [==============================] - 237s 1s/step - loss: 0.0292 - acc: 0.9519 - val_loss: 0.0313 - val_acc: 0.9519
Epoch 7/10
179/179 [==============================] - 236s 1s/step - loss: 0.0236 - acc: 0.9640 - val_loss: 0.0267 - val_acc: 0.9625
Epoch 8/10
179/179 [==============================] - 237s 1s/step - loss: 0.0214 - acc: 0.9661 - val_loss: 0.0300 - val_acc: 0.9580
Epoch 9/10
179/179 [==============================] - 236s 1s/step - loss: 0.0225 - acc: 0.9640 - val_loss: 0.0302 - val_acc: 0.9543
Epoch 10/10
179/179 [==============================] - 236s 1s/step - loss: 0.0188 - acc: 0.9687 - val_loss: 0.0275 - val_acc: 0.9543

 

import matplotlib.pyplot as plt
plt.figure(figsize = (12, 4))
plt.subplot(1,2,1)
plt.plot(history.history['loss'], 'b-', label='loss')
plt.plot(history.history['val_loss'], 'r--', label='val_loss')
plt.xlabel('Epoch')
plt.legend()
plt.subplot(1,2,2)
plt.plot(history.history['acc'], 'g-', label='acc')
plt.plot(history.history['val_acc'], 'k--', label='val_acc')
plt.xlabel('Epoch')
plt.legend()
plt.show()

 

model.evaluate(test_generator)


110/110 [==============================] - 3s 27ms/step - loss: 0.0000e+00 - acc: 0.0000e+00
[0.0, 0.0]

 

import numpy as np
import pandas as pd
import tensorflow as tf
import glob
all_data = np.array(glob.glob('./clothes/*/*.jpg', recursive=True))
'''
glob.glob('./clothes/*/*.jpg')
.        : current folder
clothes  : the clothes folder under the current folder
*        : each subfolder (one per color_clothes class)
*.jpg    : every jpg file in that subfolder
The matching paths are stored as a NumPy array.
(recursive=True only searches nested subfolders when the pattern contains **.)
'''
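Note that recursive=True changes nothing here because the pattern uses a single *, which already matches exactly one folder level. A minimal sketch of the recursive form, assuming the same ./clothes layout:

import glob

# ** matches any depth of nested folders below ./clothes
nested = glob.glob('./clothes/**/*.jpg', recursive=True)
print(len(nested))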

 

print(all_data[:5])

['./clothes\\black_dress\\1.jpg' './clothes\\black_dress\\10.jpg'
 './clothes\\black_dress\\100.jpg' './clothes\\black_dress\\101.jpg'
 './clothes\\black_dress\\102.jpg']

 

def check_cc(color, clothes) :
    labels = np.zeros(11,)
    color_index = -1   # stays -1 if the color is not one of the six below
    # color check
    if ( color == 'black') :
        labels[0] = 1
        color_index = 0
    elif ( color == 'blue') :
        labels[1] = 1
        color_index = 1
    elif ( color == 'brown') :
        labels[2] = 1
        color_index = 2
    elif ( color == 'green') :
        labels[3] = 1
        color_index = 3
    elif ( color == 'red') :
        labels[4] = 1
        color_index = 4
    elif ( color == 'white') :
        labels[5] = 1
        color_index = 5
    # clothes check
    if ( clothes == 'dress') :
        labels[6] = 1
    elif ( clothes == 'shirt') :
        labels[7] = 1
    elif ( clothes == 'pants') :
        labels[8] = 1
    elif ( clothes == 'shorts') :
        labels[9] = 1
    elif ( clothes == 'shoes') :
        labels[10] = 1
    return labels, color_index
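An equivalent but more compact way to build the same 11-element multi-hot label is a pair of lookup dicts; a minimal alternative sketch (not used in the rest of this post):

import numpy as np

COLOR_IDX = {'black': 0, 'blue': 1, 'brown': 2, 'green': 3, 'red': 4, 'white': 5}
CLOTHES_IDX = {'dress': 6, 'shirt': 7, 'pants': 8, 'shorts': 9, 'shoes': 10}

def check_cc_v2(color, clothes):
    labels = np.zeros(11)
    color_index = COLOR_IDX.get(color, -1)   # -1 for colors outside the six classes
    if color_index >= 0:
        labels[color_index] = 1
    if clothes in CLOTHES_IDX:
        labels[CLOTHES_IDX[clothes]] = 1
    return labels, color_index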

 

print(all_data.shape)
all_labels = np.empty((all_data.shape[0], 11))
all_color_labels = np.empty((all_data.shape[0], 1))

print(all_labels.shape)
print(all_color_labels.shape)

(16170,)
(16170, 11)
(16170, 1)

 

for i, data in enumerate(all_data) :
    color_and_clothes = data.split('\\')[1].split('_')   # e.g. 'black_dress' -> ['black', 'dress']
    color = color_and_clothes[0]
    print(color_and_clothes)
    clothes = color_and_clothes[1]
    labels, color_index = check_cc(color, clothes)
    all_labels[i] = labels
    all_color_labels[i] = color_index
print(all_labels[:10])


['black', 'dress']
['black', 'dress']
['black', 'dress']
['black', 'dress']
['black', 'dress']

 

# Split into training, validation, and test sets
from sklearn.model_selection import train_test_split
train_x, test_x, train_y, test_y = train_test_split\
(all_data, all_labels, shuffle = True, test_size = 0.3, random_state = 99)
train_x, val_x, train_y, val_y = train_test_split\
(train_x, train_y, shuffle = True, test_size = 0.3, random_state = 99)
print(train_x.shape)
print(val_x.shape)
print(test_x.shape)

(7923,)
(3396,)
(4851,)
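These sizes follow from the two 30% splits: 16170 × 0.3 = 4851 images go to the test set, leaving 11319, of which 30% (3396) become the validation set and the remaining 7923 the training set.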

 

train_df = pd.DataFrame(
    {'image':train_x, 'black':train_y[:,0], 'blue':train_y[:,1],
    'brown':train_y[:,2], 'green':train_y[:,3], 'red':train_y[:,4],
     'white':train_y[:,5], 'dress':train_y[:,6], 'shirt':train_y[:,7],
     'pants':train_y[:,8], 'shorts':train_y[:,9], 'shoes':train_y[:,10]})
val_df = pd.DataFrame(
    {'image':val_x, 'black':val_y[:,0], 'blue':val_y[:,1],
    'brown':val_y[:,2], 'green':val_y[:,3], 'red':val_y[:,4],
     'white':val_y[:,5], 'dress':val_y[:,6], 'shirt':val_y[:,7],
     'pants':val_y[:,8], 'shorts':val_y[:,9], 'shoes':val_y[:,10]})
test_df = pd.DataFrame(
    {'image':test_x, 'black':test_y[:,0], 'blue':test_y[:,1],
    'brown':test_y[:,2], 'green':test_y[:,3], 'red':test_y[:,4],
     'white':test_y[:,5], 'dress':test_y[:,6], 'shirt':test_y[:,7],
     'pants':test_y[:,8], 'shorts':test_y[:,9], 'shoes':test_y[:,10]})

 

train_df.head()

	image	black	blue	brown	green	red	white	dress	shirt	pants	shorts	shoes
0	./clothes\silver_skirt\000095.jpg	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0
1	./clothes\blue_shirt\82.jpg	0.0	1.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0
2	./clothes\pink_hoodie\000267.jpg	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0
3	./clothes\red_shoes\159.jpg	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0
4	./clothes\red_shoes\601.jpg	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0	0.0

 

train_df.to_csv('./clothes.train.csv', index = None)
val_df.to_csv('./clothes.val.csv', index = None)
test_df.to_csv('./clothes.test.csv', index = None)

 

df1 = pd.read_csv("./clothes.train.csv")
df2 = pd.read_csv("./clothes.val.csv")
df3 = pd.read_csv("./clothes.test.csv")

 

df1.info()

<class 'pandas.core.frame.DataFrame'>
RangeIndex: 7923 entries, 0 to 7922
Data columns (total 12 columns):
 #   Column  Non-Null Count  Dtype  
---  ------  --------------  -----  
 0   image   7923 non-null   object 
 1   black   7923 non-null   float64
 2   blue    7923 non-null   float64
 3   brown   7923 non-null   float64
 4   green   7923 non-null   float64
 5   red     7923 non-null   float64
 6   white   7923 non-null   float64
 7   dress   7923 non-null   float64
 8   shirt   7923 non-null   float64
 9   pants   7923 non-null   float64
 10  shorts  7923 non-null   float64
 11  shoes   7923 non-null   float64
dtypes: float64(11), object(1)
memory usage: 742.9+ KB

 


https://www.kaggle.com/tongpython/cat-and-dog

# Path to the downloaded archive (the Kaggle cat-and-dog dataset linked above)
drive_path = "C:/-/-/Machine_Learning_P_Guide/Dacon/support/"
source_filename = drive_path + "dataset/archive.zip"
# Folder to extract into
extract_folder = "dataset/"
# Unzip the archive
import shutil
shutil.unpack_archive(source_filename, extract_folder)

 

# Extraction folder
extract_folder = "dataset/"
# Locations of the train / test image folders
train_dir = extract_folder + "training_set"
test_dir = extract_folder + "test_set"
print(train_dir)

# dataset/training_set

 

import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator

import numpy as np
import matplotlib.pylab as plt

 

# Normalize pixel values to [0, 1] with rescale
image_gen = ImageDataGenerator(rescale=(1/255.))

 

# flow_from_directory: reads the images from a folder and wraps them in a generator object
# batch_size = 32 : load 32 images per batch
# target_size : resize every image to 224 x 224
# classes / class_mode = 'binary' : two classes (cats, dogs) with 0/1 labels
# seed : random seed for reproducible shuffling
train_gen = image_gen.flow_from_directory(train_dir,
                                          batch_size = 32,
                                          target_size = (224, 224),
                                          classes=['cats', 'dogs'],
                                          class_mode = 'binary',
                                          seed = 2020)
test_gen = image_gen.flow_from_directory(test_dir,
                                          batch_size = 32,
                                          target_size = (224, 224),
                                          classes=['cats', 'dogs'],
                                          class_mode = 'binary',
                                          seed = 2020)
                                          
                                          
                                          
                                          
Found 8005 images belonging to 2 classes.
Found 2023 images belonging to 2 classes.
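flow_from_directory expects one subfolder per class under each directory, which is where the "Found ... belonging to 2 classes" counts come from. The layout assumed here, based on the classes argument above:

dataset/
    training_set/
        cats/    (*.jpg)
        dogs/    (*.jpg)
    test_set/
        cats/
        dogs/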

 

# Show a sample batch of images
class_labels = ['cats', 'dogs']
batch = next(train_gen)
images, labels = batch[0], batch[1] # index 0: image data, index 1: labels
print(labels[:10])
plt.figure(figsize=(16,8))
for i in range(32) :
    ax = plt.subplot(4,8,i+1)
    plt.imshow(images[i])
    plt.title(class_labels[int(labels[i])])   # labels are floats (0.0/1.0); cast to int for indexing
    plt.axis("off")
plt.tight_layout()
plt.show()



[1. 0. 1. 1. 0. 1. 1. 1. 0. 1.]

 

# Training on augmented cifar10 data
import numpy as np
from tensorflow.keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

x_mean = np.mean(x_train, axis = (0, 1, 2))
x_std = np.std(x_train, axis = (0, 1, 2))

x_train = (x_train - x_mean) / x_std
x_test = (x_test - x_mean) / x_std

from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.3, random_state = 777)

 

print(y_train.shape)
y_train = np.squeeze(y_train)
y_val = np.squeeze(y_val)
print(y_train.shape)

# (35000, 1)
# (35000,)

 

from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(horizontal_flip= True, # flip horizontally
                                   zoom_range =0.2, # zoom in/out by up to 20%
                                   width_shift_range = 0.1, # shift horizontally by up to 10%
                                   height_shift_range = 0.1, # shift vertically by up to 10%
                                   rotation_range = 30, # rotate by up to 30 degrees
                                   fill_mode = 'nearest') # how to fill pixels exposed by a transform
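To sanity-check what these augmentation settings do, one can pull a single augmented batch and plot it; a minimal sketch, assuming the x_train / x_mean / x_std values computed above:

import numpy as np
import matplotlib.pyplot as plt

preview_gen = train_datagen.flow(x_train, y_train, batch_size=9, shuffle=True)
aug_images, aug_labels = next(preview_gen)

plt.figure(figsize=(6, 6))
for i in range(9):
    plt.subplot(3, 3, i + 1)
    # x_train was standardized earlier, so undo it before displaying
    plt.imshow(np.clip(aug_images[i] * x_std + x_mean, 0, 255).astype('uint8'))
    plt.axis('off')
plt.tight_layout()
plt.show()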

 

# No augmentation on the validation set
val_datagen = ImageDataGenerator()
batch_size = 32
train_generator = train_datagen.flow(x_train, y_train, batch_size = batch_size)
val_generator = val_datagen.flow(x_val, y_val, batch_size = batch_size)

Batch normalization (BatchNormalization) is a technique for dealing with instability in the training signal:
it narrows the distribution of layer outputs that span a wide range, reducing that instability to some degree,
and when outputs vanish or explode it rescales them back into a workable range before training continues
(see the sketch below).
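BatchNormalization is imported in the next cell but not actually used in the model below; a minimal sketch of how it is typically slotted in between a Conv2D layer and its activation (a hypothetical variant, not the model trained here):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Activation, BatchNormalization

bn_block = Sequential([
    Conv2D(kernel_size=3, filters=32, padding='same', input_shape=(32, 32, 3)),
    BatchNormalization(),   # normalize the conv outputs batch by batch
    Activation('relu'),
])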

 

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout, Activation, BatchNormalization
from tensorflow.keras.optimizers import Adam

model = Sequential()
model.add(Conv2D(kernel_size=3, filters=32, padding='same', input_shape=(32,32,3)))
model.add(Activation('relu'))
model.add(Conv2D(kernel_size=3, filters=32, padding='same'))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2, padding = 'same'))

model.add(Conv2D(kernel_size=3, filters=64, padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(kernel_size=3, filters=64, padding='same'))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2, padding = 'same'))

model.add(Conv2D(kernel_size=3, filters=128, padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(kernel_size=3, filters=128, padding='same'))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=2, padding = 'same'))

model.add(Flatten())


model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))

 

# learning rate 1e-4 = 0.0001
model.compile(optimizer=Adam(1e-4), loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])
model.summary()

Model: "sequential_4"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_8 (Conv2D)           (None, 32, 32, 32)        896       
                                                                 
 activation_4 (Activation)   (None, 32, 32, 32)        0         
                                                                 
 conv2d_9 (Conv2D)           (None, 32, 32, 32)        9248      
                                                                 
 activation_5 (Activation)   (None, 32, 32, 32)        0         
                                                                 
 max_pooling2d_1 (MaxPooling  (None, 16, 16, 32)       0         
 2D)                                                             
                                                                 
 conv2d_10 (Conv2D)          (None, 16, 16, 64)        18496     
                                                                 
 activation_6 (Activation)   (None, 16, 16, 64)        0         
                                                                 
 conv2d_11 (Conv2D)          (None, 16, 16, 64)        36928     
                                                                 
 activation_7 (Activation)   (None, 16, 16, 64)        0         
                                                                 
 max_pooling2d_2 (MaxPooling  (None, 8, 8, 64)         0         
 2D)                                                             
                                                                 
 conv2d_12 (Conv2D)          (None, 8, 8, 128)         73856     
                                                                 
 activation_8 (Activation)   (None, 8, 8, 128)         0         
                                                                 
 conv2d_13 (Conv2D)          (None, 8, 8, 128)         147584    
                                                                 
 activation_9 (Activation)   (None, 8, 8, 128)         0         
                                                                 
 max_pooling2d_3 (MaxPooling  (None, 4, 4, 128)        0         
 2D)                                                             
                                                                 
 flatten (Flatten)           (None, 2048)              0         
                                                                 
 dense (Dense)               (None, 256)               524544    
                                                                 
 dense_1 (Dense)             (None, 10)                2570      
                                                                 
=================================================================
Total params: 814,122
Trainable params: 814,122
Non-trainable params: 0
_________________________________________________________________

 

def get_step(train_len, batch_size) :
    if(train_len % batch_size > 0) :
        return train_len // batch_size + 1
    else : 
        return train_len // batch_size
history = model.fit(train_generator,
                    epochs = 100,
                    steps_per_epoch = get_step(len(x_train), batch_size),
                    validation_data = val_generator,
                    validation_steps = get_step(len(x_val), batch_size))
                    

Epoch 1/100
1094/1094 [==============================] - 50s 36ms/step - loss: 1.7521 - accuracy: 0.3640 - val_loss: 1.5137 - val_accuracy: 0.4489
Epoch 2/100
1094/1094 [==============================] - 40s 37ms/step - loss: 1.4751 - accuracy: 0.4685 - val_loss: 1.2843 - val_accuracy: 0.5383
Epoch 3/100
1094/1094 [==============================] - 40s 37ms/step - loss: 1.3595 - accuracy: 0.5118 - val_loss: 1.1808 - val_accuracy: 0.5744
Epoch 4/100
1094/1094 [==============================] - 40s 36ms/step - loss: 1.2654 - accuracy: 0.5486 - val_loss: 1.2320 - val_accuracy: 0.5648
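The 1094 steps per epoch in the log follow directly from get_step: after the 70/30 split len(x_train) is 35000, and 35000 // 32 = 1093 with a remainder, so get_step returns 1093 + 1 = 1094.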
