Using the Keras library, we run the CIFAR-10 dataset through a basic CNN architecture.
First, import the layers that will be used.
from keras.layers import Input, Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
Import the Model class as well.
from keras.models import Model
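The later snippets also use keras itself (for to_categorical and the optimizer), the CIFAR-10 loader, and os, exactly as in the full code at the end; for completeness these imports are:
import os
import keras
from keras.datasets import cifar10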
The architecture is basically as follows. It is based on the cifar10 example provided by Keras.
conv2D -> conv2D -> maxpooling -> dropout
-> conv2D -> conv2D -> maxpooling -> dropout
-> flatten -> Dense -> dropout -> Dense (the last Dense has as many units as the number of classes to predict, here 10)
Before building the blocks, the Input layer must be given the input shape.
#Input
inputs = Input((32,32,3))
# block1
x = Conv2D(32, (3,3), activation='relu', padding='same', name='block1_conv1')(inputs)
x = Conv2D(32, (3,3), activation='relu', padding='valid', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), name='block1_pool')(x)
x = Dropout(0.25)(x)
# block2
x = Conv2D(64, (3,3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(64, (3,3), activation='relu', padding='valid', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), name='block2_pool')(x)
x = Dropout(0.25)(x)
x = Flatten()(x)
x = Dense(512, activation='relu', name='dense1')(x)
x = Dropout(0.5)(x)
prediction = Dense(num_classes, activation='softmax', name='dense2')(x)
# build and compile the model
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
model = Model(inputs=inputs, outputs=prediction)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
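With 'same' padding the first conv in each block keeps the spatial size and the 'valid' conv shrinks it by 2, so the feature maps go 32x32 -> 30x30 -> 15x15 in block1 and 15x15 -> 13x13 -> 6x6 in block2; the Flatten layer therefore sees 6*6*64 = 2304 features. As a quick check, the layer-by-layer output shapes can be printed:
# print each layer's output shape to confirm the sizes above
model.summary()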
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True)
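After training finishes, the test-set loss and accuracy can be read back with evaluate; a minimal sketch using the same variables as above:
# evaluate the trained model on the test set
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])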
# Full code
import os

import keras
from keras.datasets import cifar10
from keras.layers import Input, Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Model

# hyperparameters
batch_size = 32
num_classes = 10
epochs = 10
data_augmentation = True
num_predictions = 20
save_dir = os.path.join(os.getcwd(), 'models', 'saved_models')
model_name = 'keras_cifar10_trained_model.h5'

# load CIFAR-10 and one-hot encode the labels
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape: ', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

inputs = Input((32,32,3))
# block1
x = Conv2D(32, (3,3), activation='relu', padding='same', name='block1_conv1')(inputs)
x = Conv2D(32, (3,3), activation='relu', padding='valid', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), name='block1_pool')(x)
x = Dropout(0.25)(x)
# block2
x = Conv2D(64, (3,3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(64, (3,3), activation='relu', padding='valid', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), name='block2_pool')(x)
x = Dropout(0.25)(x)
# classifier head
x = Flatten()(x)
x = Dense(512, activation='relu', name='dense1')(x)
x = Dropout(0.5)(x)
prediction = Dense(num_classes, activation='softmax', name='dense2')(x)

opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

model = Model(inputs=inputs, outputs=prediction)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])

# scale pixel values to [0, 1]
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
          validation_data=(x_test, y_test),
          shuffle=True)
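The full code defines save_dir and model_name but never actually saves the model; a minimal sketch of saving the trained model to that path, assuming the variables defined above:
# save the trained model under save_dir/model_name
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s' % model_path)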