Development environment:
- OS: Windows 10, 64-bit
- Language: Python 3.11
- Tool: Jupyter Notebook
- Packages: NumPy, Pandas, Matplotlib, scikit-learn, TensorFlow, Keras, OpenCV
Reference tutorial: https://www.kaggle.com/code/adinishad/driver-drowsiness-using-keras
Data
1. Image data
Image source: https://www.kaggle.com/datasets/dheerajperumandla/drowsiness-dataset?resource=download
I used these images from Kaggle. The dataset is about 170 MB in total and consists of four classes:
- eyes closed
- eyes open
- yawning
- not yawning
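To sanity-check the download, the images per class can be counted quickly (a small sketch, assuming the archive is extracted to ./archive/train as used below):
import os
# print the number of images in each class folder
for label in os.listdir('./archive/train'):
    print(label, len(os.listdir(os.path.join('./archive/train', label))))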
2. Face detection cascade
The Haar cascade face detector that ships with OpenCV.
Source: https://github.com/opencv/opencv/tree/master/data/haarcascades
It can also be downloaded from GitHub; I attached the XML file separately below.
I gathered all the files in one folder like this before starting! The .h5 and .keras files were created when saving the model,
and the archive folder contains the downloaded images.
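If you'd rather not click through GitHub, the file can be fetched directly (a sketch; the raw-file URL is my assumption about the repo layout). Alternatively, opencv-python ships the same cascades under cv2.data.haarcascades.
import urllib.request
# download the face cascade XML next to the notebook (URL layout assumed)
url = "https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_default.xml"
urllib.request.urlretrieve(url, "haarcascade_frontalface_default.xml")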
# import libraries in a Python 3 environment
import numpy as np
import pandas as pd
import os
import cv2
Labels
labels = os.listdir('./archive/train')
labels
['Closed', 'no_yawn', 'Open', 'yawn']
Displaying an image
import matplotlib.pyplot as plt
plt.imshow(plt.imread("./archive/train/Closed/_2.jpg"))
<matplotlib.image.AxesImage at 0x27128bd8970>
Loading an image
# load a yawn image
a = plt.imread("./archive/train/yawn/10.jpg")
# image shape
a.shape
(480, 640, 3)
# display the image
plt.imshow(plt.imread('./archive/train/yawn/10.jpg'))
<matplotlib.image.AxesImage at 0x27128cc7e20>
def face_for_yawn(direc="archive/train", face_cas_path="haarcascade_frontalface_default.xml"):
    # collect face crops for the yawn / no_yawn classes (labels 0 and 1)
    yaw_no = []
    IMG_SIZE = 145
    categories = ["yawn", "no_yawn"]
    for category in categories:
        path_link = os.path.join(direc, category)
        class_num1 = categories.index(category)
        print(class_num1)
        for image in os.listdir(path_link):
            image_array = cv2.imread(os.path.join(path_link, image), cv2.IMREAD_COLOR)
            face_cascade = cv2.CascadeClassifier(face_cas_path)
            faces = face_cascade.detectMultiScale(image_array, 1.3, 5)
            for (x, y, w, h) in faces:
                img = cv2.rectangle(image_array, (x, y), (x+w, y+h), (0, 255, 0), 2)
                roi_color = img[y:y+h, x:x+w]  # crop the detected face region
                resized_array = cv2.resize(roi_color, (IMG_SIZE, IMG_SIZE))
                yaw_no.append([resized_array, class_num1])
    return yaw_no
face_for_yawn()
0
1
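To verify the face crop worked, one returned sample can be displayed (a sketch; this re-runs the detection, and cv2 loads images as BGR, hence the conversion for matplotlib):
samples = face_for_yawn()
plt.imshow(cv2.cvtColor(samples[0][0], cv2.COLOR_BGR2RGB))  # first crop, BGR -> RGB
plt.title(f"label: {samples[0][1]}")
plt.show()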
# closed and open eyes (labels 2 and 3)
def get_data(dir_path="archive/train", eye_cas="haarcascade_frontalface_default.xml"):
    labels = ['Closed', 'Open']
    IMG_SIZE = 145
    data = []
    for label in labels:
        path = os.path.join(dir_path, label)
        class_num = labels.index(label)
        class_num += 2  # offset so Closed=2, Open=3 after yawn=0, no_yawn=1
        print(class_num)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
                resized_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                data.append([resized_array, class_num])
            except Exception as e:
                print(e)
    return data
data_train = get_data()
2
3
# extend the yawn data with the eye data and convert to one array
def append_data():
    yaw_no = face_for_yawn()
    print(type(yaw_no))
    data = get_data()
    yaw_no.extend(data)
    return np.array(yaw_no)
# new variable holding the combined data
new_data = append_data()
0
1
<class 'list'>
2
3
C:\Users\KDT103\AppData\Local\Temp\ipykernel_964\2652401494.py:8: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.
return np.array(yaw_no)
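The VisibleDeprecationWarning appears because each element of yaw_no is an [image, label] pair, i.e. a (145, 145, 3) array next to a plain int, which NumPy treats as a ragged sequence. Passing dtype=object in append_data makes the intent explicit and silences the warning:
    return np.array(yaw_no, dtype=object)  # explicit object dtype for ragged [image, label] pairs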
# split features and labels
X = []
y = []
for feature, label in new_data:
    X.append(feature)
    y.append(label)
# convert back to arrays
X = np.array(X)
X = X.reshape(-1, 145, 145, 3)
# one-hot encode the labels
from sklearn.preprocessing import LabelBinarizer
label_bin = LabelBinarizer()
y = label_bin.fit_transform(y)
y = np.array(y)
# train/test split
from sklearn.model_selection import train_test_split
seed = 42
test_size = 0.30
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=seed, test_size=test_size)
# length of X_test
print(len(X_test))
578
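Because the split is random, it's worth a quick check that all four classes show up in both splits (a small sketch; argmax undoes the one-hot encoding):
# class counts in the train and test splits
print(np.unique(np.argmax(y_train, axis=1), return_counts=True))
print(np.unique(np.argmax(y_test, axis=1), return_counts=True))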
# pip install tensorflow
# pip install keras
Cell In[28], line 2
pip install keras
^
SyntaxError: invalid syntax
Running pip install as a bare statement in a code cell raises this SyntaxError; prefix the command with ! (!pip install keras) or run it from a terminal, which is why the lines above are commented out.
from tensorflow.keras.layers import Input, Lambda, Dense, Flatten, Conv2D, MaxPooling2D, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
tf.__version__
'2.13.0'
train_generator = ImageDataGenerator(rescale = 1/255, zoom_range=0.2, horizontal_flip=True, rotation_range=30)
test_generator = ImageDataGenerator(rescale=1/255)
train_generator = train_generator.flow(np.array(X_train), y_train, shuffle=False)
test_generator = test_generator.flow(np.array(X_test), y_test, shuffle=False)
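To see what the augmentation actually produces, one batch can be previewed (a minimal sketch; colors look odd under matplotlib because cv2.imread loaded the images as BGR):
batch_X, batch_y = next(train_generator)  # one augmented batch
fig, axes = plt.subplots(1, 4, figsize=(12, 3))
for ax, img in zip(axes, batch_X[:4]):
    ax.imshow(img)  # already rescaled to [0, 1]
    ax.axis("off")
plt.show()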
Building the model
model = Sequential()
model.add(Conv2D(256, (3, 3), activation="relu", input_shape=X_train.shape[1:]))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(64, activation="relu"))
model.add(Dense(4, activation="softmax"))
model.compile(loss="categorical_crossentropy", metrics=["accuracy"], optimizer="adam")
model.summary()
Model: "sequential_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_8 (Conv2D) (None, 143, 143, 256) 7168
max_pooling2d_8 (MaxPoolin (None, 71, 71, 256) 0
g2D)
conv2d_9 (Conv2D) (None, 69, 69, 128) 295040
max_pooling2d_9 (MaxPoolin (None, 34, 34, 128) 0
g2D)
conv2d_10 (Conv2D) (None, 32, 32, 64) 73792
max_pooling2d_10 (MaxPooli (None, 16, 16, 64) 0
ng2D)
conv2d_11 (Conv2D) (None, 14, 14, 32) 18464
max_pooling2d_11 (MaxPooli (None, 7, 7, 32) 0
ng2D)
flatten_2 (Flatten) (None, 1568) 0
dropout_2 (Dropout) (None, 1568) 0
dense_4 (Dense) (None, 64) 100416
dense_5 (Dense) (None, 4) 260
=================================================================
Total params: 495140 (1.89 MB)
Trainable params: 495140 (1.89 MB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
history = model.fit(train_generator, epochs=50, validation_data=test_generator, shuffle=True, validation_steps=len(test_generator))
Epoch 1/50
43/43 [==============================] - 97s 2s/step - loss: 1.1451 - accuracy: 0.4937 - val_loss: 0.6538 - val_accuracy: 0.7197
Epoch 2/50
43/43 [==============================] - 96s 2s/step - loss: 0.4671 - accuracy: 0.8048 - val_loss: 0.3223 - val_accuracy: 0.8824
Epoch 3/50
43/43 [==============================] - 100s 2s/step - loss: 0.3778 - accuracy: 0.8382 - val_loss: 0.4045 - val_accuracy: 0.8304
Epoch 4/50
43/43 [==============================] - 98s 2s/step - loss: 0.3252 - accuracy: 0.8664 - val_loss: 0.2767 - val_accuracy: 0.9031
Epoch 5/50
43/43 [==============================] - 101s 2s/step - loss: 0.3268 - accuracy: 0.8708 - val_loss: 0.2687 - val_accuracy: 0.8979
Epoch 6/50
43/43 [==============================] - 99s 2s/step - loss: 0.2628 - accuracy: 0.9027 - val_loss: 0.2335 - val_accuracy: 0.9170
Epoch 7/50
43/43 [==============================] - 98s 2s/step - loss: 0.2640 - accuracy: 0.9035 - val_loss: 0.2139 - val_accuracy: 0.9170
Epoch 8/50
43/43 [==============================] - 97s 2s/step - loss: 0.2811 - accuracy: 0.8834 - val_loss: 0.2178 - val_accuracy: 0.9187
Epoch 9/50
43/43 [==============================] - 98s 2s/step - loss: 0.2285 - accuracy: 0.9094 - val_loss: 0.2297 - val_accuracy: 0.9083
Epoch 10/50
43/43 [==============================] - 98s 2s/step - loss: 0.2389 - accuracy: 0.9079 - val_loss: 0.2110 - val_accuracy: 0.9187
Epoch 11/50
43/43 [==============================] - 97s 2s/step - loss: 0.2206 - accuracy: 0.9094 - val_loss: 0.1907 - val_accuracy: 0.9291
Epoch 12/50
43/43 [==============================] - 97s 2s/step - loss: 0.1898 - accuracy: 0.9213 - val_loss: 0.1817 - val_accuracy: 0.9273
Epoch 13/50
43/43 [==============================] - 98s 2s/step - loss: 0.1816 - accuracy: 0.9250 - val_loss: 0.1635 - val_accuracy: 0.9412
Epoch 14/50
43/43 [==============================] - 95s 2s/step - loss: 0.1679 - accuracy: 0.9272 - val_loss: 0.1853 - val_accuracy: 0.9221
Epoch 15/50
43/43 [==============================] - 95s 2s/step - loss: 0.1750 - accuracy: 0.9265 - val_loss: 0.1613 - val_accuracy: 0.9291
Epoch 16/50
43/43 [==============================] - 95s 2s/step - loss: 0.1624 - accuracy: 0.9302 - val_loss: 0.1595 - val_accuracy: 0.9308
Epoch 17/50
43/43 [==============================] - 95s 2s/step - loss: 0.1502 - accuracy: 0.9391 - val_loss: 0.1751 - val_accuracy: 0.9343
Epoch 18/50
43/43 [==============================] - 95s 2s/step - loss: 0.1560 - accuracy: 0.9347 - val_loss: 0.2131 - val_accuracy: 0.9152
Epoch 19/50
43/43 [==============================] - 94s 2s/step - loss: 0.1663 - accuracy: 0.9332 - val_loss: 0.2166 - val_accuracy: 0.9066
Epoch 20/50
43/43 [==============================] - 94s 2s/step - loss: 0.1660 - accuracy: 0.9317 - val_loss: 0.1509 - val_accuracy: 0.9377
Epoch 21/50
43/43 [==============================] - 95s 2s/step - loss: 0.1417 - accuracy: 0.9465 - val_loss: 0.1719 - val_accuracy: 0.9343
Epoch 22/50
43/43 [==============================] - 94s 2s/step - loss: 0.1351 - accuracy: 0.9428 - val_loss: 0.1542 - val_accuracy: 0.9343
Epoch 23/50
43/43 [==============================] - 95s 2s/step - loss: 0.1360 - accuracy: 0.9488 - val_loss: 0.1729 - val_accuracy: 0.9291
Epoch 24/50
43/43 [==============================] - 95s 2s/step - loss: 0.1389 - accuracy: 0.9451 - val_loss: 0.1378 - val_accuracy: 0.9446
Epoch 25/50
43/43 [==============================] - 96s 2s/step - loss: 0.1424 - accuracy: 0.9384 - val_loss: 0.1424 - val_accuracy: 0.9446
Epoch 26/50
43/43 [==============================] - 95s 2s/step - loss: 0.1403 - accuracy: 0.9436 - val_loss: 0.1515 - val_accuracy: 0.9343
Epoch 27/50
43/43 [==============================] - 96s 2s/step - loss: 0.1126 - accuracy: 0.9562 - val_loss: 0.1217 - val_accuracy: 0.9412
Epoch 28/50
43/43 [==============================] - 95s 2s/step - loss: 0.1178 - accuracy: 0.9525 - val_loss: 0.1233 - val_accuracy: 0.9516
Epoch 29/50
43/43 [==============================] - 99s 2s/step - loss: 0.1201 - accuracy: 0.9555 - val_loss: 0.1082 - val_accuracy: 0.9637
Epoch 30/50
43/43 [==============================] - 100s 2s/step - loss: 0.1231 - accuracy: 0.9532 - val_loss: 0.1263 - val_accuracy: 0.9498
Epoch 31/50
43/43 [==============================] - 102s 2s/step - loss: 0.1316 - accuracy: 0.9495 - val_loss: 0.1244 - val_accuracy: 0.9567
Epoch 32/50
43/43 [==============================] - 99s 2s/step - loss: 0.1099 - accuracy: 0.9584 - val_loss: 0.1335 - val_accuracy: 0.9585
Epoch 33/50
43/43 [==============================] - 98s 2s/step - loss: 0.1181 - accuracy: 0.9488 - val_loss: 0.1388 - val_accuracy: 0.9516
Epoch 34/50
43/43 [==============================] - 99s 2s/step - loss: 0.0890 - accuracy: 0.9659 - val_loss: 0.1126 - val_accuracy: 0.9567
Epoch 35/50
43/43 [==============================] - 98s 2s/step - loss: 0.0952 - accuracy: 0.9629 - val_loss: 0.1715 - val_accuracy: 0.9412
Epoch 36/50
43/43 [==============================] - 98s 2s/step - loss: 0.1060 - accuracy: 0.9614 - val_loss: 0.1248 - val_accuracy: 0.9481
Epoch 37/50
43/43 [==============================] - 98s 2s/step - loss: 0.1036 - accuracy: 0.9614 - val_loss: 0.1906 - val_accuracy: 0.9481
Epoch 38/50
43/43 [==============================] - 98s 2s/step - loss: 0.1125 - accuracy: 0.9540 - val_loss: 0.1202 - val_accuracy: 0.9567
Epoch 39/50
43/43 [==============================] - 97s 2s/step - loss: 0.0926 - accuracy: 0.9651 - val_loss: 0.1265 - val_accuracy: 0.9550
Epoch 40/50
43/43 [==============================] - 99s 2s/step - loss: 0.0971 - accuracy: 0.9629 - val_loss: 0.1547 - val_accuracy: 0.9498
Epoch 41/50
43/43 [==============================] - 103s 2s/step - loss: 0.0963 - accuracy: 0.9644 - val_loss: 0.1281 - val_accuracy: 0.9516
Epoch 42/50
43/43 [==============================] - 99s 2s/step - loss: 0.0929 - accuracy: 0.9584 - val_loss: 0.1254 - val_accuracy: 0.9602
Epoch 43/50
43/43 [==============================] - 99s 2s/step - loss: 0.1046 - accuracy: 0.9592 - val_loss: 0.1362 - val_accuracy: 0.9602
Epoch 44/50
43/43 [==============================] - 98s 2s/step - loss: 0.0961 - accuracy: 0.9599 - val_loss: 0.0936 - val_accuracy: 0.9671
Epoch 45/50
43/43 [==============================] - 96s 2s/step - loss: 0.0990 - accuracy: 0.9577 - val_loss: 0.0851 - val_accuracy: 0.9689
Epoch 46/50
43/43 [==============================] - 101s 2s/step - loss: 0.0795 - accuracy: 0.9673 - val_loss: 0.0905 - val_accuracy: 0.9689
Epoch 47/50
43/43 [==============================] - 102s 2s/step - loss: 0.0921 - accuracy: 0.9666 - val_loss: 0.1430 - val_accuracy: 0.9637
Epoch 48/50
43/43 [==============================] - 100s 2s/step - loss: 0.0807 - accuracy: 0.9681 - val_loss: 0.1150 - val_accuracy: 0.9619
Epoch 49/50
43/43 [==============================] - 98s 2s/step - loss: 0.0757 - accuracy: 0.9696 - val_loss: 0.1108 - val_accuracy: 0.9689
Epoch 50/50
43/43 [==============================] - 98s 2s/step - loss: 0.1014 - accuracy: 0.9636 - val_loss: 0.1234 - val_accuracy: 0.9654
Training history
accuracy = history.history['accuracy']
val_accuracy = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(accuracy))
plt.plot(epochs, accuracy, "b", label="training accuracy")
plt.plot(epochs, val_accuracy, "r", label="validation accuracy")
plt.legend()
plt.show()
plt.plot(epochs, loss, "b", label="training loss")
plt.plot(epochs, val_loss, "r", label="validation loss")
plt.legend()
plt.show()
Saving the model
model.save("drowiness_new6.h5")
C:\Users\KDT103\anaconda3\lib\site-packages\keras\src\engine\training.py:3000: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.
saving_api.save_model(
model.save("drowiness_new6.keras")
Model prediction
predictions = model.predict(X_test)
predicted_classes = np.argmax(predictions, axis=1)
predicted_classes
19/19 [==============================] - 9s 457ms/step
array([3, 3, 2, 3, 1, 3, 3, 2, 0, 2, 3, 3, 3, 2, 2, 2, 3, 1, 0, 3, 0, 3,
2, 1, 1, 2, 2, 2, 2, 2, 3, 2, 3, 2, 0, 3, 3, 2, 1, 3, 2, 3, 2, 3,
2, 2, 3, 3, 3, 3, 3, 1, 2, 1, 3, 3, 2, 2, 2, 0, 3, 3, 0, 2, 2, 3,
2, 3, 2, 2, 1, 3, 3, 3, 2, 0, 0, 3, 1, 3, 2, 0, 3, 2, 2, 2, 2, 1,
3, 0, 2, 3, 3, 3, 1, 0, 3, 0, 3, 3, 3, 1, 3, 1, 2, 3, 2, 1, 2, 2,
1, 1, 2, 0, 0, 3, 3, 3, 2, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 0, 3, 2,
0, 3, 1, 2, 3, 3, 2, 1, 2, 2, 2, 1, 3, 3, 2, 3, 2, 0, 2, 3, 3, 3,
3, 0, 1, 0, 3, 1, 3, 2, 2, 3, 2, 3, 3, 2, 3, 2, 3, 1, 2, 0, 3, 2,
2, 2, 0, 2, 3, 0, 2, 1, 3, 2, 3, 1, 2, 0, 1, 3, 2, 2, 3, 3, 3, 0,
3, 2, 3, 0, 2, 0, 0, 1, 2, 3, 2, 0, 3, 1, 2, 2, 2, 2, 1, 3, 1, 1,
1, 0, 2, 3, 3, 3, 3, 0, 1, 2, 1, 0, 3, 3, 0, 3, 3, 1, 3, 3, 0, 0,
0, 0, 3, 3, 3, 3, 3, 2, 3, 3, 2, 3, 2, 0, 2, 2, 2, 0, 3, 3, 2, 2,
3, 2, 2, 0, 2, 3, 2, 2, 2, 3, 3, 3, 3, 3, 0, 1, 0, 3, 3, 3, 3, 2,
2, 1, 2, 2, 2, 3, 2, 0, 2, 2, 1, 3, 2, 3, 1, 3, 3, 0, 3, 3, 0, 3,
3, 2, 2, 2, 1, 1, 0, 3, 3, 3, 3, 1, 3, 0, 3, 2, 2, 1, 3, 3, 3, 2,
3, 3, 2, 3, 3, 3, 3, 2, 1, 3, 2, 2, 2, 1, 2, 0, 3, 3, 2, 3, 2, 2,
2, 2, 1, 2, 0, 2, 1, 0, 2, 2, 2, 1, 3, 2, 3, 3, 3, 2, 2, 2, 2, 2,
2, 3, 0, 3, 3, 0, 0, 1, 3, 2, 2, 1, 0, 2, 2, 3, 2, 1, 3, 2, 3, 3,
3, 0, 0, 3, 0, 2, 3, 2, 2, 3, 3, 3, 3, 2, 3, 3, 0, 2, 3, 2, 3, 1,
1, 3, 2, 2, 3, 1, 3, 2, 2, 2, 3, 2, 1, 2, 1, 1, 2, 2, 1, 3, 2, 3,
3, 3, 2, 2, 3, 2, 1, 1, 2, 2, 2, 2, 2, 0, 2, 3, 3, 2, 0, 3, 2, 2,
2, 2, 3, 3, 0, 0, 1, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 2, 0, 2, 3, 2,
2, 2, 2, 3, 2, 3, 2, 2, 3, 2, 3, 2, 3, 1, 0, 2, 2, 3, 2, 3, 0, 1,
1, 3, 1, 0, 3, 2, 2, 0, 2, 2, 3, 2, 0, 3, 3, 3, 3, 3, 0, 3, 2, 3,
2, 2, 3, 2, 2, 2, 3, 3, 3, 1, 2, 1, 3, 3, 1, 0, 3, 3, 3, 2, 2, 0,
3, 2, 2, 2, 2, 2, 2, 3, 2, 2, 1, 3, 2, 2, 3, 3, 2, 3, 2, 3, 2, 2,
3, 2, 2, 3, 2, 2], dtype=int64)
Classification report
labels_new = ["yawn", "no_yawn", "Closed", "Open"]
from sklearn.metrics import classification_report
print(classification_report(np.argmax(y_test, axis=1), predicted_classes, target_names = labels_new))
              precision    recall  f1-score   support

        yawn       0.80      0.89      0.84        63
     no_yawn       0.89      0.85      0.87        74
      Closed       0.96      0.98      0.97       215
        Open       0.98      0.94      0.96       226

    accuracy                           0.94       578
   macro avg       0.91      0.91      0.91       578
weighted avg       0.94      0.94      0.94       578
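A confusion matrix makes it easier to see which classes get mixed up than the report alone (a quick sketch):
from sklearn.metrics import confusion_matrix
# rows = true classes, columns = predicted classes
print(confusion_matrix(np.argmax(y_test, axis=1), predicted_classes))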
Predicting on individual images
labels_new = ["yawn", "no_yawn", "Closed", "Open"]
IMG_SIZE = 145
def prepare(filepath, face_cas="haarcascade_frontalface_default.xml"):
    img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)
    img_array = img_array / 255
    resized_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
    return resized_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)
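Note that prepare() never actually uses its face_cas argument: unlike face_for_yawn(), it feeds the whole frame to the model rather than a face crop. A variant that crops first might match the training distribution for the yawn/no_yawn classes better (a sketch, assuming a detectable face; prepare_cropped is a hypothetical name):
def prepare_cropped(filepath, face_cas="haarcascade_frontalface_default.xml"):
    # hypothetical variant of prepare() that crops the detected face, mirroring face_for_yawn()
    img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)
    faces = cv2.CascadeClassifier(face_cas).detectMultiScale(img_array, 1.3, 5)
    for (x, y, w, h) in faces:
        img_array = img_array[y:y+h, x:x+w]  # keep the last detected face
    resized_array = cv2.resize(img_array / 255, (IMG_SIZE, IMG_SIZE))
    return resized_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)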
model = tf.keras.models.load_model("drowiness_new6.keras", compile=False)
prediction = model.predict([prepare("archive/train/no_yawn/1067.jpg")])
np.argmax(prediction)
1/1 [==============================] - 0s 77ms/step
2
prediction = model.predict([prepare("archive/train/Closed/_101.jpg")])
np.argmax(prediction)
1/1 [==============================] - 0s 35ms/step
2
prediction = model.predict([prepare("archive/train/Open/_104.jpg")])
np.argmax(prediction)
1/1 [==============================] - 0s 32ms/step
3
prediction = model.predict([prepare("archive/train/yawn/113.jpg")])
np.argmax(prediction)
1/1 [==============================] - 0s 31ms/step
3
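Since argmax only returns an index, mapping it through labels_new gives a readable answer (the index order matches the class numbers assigned during preprocessing: 0=yawn, 1=no_yawn, 2=Closed, 3=Open):
# map the predicted index back to its class name
pred = model.predict(prepare("archive/train/yawn/113.jpg"))
print(labels_new[int(np.argmax(pred))])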
Training the model took a little over 30 minutes.
Errors encountered
1. When loading the model, I got a Unicode error: UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc0 in position 31: invalid start byte.
The code that raised it:
labels_new = ["yawn", "no_yawn", "Closed", "Open"]
IMG_SIZE = 145
def prepare(filepath, face_cas = "haarcascade_frontalface_default.xml"):
img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)
img_array = img_array / 255
resized_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
return resized_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)
model = tf.keras.models.load_model("drowiness_new6.h5")
Solution
I fixed it by saving and loading the model in the native .keras format instead of HDF5 (.h5).
labels_new = ["yawn", "no_yawn", "Closed", "Open"]
IMG_SIZE = 145
def prepare(filepath, face_cas = "haarcascade_frontalface_default.xml"):
img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)
img_array = img_array / 255
resized_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
return resized_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)
model = tf.keras.models.load_model("drowiness_new6.keras", compile=False)
2. The line prediction = model.predict_classes(X_test) raised AttributeError: 'Sequential' object has no attribute 'predict_classes'.
Solution
In recent versions of TensorFlow and Keras, predict_classes has been removed from the Sequential model.
Instead, use predict to get the class probabilities, then take the index of the maximum probability as the predicted class.
The same result can be obtained with predict and NumPy's argmax:
import numpy as np
predictions = model.predict(X_test)
predicted_classes = np.argmax(predictions, axis=1)
Now that the model is built, tomorrow I plan to write the drowsiness-prevention code that uses it:
- building a GUI
- detecting faces through a webcam
- recognizing whether the eyes are closed or open