当前位置 : 主页 > 编程语言 > python >

笑脸识别(加入空间注意机制)

来源:互联网 收集:自由互联 发布时间:2022-06-28
(Note: the page originally showed a garbled single-line rendering of the imports here; the properly formatted source code follows below.)
import os, shutil
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow.keras import optimizers, regularizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import cv2
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import load_model
import numpy as np
import glob

# Root directory of the raw smile dataset on disk.
original_dataset_dir = 'smiledataset'

# Base directory holding the train/validation/test split; each split is
# expected to contain 'smile' and 'nosmile' class subdirectories.
base_dir = 'smiledataset/smile_and_nosmile'

# Directory of validation "smile" images that use_model() runs inference on.
person_test_path = 'smiledataset/smile_and_nosmile/validation/smile'

def dataCollection_create_model():
    """Build, train, and save the smile/no-smile CNN classifier.

    Expects the dataset to already be split under ``base_dir`` into
    ``train``/``validation``/``test`` folders, each with ``smile`` and
    ``nosmile`` class subdirectories.  Saves the trained model to
    ``smiledataset/smile.h5``, plots training/validation curves, and
    finally shows four augmented variants of one sample training image.

    NOTE(review): the page title mentions a spatial-attention mechanism,
    but no attention layer appears in this model — confirm against the
    intended architecture.
    """
    train_dir = os.path.join(base_dir, 'train')
    validation_dir = os.path.join(base_dir, 'validation')
    # Only the training "smile" folder is referenced again below (for the
    # augmentation demo); the other class folders are discovered
    # automatically by flow_from_directory from the directory layout.
    train_smile_dir = os.path.join(train_dir, 'smile')

    # Small VGG-style CNN: four conv/pool stages, then a dense head with
    # a single sigmoid unit for binary (smile vs. nosmile) classification.
    # L2 regularization on the deeper conv layers curbs overfitting.
    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu',
                            input_shape=(150, 150, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu',
                            kernel_regularizer=regularizers.l2(0.001)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu',
                            kernel_regularizer=regularizers.l2(0.001)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu',
                            kernel_regularizer=regularizers.l2(0.001)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.summary()

    # `learning_rate` replaces the deprecated `lr` keyword argument.
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.RMSprop(learning_rate=1e-4),
                  metrics=['acc'])

    # Augment only the training data; validation images are just rescaled.
    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(150, 150),  # all images resized to 150x150
        batch_size=32,
        # Since we use binary_crossentropy loss, we need binary labels.
        class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=32,
        class_mode='binary')

    # model.fit accepts generators directly; fit_generator is deprecated.
    history = model.fit(
        train_generator,
        steps_per_epoch=1024,
        epochs=100,
        validation_data=validation_generator,
        validation_steps=50)

    model.save("smiledataset/smile.h5")

    # Plot accuracy and loss curves for both splits.
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(acc))

    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()

    plt.figure()
    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()

    # Visualize what the augmentation pipeline does to one sample image.
    datagen = ImageDataGenerator(
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')

    fnames = [os.path.join(train_smile_dir, fname)
              for fname in os.listdir(train_smile_dir)]
    img_path = fnames[3]  # arbitrary sample to augment
    img = image.load_img(img_path, target_size=(150, 150))
    x = image.img_to_array(img)    # (150, 150, 3)
    x = x.reshape((1,) + x.shape)  # (1, 150, 150, 3): a batch of one

    # .flow() loops indefinitely, so break after four augmented variants.
    i = 0
    for batch in datagen.flow(x, batch_size=1):
        plt.figure(i)
        plt.imshow(image.array_to_img(batch[0]))
        i += 1
        if i % 4 == 0:
            break
    plt.show()

def read_list(search_dir=None):
    """Return all ``*.jpg`` paths in a directory with ``/`` separators.

    Args:
        search_dir: Directory to scan.  Defaults to the module-level
            ``person_test_path`` when omitted, preserving the original
            no-argument call style.

    Returns:
        list[str]: matching file paths, with backslashes (as produced by
        glob on Windows) normalized to forward slashes.
    """
    if search_dir is None:
        search_dir = person_test_path
    image_list = glob.glob(os.path.join(search_dir, '*.jpg'))
    # Fix: the original replaced a two-backslash sequence ("\\\\" in the
    # source), which never matches the single backslashes glob produces
    # on Windows, so paths were returned unnormalized.
    return [p.replace('\\', '/') for p in image_list]

def use_model():
    """Run the saved classifier over the validation smile images.

    Loads ``smiledataset/smile.h5``, predicts smile/no-smile for every
    image returned by ``read_list()``, writes an annotated copy of each
    image under ``smiledataset/result/smile/``, and prints the counts.
    """
    model = load_model('smiledataset/smile.h5')
    img_path_list = read_list()
    count_smile = 0
    count_nosmile = 0
    font = cv2.FONT_HERSHEY_COMPLEX  # loop-invariant, hoisted out of the loop

    for img_path in img_path_list:
        img2 = cv2.imread(img_path)

        # Preprocess exactly as during training: 150x150, scaled to [0, 1],
        # with a leading batch dimension.
        img = image.load_img(img_path, target_size=(150, 150))
        img_tensor = image.img_to_array(img) / 255.0
        img_tensor = np.expand_dims(img_tensor, axis=0)

        prediction = model.predict(img_tensor)
        print(prediction)
        # Sigmoid output > 0.5 means the positive class; with binary
        # flow_from_directory labels that is the alphabetically later
        # class name, i.e. 'smile'.
        if prediction[0][0] > 0.5:
            count_smile += 1
            cv2.putText(img2, 'smile', (50, 150), font, 1, (0, 0, 255), 3)
        else:
            count_nosmile += 1
            cv2.putText(img2, 'nosmile', (50, 150), font, 1, (0, 0, 255), 3)

        # NOTE(review): every annotated image — smile or not — is written
        # to the .../result/smile/ folder; confirm whether nosmile results
        # should go to a separate directory.
        path, filename = os.path.split(img_path)
        cv2.imwrite("smiledataset/result/smile/" + filename, img2,
                    [int(cv2.IMWRITE_JPEG_QUALITY), 100])

    # Fix: the scraped source printed a literal backslash-n ('\\n'); use a
    # real newline, and str() instead of the C-style (str)(x) cast.
    print("总数:" + str(len(img_path_list)) + '\n'
          + "smile 数量:" + str(count_smile) + '\n'
          + "nosmile 数量:" + str(count_nosmile))

if __name__ == '__main__':
    # Train and persist the classifier, then run it over the
    # validation smile images and write annotated copies.
    dataCollection_create_model()
    use_model()
上一篇:minist手写数据集识别
下一篇:没有了
网友评论