火灾如果发现得早就可以及时扑灭,减少损失。一般的大厦中都有火灾烟雾感应器,一旦检测到烟雾超过一定的量就会喷水灭火。那么可不可以通过摄像头和 Python 来检测火灾呢?答案是可以的,今天小编来教大家如何用 Python 识别火灾。
▊ 模型构建
我们今天使用专门检测火灾与烟雾的神经网络 FireDetectionNet,该网络利用深度可分离卷积而不是标准卷积。深度可分离卷积有以下优点:
★效率更高,因为Edge / IoT物联网设备将具有有限的CPU和功耗。
★需要更少的内存,同样,Edge / IoT物联网设备的RAM有限。
★由于我们的CPU能力有限,因此需要较少的计算。
★在某些情况下,其性能可能比标准卷积更好,更适合火灾/烟雾探测器。
下面是模型代码:
# import the necessary packages
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import SeparableConv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense


class FireDetectionNet:
    """CNN for fire/smoke classification.

    Built from depthwise-separable convolutions (SeparableConv2D) instead
    of standard convolutions, trading a little accuracy headroom for far
    less compute and memory — a good fit for edge/IoT deployments.
    """

    @staticmethod
    def build(width, height, depth, classes):
        """Construct and return the (uncompiled) Keras model.

        width, height, depth: input image dimensions, "channels last".
        classes: number of output classes for the softmax head.
        """
        model = Sequential()
        inputShape = (height, width, depth)
        chanDim = -1  # channels-last => BatchNorm normalizes the last axis

        def add_sep_conv(filters, kernel, **kwargs):
            # SeparableConv2D => RELU => BatchNorm
            model.add(SeparableConv2D(filters, kernel, padding="same",
                **kwargs))
            model.add(Activation("relu"))
            model.add(BatchNormalization(axis=chanDim))

        # CONV => RELU => POOL
        add_sep_conv(16, (7, 7), input_shape=inputShape)
        model.add(MaxPooling2D(pool_size=(2, 2)))

        # CONV => RELU => POOL
        add_sep_conv(32, (3, 3))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        # (CONV => RELU) * 2 => POOL
        add_sep_conv(64, (3, 3))
        add_sep_conv(64, (3, 3))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        # two FC => RELU heads, each with dropout for regularization
        model.add(Flatten())
        for _ in range(2):
            model.add(Dense(128))
            model.add(Activation("relu"))
            model.add(BatchNormalization())
            model.add(Dropout(0.5))

        # softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        # return the constructed network architecture
        return model
# set the matplotlib backend so figures can be saved in the background
import matplotlib
matplotlib.use("Agg")

# import the necessary packages
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from pyimagesearch.learningratefinder import LearningRateFinder
from pyimagesearch.firedetectionnet import FireDetectionNet
from pyimagesearch import config
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import cv2
import sys


def load_dataset(datasetPath):
    """Load every image under datasetPath as a float32 NumPy array.

    Each image is resized to a fixed 128x128 pixels (aspect ratio is
    ignored). Unreadable files are skipped rather than crashing.
    """
    # grab the paths to all images in our dataset directory, then
    # initialize our list of images
    imagePaths = list(paths.list_images(datasetPath))
    data = []

    # loop over the image paths
    for imagePath in imagePaths:
        image = cv2.imread(imagePath)
        if image is None:
            # BUGFIX: cv2.imread returns None for unreadable/corrupt
            # files; resizing None would raise inside the loop
            continue
        image = cv2.resize(image, (128, 128))
        data.append(image)

    # return the data list as a NumPy array
    return np.array(data, dtype="float32")


# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--lr-find", type=int, default=0,
    help="whether or not to find optimal learning rate")
args = vars(ap.parse_args())

# load the fire and non-fire images
print("[INFO] loading data...")
fireData = load_dataset(config.FIRE_PATH)
nonFireData = load_dataset(config.NON_FIRE_PATH)

# construct the class labels for the data (1 = fire, 0 = non-fire)
fireLabels = np.ones((fireData.shape[0],))
nonFireLabels = np.zeros((nonFireData.shape[0],))

# stack the fire data with the non-fire data, then scale the data
# to the range [0, 1]
data = np.vstack([fireData, nonFireData])
labels = np.hstack([fireLabels, nonFireLabels])
data /= 255

# perform one-hot encoding on the labels and account for skew in the
# labeled data
labels = to_categorical(labels, num_classes=2)
classTotals = labels.sum(axis=0)
# BUGFIX: Keras expects class_weight to be a dict mapping class index
# to weight, not a bare NumPy array
classWeight = dict(enumerate(classTotals.max() / classTotals))

# construct the training and testing split
(trainX, testX, trainY, testY) = train_test_split(data, labels,
    test_size=config.TEST_SPLIT, random_state=42)

# initialize the training data augmentation object
aug = ImageDataGenerator(
    rotation_range=30,
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode="nearest")

# initialize the optimizer and model
# NOTE(review): `lr`/`decay` are legacy SGD argument names; on TF >= 2.11
# use `learning_rate` and a LearningRateSchedule instead — confirm
# against the installed TensorFlow version
print("[INFO] compiling model...")
opt = SGD(lr=config.INIT_LR, momentum=0.9,
    decay=config.INIT_LR / config.NUM_EPOCHS)
model = FireDetectionNet.build(width=128, height=128, depth=3, classes=2)
model.compile(loss="binary_crossentropy", optimizer=opt,
    metrics=["accuracy"])

# check to see if we are attempting to find an optimal learning rate
# before training for the full number of epochs
if args["lr_find"] > 0:
    # initialize the learning rate finder and then train with learning
    # rates ranging from 1e-10 to 1e+1
    print("[INFO] finding learning rate...")
    lrf = LearningRateFinder(model)
    # NOTE(review): classWeight is now a dict; presumably
    # LearningRateFinder forwards it to Keras fit — verify
    lrf.find(
        aug.flow(trainX, trainY, batch_size=config.BATCH_SIZE),
        1e-10, 1e+1,
        stepsPerEpoch=np.ceil((trainX.shape[0] / float(config.BATCH_SIZE))),
        epochs=20,
        batchSize=config.BATCH_SIZE,
        classWeight=classWeight)

    # plot the loss for the various learning rates and save the
    # resulting plot to disk
    lrf.plot_loss()
    plt.savefig(config.LRFIND_PLOT_PATH)

    # gracefully exit the script so we can adjust our learning rates
    # in the config and then train the network for our full set of
    # epochs
    print("[INFO] learning rate finder complete")
    print("[INFO] examine plot and adjust learning rates before training")
    sys.exit(0)

# train the network
# BUGFIX: use model.fit — it accepts generators directly, and
# fit_generator was removed from TensorFlow/Keras in recent releases
print("[INFO] training network...")
H = model.fit(
    aug.flow(trainX, trainY, batch_size=config.BATCH_SIZE),
    validation_data=(testX, testY),
    steps_per_epoch=trainX.shape[0] // config.BATCH_SIZE,
    epochs=config.NUM_EPOCHS,
    class_weight=classWeight,
    verbose=1)

# evaluate the network and show a classification report
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=config.BATCH_SIZE)
print(classification_report(testY.argmax(axis=1),
    predictions.argmax(axis=1), target_names=config.CLASSES))

# serialize the model to disk
print("[INFO] serializing network to '{}'...".format(config.MODEL_PATH))
model.save(config.MODEL_PATH)

# construct a plot that plots and saves the training history
N = np.arange(0, config.NUM_EPOCHS)
plt.style.use("ggplot")
plt.figure()
plt.plot(N, H.history["loss"], label="train_loss")
plt.plot(N, H.history["val_loss"], label="val_loss")
plt.plot(N, H.history["accuracy"], label="train_acc")
plt.plot(N, H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(config.TRAINING_PLOT_PATH)
▊ 预测
# import the necessary packages
from tensorflow.keras.models import load_model
from pyimagesearch import config
from imutils import paths
import numpy as np
import imutils
import random
import cv2
import os
# load the trained model from disk
print("[INFO] loading model...")
model = load_model(config.MODEL_PATH)

# grab the paths to the fire and non-fire images, respectively
print("[INFO] predicting...")
firePaths = list(paths.list_images(config.FIRE_PATH))
nonFirePaths = list(paths.list_images(config.NON_FIRE_PATH))

# combine the two image path lists, randomly shuffle them, and sample
# them
imagePaths = firePaths + nonFirePaths
random.shuffle(imagePaths)
imagePaths = imagePaths[:config.SAMPLE_SIZE]

# BUGFIX: make sure the output directory exists before writing results;
# cv2.imwrite fails silently when the directory is missing
os.makedirs(config.OUTPUT_IMAGE_PATH, exist_ok=True)

# loop over the sampled image paths
for (i, imagePath) in enumerate(imagePaths):
    # load the image and clone it
    image = cv2.imread(imagePath)
    if image is None:
        # BUGFIX: cv2.imread returns None for unreadable/corrupt files;
        # calling .copy()/resize on None would crash the whole run
        print("[WARNING] could not read {}, skipping".format(imagePath))
        continue
    output = image.copy()

    # resize the input image to be a fixed 128x128 pixels, ignoring
    # aspect ratio, and scale pixels to [0, 1] to match the training
    # preprocessing
    image = cv2.resize(image, (128, 128))
    image = image.astype("float32") / 255.0

    # make predictions on the image (batch of one)
    preds = model.predict(np.expand_dims(image, axis=0))[0]
    j = np.argmax(preds)
    label = config.CLASSES[j]

    # draw the predicted label on the output frame
    text = label if label == "Non-Fire" else "WARNING! Fire!"
    output = imutils.resize(output, width=500)
    cv2.putText(output, text, (35, 50), cv2.FONT_HERSHEY_SIMPLEX,
        1.25, (0, 255, 0), 5)

    # write the output image to disk
    filename = "{}.png".format(i)
    p = os.path.sep.join([config.OUTPUT_IMAGE_PATH, filename])
    cv2.imwrite(p, output)
运行预测程序
$ python predict_fire.py
网友评论0