diff --git a/examples/mask-detector-workflow/blurry-faces.sh b/examples/mask-detector-workflow/blurry-faces.sh
new file mode 100644
index 00000000..a6207520
--- /dev/null
+++ b/examples/mask-detector-workflow/blurry-faces.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+VIDEO_NAME=$(basename "$INPUT_FILE_PATH")
+SUBFOLDER_NAME=$(echo "$VIDEO_NAME" | cut -f 1 -d '.')
+OUTPUT_SUBFOLDER="$TMP_OUTPUT_DIR/$SUBFOLDER_NAME"
+
+mkdir -p "$OUTPUT_SUBFOLDER"
+
+echo "SCRIPT: Analyzing file '$INPUT_FILE_PATH', saving the output images in '$OUTPUT_SUBFOLDER'"
+
+# extract one frame every five seconds (12 frames per minute)
+ffmpeg -i "$INPUT_FILE_PATH" -vf fps=12/60 "$OUTPUT_SUBFOLDER/img%d.jpg"
+
+# blur the faces in every extracted frame, overwriting each image in place
+for IMAGE in "$OUTPUT_SUBFOLDER"/*
+do
+  python auto_blur_image.py -i "$IMAGE" -o "$IMAGE"
+done
diff --git a/examples/mask-detector-workflow/blurry-faces/Dockerfile b/examples/mask-detector-workflow/blurry-faces/Dockerfile
new file mode 100644
index 00000000..58d00a59
--- /dev/null
+++ b/examples/mask-detector-workflow/blurry-faces/Dockerfile
@@ -0,0 +1,15 @@
+FROM python:slim-buster
+
+RUN pip install --no-cache-dir opencv-python numpy tensorflow && \
+    rm -rf /root/.cache/pip/* && \
+    rm -rf /tmp/*
+
+# libgl1 and libglib2.0-0 are runtime dependencies of opencv-python on slim images
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends ffmpeg libgl1 libglib2.0-0 && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+COPY . /opt/blurry-faces
+
+WORKDIR /opt/blurry-faces/src
+
diff --git a/examples/mask-detector-workflow/blurry-faces/src/DetectorAPI.py b/examples/mask-detector-workflow/blurry-faces/src/DetectorAPI.py
new file mode 100644
index 00000000..97cc1153
--- /dev/null
+++ b/examples/mask-detector-workflow/blurry-faces/src/DetectorAPI.py
@@ -0,0 +1,64 @@
+import tensorflow as tf
+import numpy as np
+import time
+
+class DetectorAPI:
+    def __init__(self, path_to_ckpt):
+        self.path_to_ckpt = path_to_ckpt
+
+        self.detection_graph = tf.Graph()
+        with self.detection_graph.as_default():
+            od_graph_def = tf.compat.v1.GraphDef()
+            with tf.io.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
+                serialized_graph = fid.read()
+                od_graph_def.ParseFromString(serialized_graph)
+                tf.import_graph_def(od_graph_def, name='')
+
+        self.default_graph = self.detection_graph.as_default()
+        self.sess = tf.compat.v1.Session(graph=self.detection_graph)
+
+        # Define the input and output tensors for detection_graph
+        self.image_tensor = self.detection_graph.get_tensor_by_name(
+            'image_tensor:0')
+        # Each box represents a part of the image where a particular object was detected.
+        self.detection_boxes = self.detection_graph.get_tensor_by_name(
+            'detection_boxes:0')
+        # Each score represents the level of confidence for each of the objects.
+        # The score is shown on the result image, together with the class label.
+        self.detection_scores = self.detection_graph.get_tensor_by_name(
+            'detection_scores:0')
+        self.detection_classes = self.detection_graph.get_tensor_by_name(
+            'detection_classes:0')
+        self.num_detections = self.detection_graph.get_tensor_by_name(
+            'num_detections:0')
+
+    def processFrame(self, image):
+        # Expand dimensions since the trained model expects images to have shape: [1, None, None, 3]
+        image_np_expanded = np.expand_dims(image, axis=0)
+        # Actual detection.
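+        # a single sess.run() call evaluates all four output tensors at
+        # once, feeding the batched frame into image_tensor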
+        start_time = time.time()
+        (boxes, scores, classes, num) = self.sess.run(
+            [self.detection_boxes, self.detection_scores,
+             self.detection_classes, self.num_detections],
+            feed_dict={self.image_tensor: image_np_expanded})
+        end_time = time.time()
+
+        print("Elapsed Time:", end_time - start_time)
+
+        im_height, im_width, _ = image.shape
+        # convert the normalized [ymin, xmin, ymax, xmax] boxes into pixel
+        # (x_top_left, y_top_left, x_bottom_right, y_bottom_right) tuples
+        boxes_list = [None] * boxes.shape[1]
+        for i in range(boxes.shape[1]):
+            boxes_list[i] = (int(boxes[0, i, 1] * im_width),
+                             int(boxes[0, i, 0] * im_height),
+                             int(boxes[0, i, 3] * im_width),
+                             int(boxes[0, i, 2] * im_height))
+
+        return boxes_list, scores[0].tolist(), [
+            int(x) for x in classes[0].tolist()
+        ], int(num[0])
+
+    def close(self):
+        self.sess.close()
+        self.default_graph.close()
diff --git a/examples/mask-detector-workflow/blurry-faces/src/auto_blur_image.py b/examples/mask-detector-workflow/blurry-faces/src/auto_blur_image.py
new file mode 100644
index 00000000..ebcf4fd5
--- /dev/null
+++ b/examples/mask-detector-workflow/blurry-faces/src/auto_blur_image.py
@@ -0,0 +1,104 @@
+# author: Asmaa Mirkhan ~ 2019
+
+import os
+import argparse
+import cv2 as cv
+from DetectorAPI import DetectorAPI
+
+def blurBoxes(image, boxes):
+    """
+    Arguments:
+    image -- the image that will be edited, as a matrix
+    boxes -- list of boxes that will be blurred; each box must be in the
+             format (x_top_left, y_top_left, x_bottom_right, y_bottom_right)
+
+    Returns:
+    image -- the blurred image as a matrix
+    """
+
+    for box in boxes:
+        # unpack the current box
+        x1, y1, x2, y2 = box
+
+        # crop the region covered by the current box
+        sub = image[y1:y2, x1:x2]
+
+        # apply an averaging blur to the cropped area
+        blur = cv.blur(sub, (10, 10))
+
+        # paste the blurred region back onto the original image
+        image[y1:y2, x1:x2] = blur
+
+    return image
+
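+# usage sketch (with made-up coordinates): blurBoxes(image, [(10, 20, 110, 140)])
+# blurs the rectangle whose opposite corners are (10, 20) and (110, 140)
+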
+
+def main(args):
+    # assign the model path and threshold
+    model_path = args.model_path
+    threshold = args.threshold
+
+    # create the detection object
+    odapi = DetectorAPI(path_to_ckpt=model_path)
+
+    # open the image
+    image = cv.imread(args.input_image)
+
+    # run face detection
+    boxes, scores, classes, num = odapi.processFrame(image)
+
+    # keep only the boxes above the confidence threshold
+    # boxes are in (x_top_left, y_top_left, x_bottom_right, y_bottom_right) format
+    boxes = [boxes[i] for i in range(0, num) if scores[i] > threshold]
+
+    # apply blurring
+    image = blurBoxes(image, boxes)
+
+    # save the image if an output path was given, otherwise display it
+    if args.output_image:
+        cv.imwrite(args.output_image, image)
+        print('Image has been saved successfully at', args.output_image)
+    else:
+        cv.imshow('blurred', image)
+        # close the window and stop the program once any key is pressed
+        cv.waitKey(0)
+        cv.destroyAllWindows()
+
+
+if __name__ == "__main__":
+    # create the argument parser
+    parser = argparse.ArgumentParser(description='Image blurring parameters')
+
+    # add arguments
+    parser.add_argument('-i',
+                        '--input_image',
+                        help='Path to your image',
+                        type=str,
+                        required=True)
+    parser.add_argument('-m',
+                        '--model_path',
+                        default='/opt/blurry-faces/face_model/face.pb',
+                        help='Path to .pb model',
+                        type=str)
+    parser.add_argument('-o',
+                        '--output_image',
+                        help='Output file path',
+                        type=str)
+    parser.add_argument('-t',
+                        '--threshold',
+                        help='Face detection confidence',
+                        default=0.7,
+                        type=float)
+    args = parser.parse_args()
+    print(args)
+    # stop if the input image path is invalid
+    assert os.path.isfile(args.input_image), 'Invalid input file'
+
+    # stop if the output directory is invalid
+    if args.output_image:
+        assert os.path.isdir(os.path.dirname(
+            args.output_image)), 'No such directory'
+
+    main(args)
diff --git a/examples/mask-detector-workflow/blurry-faces/src/auto_blur_video.py b/examples/mask-detector-workflow/blurry-faces/src/auto_blur_video.py
new file mode 100644
index 00000000..6f5fc600
--- /dev/null
+++ b/examples/mask-detector-workflow/blurry-faces/src/auto_blur_video.py
@@ -0,0 +1,125 @@
+# author: Asmaa Mirkhan ~ 2019
+
+import os
+import argparse
+import cv2 as cv
+from DetectorAPI import DetectorAPI
+
+def blurBoxes(image, boxes):
+    """
+    Arguments:
+    image -- the image that will be edited, as a matrix
+    boxes -- list of boxes that will be blurred; each box must be in the
+             format (x_top_left, y_top_left, x_bottom_right, y_bottom_right)
+
+    Returns:
+    image -- the blurred image as a matrix
+    """
+
+    for box in boxes:
+        # unpack the current box
+        x1, y1, x2, y2 = box
+
+        # crop the region covered by the current box
+        sub = image[y1:y2, x1:x2]
+
+        # apply an averaging blur to the cropped area
+        blur = cv.blur(sub, (25, 25))
+
+        # paste the blurred region back onto the original image
+        image[y1:y2, x1:x2] = blur
+
+    return image
+
+
+def main(args):
+    # assign the model path and threshold
+    model_path = args.model_path
+    threshold = args.threshold
+
+    # create the detection object
+    odapi = DetectorAPI(path_to_ckpt=model_path)
+
+    # open the video
+    capture = cv.VideoCapture(args.input_video)
+
+    if args.output_video:
+        # write at the source resolution; the frame rate is fixed at 20 fps here
+        fourcc = cv.VideoWriter_fourcc(*'mp4v')
+        output = cv.VideoWriter(args.output_video, fourcc, 20.0,
+                                (int(capture.get(cv.CAP_PROP_FRAME_WIDTH)),
+                                 int(capture.get(cv.CAP_PROP_FRAME_HEIGHT))))
+
+    frame_counter = 0
+    while True:
+        # read the video frame by frame
+        r, frame = capture.read()
+        frame_counter += 1
+
+        # the end of the video?
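+        # capture.read() returns (ret, frame); frame is None once the
+        # stream is exhausted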
+        if frame is None:
+            break
+
+        key = cv.waitKey(1)
+        if key & 0xFF == ord('q'):
+            break
+
+        # run face detection
+        boxes, scores, classes, num = odapi.processFrame(frame)
+
+        # keep only the boxes above the confidence threshold
+        # boxes are in (x_top_left, y_top_left, x_bottom_right, y_bottom_right) format
+        boxes = [boxes[i] for i in range(0, num) if scores[i] > threshold]
+
+        # apply blurring
+        frame = blurBoxes(frame, boxes)
+
+        # show the current frame
+        cv.imshow('blurred', frame)
+
+        # append the frame to the output video if one was requested
+        if args.output_video:
+            output.write(frame)
+
+    # release the writer so the file is finalized, then report once
+    if args.output_video:
+        output.release()
+        print('Blurred video has been saved successfully at',
+              args.output_video)
+
+    capture.release()
+    cv.destroyAllWindows()
+
+
+if __name__ == "__main__":
+    # create the argument parser
+    parser = argparse.ArgumentParser(description='Video blurring parameters')
+
+    # add arguments
+    parser.add_argument('-i',
+                        '--input_video',
+                        help='Path to your video',
+                        type=str,
+                        required=True)
+    parser.add_argument('-m',
+                        '--model_path',
+                        help='Path to .pb model',
+                        type=str,
+                        required=True)
+    parser.add_argument('-o',
+                        '--output_video',
+                        help='Output file path',
+                        type=str)
+    parser.add_argument('-t',
+                        '--threshold',
+                        help='Face detection confidence',
+                        default=0.7,
+                        type=float)
+    args = parser.parse_args()
+    print(args)
+    # stop if the input video path is invalid
+    assert os.path.isfile(args.input_video), 'Invalid input file'
+
+    # stop if the output directory is invalid
+    if args.output_video:
+        assert os.path.isdir(os.path.dirname(
+            args.output_video)), 'No such directory'
+
+    main(args)
diff --git a/examples/mask-detector-workflow/blurry-faces/src/manual_blur_image.py b/examples/mask-detector-workflow/blurry-faces/src/manual_blur_image.py
new file mode 100644
index 00000000..5b1a25e6
--- /dev/null
+++ b/examples/mask-detector-workflow/blurry-faces/src/manual_blur_image.py
@@ -0,0 +1,91 @@
+# author: Asmaa Mirkhan ~ 2019
+
+import os
+import argparse
+import cv2 as cv
+
+
+def blurBoxes(image, boxes):
+    """
+    Arguments:
+    image -- the image that will be edited, as a matrix
+    boxes -- list of boxes that will be blurred; each box must be in the
+             format (x_top_left, y_top_left, width, height)
+
+    Returns:
+    image -- the blurred image as a matrix
+    """
+
+    for box in boxes:
+        # unpack the current box
+        x, y, w, h = box
+
+        # crop the region covered by the current box
+        sub = image[y:y+h, x:x+w]
+
+        # apply a Gaussian blur to the cropped area
+        blur = cv.GaussianBlur(sub, (23, 23), 30)
+
+        # paste the blurred region back onto the original image
+        image[y:y+h, x:x+w] = blur
+
+    return image
+
+
+def main(args):
+    # open the image
+    image = cv.imread(args.input_image)
+
+    # work on a copy so temporary drawings don't affect the original image
+    temp_image = image.copy()
+
+    # an array to store the coordinates of the selected regions
+    ROIs = []
+
+    # keep collecting ROIs until 'q' is pressed
+    while True:
+        # cv.selectROI(window_name, image, fromCenter) opens an interactive
+        # selector and returns the chosen box as (x, y, w, h)
+        box = cv.selectROI('blur', temp_image, fromCenter=False)
+
+        # add the selected box to the box list
+        ROIs.append(box)
+
+        # draw a rectangle around the selected ROI
+        cv.rectangle(temp_image, (box[0], box[1]),
+                     (box[0] + box[2], box[1] + box[3]), (0, 255, 0), 3)
+        print("ROI is saved; press 'q' to stop capturing or any other key to select another ROI")
+
+        # break if 'q' is pressed
+        key = cv.waitKey(0)
+        if key & 0xFF == ord('q'):
+            break
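+    # note: all ROIs are collected before any blurring is applied, so
+    # overlapping selections are simply blurred twice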
+    # apply blurring
+    image = blurBoxes(image, ROIs)
+
+    # save the image if an output path was given
+    if args.output_image:
+        cv.imwrite(args.output_image, image)
+    cv.imshow('blurred', image)
+    cv.waitKey(0)
+
+
+if __name__ == "__main__":
+    # create the argument parser
+    parser = argparse.ArgumentParser(description='Image blurring parameters')
+
+    # add arguments
+    parser.add_argument('-i', '--input_image',
+                        help='Path to your image', type=str, required=True)
+    parser.add_argument('-o', '--output_image',
+                        help='Output file path', type=str)
+    args = parser.parse_args()
+
+    # stop if the input image path is invalid
+    assert os.path.isfile(args.input_image), 'Invalid input file'
+
+    # stop if the output directory is invalid
+    if args.output_image:
+        assert os.path.isdir(os.path.dirname(
+            args.output_image)), 'No such directory'
+
+    main(args)
diff --git a/examples/mask-detector-workflow/mask-detector.sh b/examples/mask-detector-workflow/mask-detector.sh
new file mode 100644
index 00000000..3ad6892a
--- /dev/null
+++ b/examples/mask-detector-workflow/mask-detector.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# the object key ends in <video-name>/<image-name>, so mirror that
+# subfolder in the output
+SUBFOLDER_NAME=$(basename "$(dirname "$STORAGE_OBJECT_KEY")")
+
+mkdir -p "$TMP_OUTPUT_DIR/$SUBFOLDER_NAME"
+
+IMAGE_NAME=$(basename "$INPUT_FILE_PATH")
+OUTPUT_IMAGE="$TMP_OUTPUT_DIR/$SUBFOLDER_NAME/$IMAGE_NAME"
+
+echo "SCRIPT: Analyzing file '$INPUT_FILE_PATH', saving the output image in '$OUTPUT_IMAGE'"
+
+python mask-detector-image.py --image "$INPUT_FILE_PATH" --output "$OUTPUT_IMAGE"
+
diff --git a/examples/mask-detector-workflow/mask-detector/Dockerfile b/examples/mask-detector-workflow/mask-detector/Dockerfile
new file mode 100644
index 00000000..779ccd97
--- /dev/null
+++ b/examples/mask-detector-workflow/mask-detector/Dockerfile
@@ -0,0 +1,18 @@
+FROM python:slim-buster
+
+RUN pip install --no-cache-dir opencv-python numpy && \
+    rm -rf /root/.cache/pip/* && \
+    rm -rf /tmp/*
+
+# libgl1 and libglib2.0-0 are runtime dependencies of opencv-python on slim images
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends libgl1 libglib2.0-0 && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+RUN mkdir /opt/mask-detector
+
+WORKDIR /opt/mask-detector
+
+COPY mask-detector-image.py .
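+# cfg/ provides the network definition and class names; the weights file the
+# script expects (cfg/yolov3-tiny_obj_train_tiny8.weights) does not appear in
+# this diff and must be supplied to the image separately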
+COPY cfg cfg + diff --git a/examples/mask-detector-workflow/mask-detector/cfg/obj.names b/examples/mask-detector-workflow/mask-detector/cfg/obj.names new file mode 100644 index 00000000..34c34705 --- /dev/null +++ b/examples/mask-detector-workflow/mask-detector/cfg/obj.names @@ -0,0 +1,2 @@ +Mask +No Mask \ No newline at end of file diff --git a/examples/mask-detector-workflow/mask-detector/cfg/yolov3-tiny_obj_test.cfg b/examples/mask-detector-workflow/mask-detector/cfg/yolov3-tiny_obj_test.cfg new file mode 100644 index 00000000..32ed0eca --- /dev/null +++ b/examples/mask-detector-workflow/mask-detector/cfg/yolov3-tiny_obj_test.cfg @@ -0,0 +1,182 @@ +[net] +# Testing +batch=1 +subdivisions=1 +# Training +#batch=64 +#subdivisions=16 +width=416 +height=416 +channels=3 +momentum=0.9 +decay=0.0005 +angle=0 +saturation = 1.5 +exposure = 1.5 +hue=.1 + +learning_rate=0.001 +burn_in=1000 +max_batches = 5002000 +policy=steps +steps=400000,450000 +scales=.1,.1 + +[convolutional] +batch_normalize=1 +filters=16 +size=3 +stride=1 +pad=1 +activation=leaky + +[maxpool] +size=2 +stride=2 + +[convolutional] +batch_normalize=1 +filters=32 +size=3 +stride=1 +pad=1 +activation=leaky + +[maxpool] +size=2 +stride=2 + +[convolutional] +batch_normalize=1 +filters=64 +size=3 +stride=1 +pad=1 +activation=leaky + +[maxpool] +size=2 +stride=2 + +[convolutional] +batch_normalize=1 +filters=128 +size=3 +stride=1 +pad=1 +activation=leaky + +[maxpool] +size=2 +stride=2 + +[convolutional] +batch_normalize=1 +filters=256 +size=3 +stride=1 +pad=1 +activation=leaky + +[maxpool] +size=2 +stride=2 + +[convolutional] +batch_normalize=1 +filters=512 +size=3 +stride=1 +pad=1 +activation=leaky + +[maxpool] +size=2 +stride=1 + +[convolutional] +batch_normalize=1 +filters=1024 +size=3 +stride=1 +pad=1 +activation=leaky + +########### + +[convolutional] +batch_normalize=1 +filters=256 +size=1 +stride=1 +pad=1 +activation=leaky + +[convolutional] +batch_normalize=1 +filters=512 +size=3 +stride=1 +pad=1 +activation=leaky + +[convolutional] +size=1 +stride=1 +pad=1 +filters=21 +activation=linear + + + +[yolo] +mask = 3,4,5 +anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319 +classes=2 +num=6 +jitter=.3 +ignore_thresh = .7 +truth_thresh = 1 +random=1 + +[route] +layers = -4 + +[convolutional] +batch_normalize=1 +filters=128 +size=1 +stride=1 +pad=1 +activation=leaky + +[upsample] +stride=2 + +[route] +layers = -1, 8 + +[convolutional] +batch_normalize=1 +filters=256 +size=3 +stride=1 +pad=1 +activation=leaky + +[convolutional] +size=1 +stride=1 +pad=1 +filters=21 +activation=linear + +[yolo] +mask = 0,1,2 +anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319 +classes=2 +num=6 +jitter=.3 +ignore_thresh = .7 +truth_thresh = 1 +random=1 diff --git a/examples/mask-detector-workflow/mask-detector/cfg/yolov3-tiny_obj_train.cfg b/examples/mask-detector-workflow/mask-detector/cfg/yolov3-tiny_obj_train.cfg new file mode 100644 index 00000000..a7a0d9ad --- /dev/null +++ b/examples/mask-detector-workflow/mask-detector/cfg/yolov3-tiny_obj_train.cfg @@ -0,0 +1,182 @@ +[net] +# Testing +#batch=1 +#subdivisions=1 +# Training +batch=64 +subdivisions=16 +width=416 +height=416 +channels=3 +momentum=0.9 +decay=0.0005 +angle=0 +saturation = 1.5 +exposure = 1.5 +hue=.1 + +learning_rate=0.001 +burn_in=1000 +max_batches = 50020000 +policy=steps +steps=4000,4500 +scales=.1,.1 + +[convolutional] +batch_normalize=1 +filters=16 +size=3 +stride=1 +pad=1 +activation=leaky + +[maxpool] +size=2 +stride=2 + +[convolutional] +batch_normalize=1 +filters=32 
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[maxpool]
+size=2
+stride=2
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[maxpool]
+size=2
+stride=2
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[maxpool]
+size=2
+stride=2
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[maxpool]
+size=2
+stride=2
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[maxpool]
+size=2
+stride=1
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=1
+pad=1
+activation=leaky
+
+###########
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=21
+activation=linear
+
+
+
+[yolo]
+mask = 3,4,5
+anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
+classes=2
+num=6
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[upsample]
+stride=2
+
+[route]
+layers = -1, 8
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=21
+activation=linear
+
+[yolo]
+mask = 0,1,2
+anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
+classes=2
+num=6
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
diff --git a/examples/mask-detector-workflow/mask-detector/mask-detector-image.py b/examples/mask-detector-workflow/mask-detector/mask-detector-image.py
new file mode 100644
index 00000000..6867c848
--- /dev/null
+++ b/examples/mask-detector-workflow/mask-detector/mask-detector-image.py
@@ -0,0 +1,141 @@
+# import the necessary libs
+import numpy as np
+import argparse
+import time
+import cv2
+import os
+
+# construct the argument parser and parse the arguments
+ap = argparse.ArgumentParser()
+ap.add_argument("-i", "--image", required=True, help="path to input image")
+ap.add_argument("-o", "--output", help="path to output image")
+ap.add_argument("-y", "--yolo", default="/opt/mask-detector/cfg", help="base path to YOLO cfg directory")
+ap.add_argument("-c", "--confidence", type=float, default=0.2, help="minimum probability to filter weak detections")
+ap.add_argument("-t", "--threshold", type=float, default=0.1, help="threshold when applying non-max suppression")
+args = vars(ap.parse_args())
+
+# load the class labels our YOLO model was trained on
+labelsPath = os.path.sep.join([args["yolo"], "obj.names"])
+LABELS = open(labelsPath).read().strip().split("\n")
+
+# initialize a list of colors to represent each possible class label
+# (BGR order: green for Mask, red for No Mask)
+COLORS = [[0, 255, 0], [0, 0, 255]]
+
+# derive the paths to the YOLO weights and model configuration
+weightsPath = os.path.sep.join([args["yolo"], "yolov3-tiny_obj_train_tiny8.weights"])
+configPath = os.path.sep.join([args["yolo"], "yolov3-tiny_obj_train.cfg"])
+
+# load our YOLO object detector
+print("[INFO] loading YOLO from disk...")
+net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
+
+# load our input image and get its height and width
+image = cv2.imread(args["image"])
+(H, W) = image.shape[:2]
+
+# determine only the *output* layer names that we need from YOLO
+ln = net.getLayerNames()
+
+ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
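+# note: OpenCV >= 4.5.4 returns a flat array from getUnconnectedOutLayers(),
+# so the i[0] indexing above assumes the older Nx1 return shape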
+
+# construct a blob from the input image and then perform a forward
+# pass of the YOLO object detector, giving us our bounding boxes and
+# associated probabilities
+blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)
+net.setInput(blob)
+start = time.time()
+layerOutputs = net.forward(ln)  # one array per YOLO output layer
+end = time.time()
+
+# show timing information on YOLO
+print("[INFO] YOLO took {:.6f} seconds".format(end - start))
+
+# initialize our lists of detected bounding boxes, confidences, and
+# class IDs, respectively
+boxes = []
+confidences = []
+classIDs = []
+
+# loop over each of the layer outputs
+for output in layerOutputs:
+
+    # loop over each of the detections
+    for detection in output:
+
+        # extract the class ID and confidence (i.e., probability) of the
+        # current object detection; the class scores are the last two
+        # values in the vector (Mask, No Mask)
+        scores = detection[5:]
+        classID = np.argmax(scores)
+        confidence = scores[classID]
+        # filter out weak predictions by ensuring the detected
+        # probability is greater than the minimum probability
+        if confidence > args["confidence"]:
+            # scale the bounding box coordinates back relative to the
+            # size of the image, keeping in mind that YOLO actually
+            # returns the center (x, y)-coordinates of the bounding
+            # box followed by the box's width and height
+            box = detection[0:4] * np.array([W, H, W, H])
+            (centerX, centerY, width, height) = box.astype("int")
+            # use the center (x, y)-coordinates to derive the top
+            # left corner of the bounding box
+            x = int(centerX - (width / 2))
+            y = int(centerY - (height / 2))
+            # update our list of bounding box coordinates, confidences,
+            # and class IDs
+            boxes.append([x, y, int(width), int(height)])
+            confidences.append(float(confidence))
+            classIDs.append(classID)
+
+# apply non-maxima suppression to suppress weak, overlapping bounding boxes
+idxs = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"], args["threshold"])
+
+border_size = 100
+border_text_color = [255, 255, 255]
+# add a top border to the image to display the stats
+image = cv2.copyMakeBorder(image, border_size, 0, 0, 0, cv2.BORDER_CONSTANT)
+# calculate the count values
+filtered_classids = np.take(classIDs, idxs)
+mask_count = (filtered_classids == 0).sum()
+nomask_count = (filtered_classids == 1).sum()
+# display the counts
+text = "NoMaskCount: {} MaskCount: {}".format(nomask_count, mask_count)
+cv2.putText(image, text, (0, int(border_size - 50)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, border_text_color, 2)
+# display the status
+text = "Status:"
+cv2.putText(image, text, (W - 300, int(border_size - 50)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, border_text_color, 2)
+# guard against division by zero when there are no detections at all
+total_count = mask_count + nomask_count
+ratio = nomask_count / total_count if total_count else 0.0
+
+if ratio >= 0.1 and nomask_count >= 3:
+    text = "Danger !"
+    cv2.putText(image, text, (W - 200, int(border_size - 50)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, [26, 13, 247], 2)
+
+elif ratio != 0:
+    text = "Warning !"
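+    # OpenCV colors are in BGR order, so [0, 255, 255] is yellow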
+    cv2.putText(image, text, (W - 200, int(border_size - 50)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, [0, 255, 255], 2)
+
+else:
+    text = "Safe "
+    cv2.putText(image, text, (W - 200, int(border_size - 50)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, [0, 255, 0], 2)
+
+# ensure at least one detection exists
+if len(idxs) > 0:
+
+    # loop over the indexes we are keeping
+    for i in idxs.flatten():
+
+        # extract the bounding box coordinates, shifting y down by the
+        # height of the stats border added above
+        (x, y) = (boxes[i][0], boxes[i][1] + border_size)
+        (w, h) = (boxes[i][2], boxes[i][3])
+        # draw a bounding box rectangle and label on the image
+        color = [int(c) for c in COLORS[classIDs[i]]]
+        cv2.rectangle(image, (x, y), (x + w, y + h), color, 1)
+        text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
+        cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
+
+if args["output"]:
+    # save the image
+    cv2.imwrite(args["output"], image)
+else:
+    # show the output image
+    cv2.imshow("Image", image)
+    cv2.waitKey(0)
diff --git a/examples/mask-detector-workflow/scar-mask-detector-workflow.yaml b/examples/mask-detector-workflow/scar-mask-detector-workflow.yaml
new file mode 100644
index 00000000..757d998c
--- /dev/null
+++ b/examples/mask-detector-workflow/scar-mask-detector-workflow.yaml
@@ -0,0 +1,37 @@
+functions:
+  aws:
+  - lambda:
+      name: scar-mask-detector
+      memory: 512
+      init_script: mask-detector.sh
+      container:
+        image: grycap/mask-detector-yolo:mini
+      input:
+      - storage_provider: s3
+        path: scar-mask-detector/input
+      output:
+      - storage_provider: s3
+        path: scar-mask-detector/output
+  oscar:
+  - my_oscar:
+      name: oscar-anon-and-split
+      memory: 1Gi
+      cpu: '1.0'
+      image: grycap/blurry-faces
+      script: blurry-faces.sh
+      input:
+      - storage_provider: minio
+        path: oscar-anon-and-split/input
+      output:
+      - storage_provider: minio
+        path: oscar-anon-and-split/output
+      - storage_provider: s3.my_s3
+        path: scar-mask-detector/input
+
+storage_providers:
+  s3:
+    my_s3:
+      access_key: xxxxxx
+      secret_key: xxxxxx
+      region: us-east-1
+
diff --git a/fdl-example.yaml b/fdl-example.yaml
index 8602d27d..7832ecde 100644
--- a/fdl-example.yaml
+++ b/fdl-example.yaml
@@ -176,14 +176,14 @@ functions:
       # CPU limit for the service following the kubernetes format
       # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu
       # Optional. (default: 0.2)
-      cpu: 1.0
+      cpu: '1.0'
       # Log level for the FaaS Supervisor
       # Can be INFO, DEBUG, ERROR, WARNING
       # Default 'INFO'
       log_level: INFO
       # Container image to use. REQUIRED
       image: grycap/darknet
-      script:
+      script: my-script.sh
       # Environment variables of the function
       environment:
         Variables: