diff --git a/sedna-deploy/check1.png b/sedna-deploy/check1.png
new file mode 100644
index 00000000..8bb3803c
Binary files /dev/null and b/sedna-deploy/check1.png differ
diff --git a/sedna-deploy/check2.png b/sedna-deploy/check2.png
new file mode 100644
index 00000000..5175303a
Binary files /dev/null and b/sedna-deploy/check2.png differ
diff --git a/sedna-deploy/doc.pdf b/sedna-deploy/doc.pdf
new file mode 100644
index 00000000..ecd38f33
Binary files /dev/null and b/sedna-deploy/doc.pdf differ
diff --git a/sedna-deploy/joint_inference/helmet_detection_inference/README.md b/sedna-deploy/joint_inference/helmet_detection_inference/README.md
new file mode 100644
index 00000000..9fe80aae
--- /dev/null
+++ b/sedna-deploy/joint_inference/helmet_detection_inference/README.md
@@ -0,0 +1,195 @@
+# Using Joint Inference Service in Helmet Detection Scenario
+
+This case introduces how to use the joint inference service in a helmet detection scenario.
+In the safety helmet detection scenario, helmet detection shows lower performance due to limited resources at the edge.
+However, the joint inference service can improve overall performance: hard examples identified by the hard example mining algorithm are uploaded to the cloud and inferred there.
+The data used in the experiment is a video of workers wearing safety helmets.
+The joint inference service is required to detect the wearing of safety helmets in the video.
+
+## Helmet Detection Experiment
+
+### Install Sedna
+
+Follow the [Sedna installation document](/docs/setup/install.md) to install Sedna.
+
+### Prepare Data and Model
+
+* step1: download the [little model](https://kubeedge.obs.cn-north-1.myhuaweicloud.com/examples/helmet-detection-inference/little-model.tar.gz) to your edge node.
+
+```
+mkdir -p /data/little-model
+cd /data/little-model
+wget https://kubeedge.obs.cn-north-1.myhuaweicloud.com/examples/helmet-detection-inference/little-model.tar.gz
+tar -zxvf little-model.tar.gz
+```
+
+* step2: download the [big model](https://kubeedge.obs.cn-north-1.myhuaweicloud.com/examples/helmet-detection-inference/big-model.tar.gz) to your cloud node.
+
+```
+mkdir -p /data/big-model
+cd /data/big-model
+wget https://kubeedge.obs.cn-north-1.myhuaweicloud.com/examples/helmet-detection-inference/big-model.tar.gz
+tar -zxvf big-model.tar.gz
+```
+
+### Prepare Images
+This example uses these images:
+1. little model inference worker: ```kubeedge/sedna-example-joint-inference-helmet-detection-little:v0.3.0```
+2. big model inference worker: ```kubeedge/sedna-example-joint-inference-helmet-detection-big:v0.3.0```
+
+These images are generated by the script [build_images.sh](/examples/build_image.sh).
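+
+The little model runs on every frame at the edge; only frames judged "hard" by the hard example mining algorithm are uploaded to the big model in the cloud. The sketch below illustrates the idea behind the `IBT` (image-box-threshold) algorithm configured in the next section. It is a simplified illustration only — the two thresholds map to the `threshold_box` and `threshold_img` parameters of the service, but the exact rule lives in Sedna's `IBTFilter` implementation.
+
+```python
+def is_hard_example(bboxes, threshold_img=0.9, threshold_box=0.9):
+    """Illustrative IBT-style check (not Sedna's exact code).
+
+    bboxes: detections whose fifth element (index 4) is a confidence score.
+    A frame is "hard" when too few boxes are detected confidently.
+    """
+    if not bboxes:
+        return True  # nothing detected: let the cloud take a look
+    confident = [b for b in bboxes if float(b[4]) >= threshold_box]
+    # hard if the share of confident boxes falls below 1 - threshold_img
+    return len(confident) / len(bboxes) < (1 - threshold_img)
+```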
+
+### Create Joint Inference Service
+
+#### Create Big Model Resource Object for Cloud
+
+```
+kubectl create -f - <<EOF
+apiVersion: sedna.io/v1alpha1
+kind: Model
+metadata:
+  name: helmet-detection-inference-big-model
+  namespace: default
+spec:
+  url: "/data/big-model/yolov3_darknet.pb"
+  format: "pb"
+EOF
+```
diff --git a/sedna-deploy/joint_inference/helmet_detection_inference/big_model/interface.py b/sedna-deploy/joint_inference/helmet_detection_inference/big_model/interface.py
new file mode 100644
--- /dev/null
+++ b/sedna-deploy/joint_inference/helmet_detection_inference/big_model/interface.py
+    invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]),
+                                 (pred_coor[:, 1] > pred_coor[:, 3]))
+    pred_coor[invalid_mask] = 0
+
+    # discard some invalid boxes
+    bboxes_scale = np.sqrt(
+        np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1))
+    scale_mask = np.logical_and((valid_scale[0] < bboxes_scale),
+                                (bboxes_scale < valid_scale[1]))
+
+    # discard some boxes with low scores
+    classes = np.argmax(pred_prob, axis=-1)
+    scores = pred_conf * pred_prob[np.arange(len(pred_coor)), classes]
+    score_mask = scores > score_threshold
+
+    mask = np.logical_and(scale_mask, score_mask)
+    coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]
+
+    bboxes = np.concatenate(
+        [coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis=-1)
+    bboxes = nms(bboxes, 0.4)
+    return bboxes
+
+
+def bboxes_iou(boxes1, boxes2):
+    boxes1 = np.array(boxes1)
+    boxes2 = np.array(boxes2)
+
+    boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (
+        boxes1[..., 3] - boxes1[..., 1])
+    boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (
+        boxes2[..., 3] - boxes2[..., 1])
+
+    left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
+    right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
+
+    inter_section = np.maximum(right_down - left_up, 0.0)
+    inter_area = inter_section[..., 0] * inter_section[..., 1]
+    union_area = boxes1_area + boxes2_area - inter_area
+    ious = np.maximum(1.0 * inter_area / union_area, np.finfo(np.float32).eps)
+
+    return ious
+
+
+def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
+    """
+    :param bboxes: (xmin, ymin, xmax, ymax, score, class)
+    Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
+          https://github.com/bharatsingh430/soft-nms
+    """
+    classes_in_img = list(set(bboxes[:, 5]))
+    best_bboxes = []
+
+    for cls in classes_in_img:
+        cls_mask = (bboxes[:, 5] == cls)
+        cls_bboxes = bboxes[cls_mask]
+
+        while len(cls_bboxes) > 0:
+            max_ind = np.argmax(cls_bboxes[:, 4])
+            best_bbox = cls_bboxes[max_ind]
+            best_bbox_ = best_bbox.tolist()
+
+            # cast the class id into int
+            best_bbox_[5] = int(best_bbox[5])
+
+            best_bboxes.append(best_bbox_)
+            cls_bboxes = np.concatenate(
+                [cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
+            iou = bboxes_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
+            weight = np.ones((len(iou),), dtype=np.float32)
+
+            assert method in ['nms', 'soft-nms']
+
+            if method == 'nms':
+                iou_mask = iou > iou_threshold
+                weight[iou_mask] = 0.0
+
+            if method == 'soft-nms':
+                weight = np.exp(-(1.0 * iou ** 2 / sigma))
+
+            cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
+            score_mask = cls_bboxes[:, 4] > 0.
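+            # keep only boxes whose (possibly soft-NMS-decayed) score
+            # is still positive; hard NMS zeroes suppressed boxes outright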
+            cls_bboxes = cls_bboxes[score_mask]
+
+    return best_bboxes
+
+
+def create_input_feed(sess, new_image, img_data=None):
+    """Create input feed for cloud model inference"""
+    input_feed = {}
+
+    input_img_data = sess.graph.get_tensor_by_name('input/input_data:0')
+    input_feed[input_img_data] = new_image
+
+    return input_feed
+
+
+def create_output_fetch(sess):
+    """Create output fetch for cloud model inference"""
+    pred_sbbox = sess.graph.get_tensor_by_name('pred_sbbox/concat_2:0')
+    pred_mbbox = sess.graph.get_tensor_by_name('pred_mbbox/concat_2:0')
+    pred_lbbox = sess.graph.get_tensor_by_name('pred_lbbox/concat_2:0')
+
+    output_fetch = [pred_sbbox, pred_mbbox, pred_lbbox]
+    return output_fetch
+
+
+class Estimator:
+
+    def __init__(self, **kwargs):
+        """
+        Initialize the TensorFlow session for the big model.
+        """
+        graph = tf.Graph()
+        config = tf.ConfigProto(allow_soft_placement=True)
+        config.gpu_options.allow_growth = True
+        config.gpu_options.per_process_gpu_memory_fraction = 0.1
+        self.session = tf.Session(graph=graph, config=config)
+        self.input_shape = [544, 544]
+        self.create_input_feed = create_input_feed
+        self.create_output_fetch = create_output_fetch
+
+    def load(self, model_url=""):
+        with self.session.as_default():
+            with self.session.graph.as_default():
+                with tf.gfile.FastGFile(model_url, 'rb') as handle:
+                    LOG.info(f"Load model {model_url}, "
+                             f"ParseFromString start .......")
+                    graph_def = tf.GraphDef()
+                    graph_def.ParseFromString(handle.read())
+                    LOG.info("ParseFromString end .......")
+
+                    tf.import_graph_def(graph_def, name='')
+                    LOG.info("Import_graph_def end .......")
+
+        LOG.info("Import model from pb end .......")
+
+    def predict(self, data, **kwargs):
+        img_data_np = np.array(data)
+        new_image, shapes = preprocess(img_data_np, self.input_shape)
+        with self.session.as_default():
+            input_feed = self.create_input_feed(
+                self.session, new_image, img_data_np)
+            output_fetch = self.create_output_fetch(self.session)
+            output = self.session.run(output_fetch, input_feed)
+            return postprocess(output, shapes)
diff --git a/sedna-deploy/joint_inference/helmet_detection_inference/helmet_detection_inference.yaml b/sedna-deploy/joint_inference/helmet_detection_inference/helmet_detection_inference.yaml
new file mode 100644
index 00000000..9ecb4430
--- /dev/null
+++ b/sedna-deploy/joint_inference/helmet_detection_inference/helmet_detection_inference.yaml
@@ -0,0 +1,66 @@
+apiVersion: sedna.io/v1alpha1
+kind: JointInferenceService
+metadata:
+  name: helmet-detection-inference-example
+  namespace: default
+spec:
+  edgeWorker:
+    model:
+      name: "helmet-detection-inference-little-model"
+    hardExampleMining:
+      name: "IBT"
+      parameters:
+        - key: "threshold_img"
+          value: "0.9"
+        - key: "threshold_box"
+          value: "0.9"
+    template:
+      spec:
+        nodeName: $EDGE_NODE
+        containers:
+        - image: kubeedge/sedna-example-joint-inference-helmet-detection-little:v0.3.0
+          imagePullPolicy: IfNotPresent
+          name: little-model
+          env: # user defined environments
+          - name: input_shape
+            value: "416,736"
+          - name: "video_url"
+            value: "rtsp://localhost/video"
+          - name: "all_examples_inference_output"
+            value: "/data/output"
+          - name: "hard_example_cloud_inference_output"
+            value: "/data/hard_example_cloud_inference_output"
+          - name: "hard_example_edge_inference_output"
+            value: "/data/hard_example_edge_inference_output"
+          resources: # user defined resources
+            requests:
+              memory: 64M
+              cpu: 100m
+            limits:
+              memory: 2Gi
+          volumeMounts:
+          - name: outputdir
+            mountPath: /data/
+        volumes: # user defined volumes
+        - name: outputdir
+          hostPath:
+            # user must create the directory on the host
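+            # for example, on the edge node: mkdir -p /joint_inference/output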
+            path: /joint_inference/output
+            type: Directory
+
+  cloudWorker:
+    model:
+      name: "helmet-detection-inference-big-model"
+    template:
+      spec:
+        nodeName: $CLOUD_NODE
+        containers:
+        - image: kubeedge/sedna-example-joint-inference-helmet-detection-big:v0.3.0
+          name: big-model
+          imagePullPolicy: IfNotPresent
+          env: # user defined environments
+          - name: "input_shape"
+            value: "544,544"
+          resources: # user defined resources
+            requests:
+              memory: 2Gi
diff --git a/sedna-deploy/joint_inference/helmet_detection_inference/images/inference-result.png b/sedna-deploy/joint_inference/helmet_detection_inference/images/inference-result.png
new file mode 100644
index 00000000..4a1e5d0b
Binary files /dev/null and b/sedna-deploy/joint_inference/helmet_detection_inference/images/inference-result.png differ
diff --git a/sedna-deploy/joint_inference/helmet_detection_inference/little_model/interface.py b/sedna-deploy/joint_inference/helmet_detection_inference/little_model/interface.py
new file mode 100644
index 00000000..e927a9b2
--- /dev/null
+++ b/sedna-deploy/joint_inference/helmet_detection_inference/little_model/interface.py
@@ -0,0 +1,124 @@
+# Copyright 2021 The KubeEdge Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import logging
+
+import cv2
+import numpy as np
+import tensorflow as tf
+
+LOG = logging.getLogger(__name__)
+os.environ['BACKEND_TYPE'] = 'TENSORFLOW'
+flags = tf.flags.FLAGS
+
+
+def create_input_feed(sess, new_image, img_data):
+    """Create input feed for edge model inference"""
+    input_feed = {}
+
+    input_img_data = sess.graph.get_tensor_by_name('images:0')
+    input_feed[input_img_data] = new_image
+
+    input_img_shape = sess.graph.get_tensor_by_name('shapes:0')
+    input_feed[input_img_shape] = [img_data.shape[0], img_data.shape[1]]
+
+    return input_feed
+
+
+def create_output_fetch(sess):
+    """Create output fetch for edge model inference"""
+    output_classes = sess.graph.get_tensor_by_name('concat_19:0')
+    output_scores = sess.graph.get_tensor_by_name('concat_18:0')
+    output_boxes = sess.graph.get_tensor_by_name('concat_17:0')
+
+    output_fetch = [output_classes, output_scores, output_boxes]
+    return output_fetch
+
+
+class Estimator:
+
+    def __init__(self, **kwargs):
+        """
+        Initialize the TensorFlow session for the little model.
+        """
+        graph = tf.Graph()
+        config = tf.ConfigProto(allow_soft_placement=True)
+        config.gpu_options.allow_growth = True
+        config.gpu_options.per_process_gpu_memory_fraction = 0.1
+        self.session = tf.Session(graph=graph, config=config)
+        self.input_shape = [416, 736]
+        self.create_input_feed = create_input_feed
+        self.create_output_fetch = create_output_fetch
+
+    def load(self, model_url=""):
+        with self.session.as_default():
+            with self.session.graph.as_default():
+                with tf.gfile.FastGFile(model_url, 'rb') as handle:
+                    LOG.info(f"Load model {model_url}, "
+                             f"ParseFromString start .......")
+                    graph_def = tf.GraphDef()
+                    graph_def.ParseFromString(handle.read())
+                    LOG.info("ParseFromString end .......")
+
+                    tf.import_graph_def(graph_def, name='')
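+                    # name='' imports the frozen graph without a prefix, so
+                    # tensor names such as 'images:0' keep their original
+                    # names and can be resolved by create_input_feed and
+                    # create_output_fetch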
+                    LOG.info("Import_graph_def end .......")
+
+        LOG.info("Import model from pb end .......")
+
+    @staticmethod
+    def preprocess(image, input_shape):
+        """Preprocess functions in edge model inference"""
+
+        # resize the image with unchanged aspect ratio, padding with OpenCV
+
+        h, w, _ = image.shape
+
+        input_h, input_w = input_shape
+        scale = min(float(input_w) / float(w), float(input_h) / float(h))
+        nw = int(w * scale)
+        nh = int(h * scale)
+
+        image = cv2.resize(image.astype(np.float32), (nw, nh))
+
+        new_image = np.zeros((input_h, input_w, 3), np.float32)
+        new_image.fill(128)
+        bh, bw, _ = new_image.shape
+        new_image[int((bh - nh) / 2):(nh + int((bh - nh) / 2)),
+                  int((bw - nw) / 2):(nw + int((bw - nw) / 2)), :] = image
+
+        new_image /= 255.
+        new_image = np.expand_dims(new_image, 0)  # Add batch dimension.
+        return new_image
+
+    @staticmethod
+    def postprocess(model_output):
+        all_classes, all_scores, all_bboxes = model_output
+        bboxes = []
+        for c, s, bbox in zip(all_classes, all_scores, all_bboxes):
+            # swap the first/second and third/fourth coordinates so boxes
+            # match the order expected by the downstream drawing code
+            bbox[0], bbox[1], bbox[2], bbox[3] = bbox[1].tolist(
+            ), bbox[0].tolist(), bbox[3].tolist(), bbox[2].tolist()
+            bboxes.append(bbox.tolist() + [s.tolist(), c.tolist()])
+
+        return bboxes
+
+    def predict(self, data, **kwargs):
+        img_data_np = np.array(data)
+        with self.session.as_default():
+            new_image = self.preprocess(img_data_np, self.input_shape)
+            input_feed = self.create_input_feed(
+                self.session, new_image, img_data_np)
+            output_fetch = self.create_output_fetch(self.session)
+            output = self.session.run(output_fetch, input_feed)
+            return self.postprocess(output)
diff --git a/sedna-deploy/joint_inference/helmet_detection_inference/little_model/little_model.py b/sedna-deploy/joint_inference/helmet_detection_inference/little_model/little_model.py
new file mode 100644
index 00000000..f4c43fb3
--- /dev/null
+++ b/sedna-deploy/joint_inference/helmet_detection_inference/little_model/little_model.py
@@ -0,0 +1,175 @@
+# Copyright 2021 The KubeEdge Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
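+
+# Edge-side inference worker: read frames from an RTSP stream, run the
+# little model locally, let Sedna's JointInference decide per frame whether
+# the big model in the cloud should also infer it, and write annotated
+# frames to the configured output directories.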
+
+import time
+import copy
+import logging
+
+import cv2
+import numpy as np
+
+from sedna.common.config import Context
+from sedna.common.file_ops import FileOps
+from sedna.core.joint_inference import JointInference
+
+from interface import Estimator
+
+
+LOG = logging.getLogger(__name__)
+
+camera_address = Context.get_parameters('video_url')
+
+class_names = ['person', 'helmet', 'helmet_on', 'helmet_off']
+all_output_path = Context.get_parameters(
+    'all_examples_inference_output'
+)
+hard_example_edge_output_path = Context.get_parameters(
+    'hard_example_edge_inference_output'
+)
+hard_example_cloud_output_path = Context.get_parameters(
+    'hard_example_cloud_inference_output'
+)
+
+FileOps.clean_folder([
+    all_output_path,
+    hard_example_cloud_output_path,
+    hard_example_edge_output_path
+], clean=False)
+
+
+def draw_boxes(img, bboxes, colors, text_thickness, box_thickness):
+    img_copy = copy.deepcopy(img)
+
+    line_type = 2
+    # map color names to BGR color codes
+    colors = colors.split(",")
+    colors_code = []
+    for color in colors:
+        if color == 'green':
+            colors_code.append((0, 255, 0))
+        elif color == 'blue':
+            colors_code.append((255, 0, 0))
+        elif color == 'yellow':
+            colors_code.append((0, 255, 255))
+        else:
+            colors_code.append((0, 0, 255))
+
+    label_dict = {i: label for i, label in enumerate(class_names)}
+
+    for bbox in bboxes:
+        if float("inf") in bbox or float("-inf") in bbox:
+            continue
+        label = int(bbox[5])
+        score = "%.2f" % round(bbox[4], 2)
+        text = label_dict.get(label) + ":" + score
+        p1 = (int(bbox[1]), int(bbox[0]))
+        p2 = (int(bbox[3]), int(bbox[2]))
+        if (p2[0] - p1[0] < 1) or (p2[1] - p1[1] < 1):
+            continue
+        try:
+            cv2.rectangle(img_copy, p1[::-1], p2[::-1], colors_code[label],
+                          box_thickness)
+            cv2.putText(img_copy, text, (p1[1], p1[0] + 20 * (label + 1)),
+                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0),
+                        text_thickness, line_type)
+        except TypeError as err:
+            # OpenCV's drawing functions only accept coordinates with
+            # precision up to float32; float64 coordinates raise this
+            # TypeError.
+            LOG.warning(f"Draw box failed: {err}")
+    return img_copy
+
+
+def output_deal(
+        final_result,
+        is_hard_example,
+        cloud_result,
+        edge_result,
+        nframe,
+        img_rgb
+):
+    # save the annotated image
+    img_rgb = np.array(img_rgb)
+    img_rgb = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
+    collaboration_frame = draw_boxes(img_rgb, final_result,
+                                     colors="green,blue,yellow,red",
+                                     text_thickness=1,
+                                     box_thickness=1)
+
+    cv2.imwrite(f"{all_output_path}/{nframe}.jpeg", collaboration_frame)
+
+    # save hard example images to their own directories
+    if not is_hard_example:
+        return
+
+    if cloud_result is not None:
+        cv2.imwrite(f"{hard_example_cloud_output_path}/{nframe}.jpeg",
+                    collaboration_frame)
+    edge_collaboration_frame = draw_boxes(
+        img_rgb,
+        edge_result,
+        colors="green,blue,yellow,red",
+        text_thickness=1,
+        box_thickness=1)
+    cv2.imwrite(f"{hard_example_edge_output_path}/{nframe}.jpeg",
+                edge_collaboration_frame)
+
+
+def main():
+
+    # get the hard example mining algorithm from config
+    hard_example_mining = JointInference.get_hem_algorithm_from_config(
+        threshold_img=0.9
+    )
+
+    inference_instance = JointInference(
+        estimator=Estimator,
+        hard_example_mining=hard_example_mining
+    )
+
+    camera = cv2.VideoCapture(camera_address)
+    fps = 10  # sample one frame out of every `fps` frames
+    nframe = 0
+    while True:
+        ret, input_yuv = camera.read()
+        if not ret:
+            LOG.info(
+                f"camera is not open, camera_address={camera_address},"
+                f" sleeping 5 seconds.")
+            time.sleep(5)
+            camera = cv2.VideoCapture(camera_address)
+            continue
+
+        if nframe % fps:
+            nframe += 1
+            continue
+
+        img_rgb = cv2.cvtColor(input_yuv, cv2.COLOR_BGR2RGB)
+        nframe += 1
+        LOG.info(f"camera is open, current frame index is {nframe}")
+        is_hard_example, final_result, edge_result, cloud_result = (
+            inference_instance.inference(img_rgb)
+        )
+        output_deal(
+            final_result,
+            is_hard_example,
+            cloud_result,
+            edge_result,
+            nframe,
+            img_rgb
+        )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/sedna-deploy/testResult/exmple1TestResultImg.jpeg b/sedna-deploy/testResult/exmple1TestResultImg.jpeg
new file mode 100644
index 00000000..88244814
Binary files /dev/null and b/sedna-deploy/testResult/exmple1TestResultImg.jpeg differ
diff --git a/sedna-deploy/testResult/exmple2TestResultImg.jpeg b/sedna-deploy/testResult/exmple2TestResultImg.jpeg
new file mode 100644
index 00000000..a1750bdd
Binary files /dev/null and b/sedna-deploy/testResult/exmple2TestResultImg.jpeg differ
diff --git a/sedna-deploy/testResult/exmple3TestResultImg.jpeg b/sedna-deploy/testResult/exmple3TestResultImg.jpeg
new file mode 100644
index 00000000..49035162
Binary files /dev/null and b/sedna-deploy/testResult/exmple3TestResultImg.jpeg differ
diff --git a/sedna-deploy/testResult/exmple4TestResultImg.jpeg b/sedna-deploy/testResult/exmple4TestResultImg.jpeg
new file mode 100644
index 00000000..3522fe00
Binary files /dev/null and b/sedna-deploy/testResult/exmple4TestResultImg.jpeg differ
diff --git a/sedna-deploy/testResult/exmple5TestResultImg.jpeg b/sedna-deploy/testResult/exmple5TestResultImg.jpeg
new file mode 100644
index 00000000..568a2d3b
Binary files /dev/null and b/sedna-deploy/testResult/exmple5TestResultImg.jpeg differ
diff --git a/sedna-deploy/testResult/exmple6TestResultImg.jpeg b/sedna-deploy/testResult/exmple6TestResultImg.jpeg
new file mode 100644
index 00000000..882b4f21
Binary files /dev/null and b/sedna-deploy/testResult/exmple6TestResultImg.jpeg differ
diff --git a/sedna-deploy/testResult/exmple7TestResultImg.jpeg b/sedna-deploy/testResult/exmple7TestResultImg.jpeg
new file mode 100644
index 00000000..be1fcc92
Binary files /dev/null and b/sedna-deploy/testResult/exmple7TestResultImg.jpeg differ
diff --git a/sedna-deploy/testResult/exmple8TestResultImg.jpeg b/sedna-deploy/testResult/exmple8TestResultImg.jpeg
new file mode 100644
index 00000000..ee6d61f4
Binary files /dev/null and b/sedna-deploy/testResult/exmple8TestResultImg.jpeg differ