A Thermal-Imaging Power Inspection Project with AidLux - Lesson 3

This project implements a thermal-imaging power inspection pipeline on a Xiaomi Pad 5, built on AidLux + R-RetinaNet + TFLite. R-RetinaNet performs rotated object detection on insulators and other power equipment in the thermal images.

First, the ONNX model provided by the instructor needs to be exported to a TFLite model, using onnx2tflite.py for the conversion. Part of the code is shown below:

```python
import os
import sys

# Local checkout of the onnx2tflite project (adjust the path to your own environment).
sys.path.append("D:/wechat data/WeChat Files/wxid_ishz7g32wpon21/FileStorage/File/2023-04/onnx2tflite(1)/onnx2tflite")

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # quiet TensorFlow log output

from converter import onnx_converter


def onnx2tflite(onnx_path):
    onnx_converter(
        onnx_model_path = onnx_path,
        need_simplify = False,
        output_path = os.path.dirname(onnx_path),  # write the .tflite next to the .onnx
        target_formats = ['tflite'],               # or ['keras'], ['keras', 'tflite']
        weight_quant = False,                      # keep full-precision weights
        int8_model = False,                        # no int8 quantization
        int8_mean = None,
        int8_std = None,
        image_root = None
        )

if __name__ == "__main__":
    onnx2tflite("./r-retinanet.onnx")
```

After the conversion, the model can be deployed on the AidLux platform. The code below is the AidLux-side implementation that reads frames from the rear camera and detects the target regions. It is the body of the script's `if __name__ == "__main__":` section; the full script is assumed to have already imported `time`, `cv2`, `numpy as np`, AidLux's `cvs` and `aidlite_gpu` modules, and the project helpers (`process_img`, `Anchors`, `decoder`, `sort_corners`, `rbox_2_quad`).

```python

cap = cvs.VideoCapture(0)
frame_id = 0
while True:
    frame = cap.read()
    if frame is None:
        continue
    frame_id += 1
    if frame_id % 3 != 0:
        continue
    time0 = time.time()
    # Preprocess: resize/pad the frame to the network input and keep the scale factors
    im, im_scales = process_img(frame, NCHW=False, ToTensor=False)  # im: NHWC
    #img = preprocess_img(frame, target_shape=(640, 640), div_num=255, means=None, stds=None)
    # img /= 255


    ''' Define input/output buffer sizes (in bytes: float32 = 4 bytes per value) '''
    in_shape = [1 * 640 * 800 * 3 * 4]   # NHWC input, float32
    out_shape = [1 * 53325 * 8 * 4]      # 53325 anchors x 8 values (5 rotated-box regression terms + 3 class scores), float32
    #out_shape = [1 * 55425 * 8 * 4]     # alternative output size (unused)

    ''' Initialize AidLite (shown inside the frame loop for simplicity; in practice this only needs to run once, before `while True`) '''
    aidlite = aidlite_gpu.aidlite()
    ''' Load the R-RetinaNet model '''
    tflite_model = '/home/Lesson3_Training_and_Deploy/Lesson3_Training_and_Deploy/AidLux_Deploy/AidLux_Deploy/models/r-retinanet.tflite'
    res = aidlite.ANNModel(tflite_model, in_shape, out_shape, 4, -1) # Infer on -1: cpu, 0: gpu, 1: mixed, 2: dsp

    ''' Set the input tensor '''
    aidlite.setInput_Float32(im, 640, 800)

    ''' Run inference '''
    aidlite.invoke()

    ''' Fetch the model output '''
    preds = aidlite.getOutput_Float32(0)
    #preds = preds.reshape(1, 8, 53325)
    preds = preds.reshape(1, 8, -1)   # flat buffer -> (1, 8, num_anchors)
    output = np.transpose(preds, (0, 2, 1))

    ''' Generate anchors '''
    im_anchor = np.transpose(im, (0, 3, 1, 2)).astype(np.float32)
    anchors_list = []
    anchor_generator = Anchors(ratios = np.array([0.2, 0.5, 1, 2, 5]))
    original_anchors = anchor_generator(im_anchor)   # (bs, num_all_anchors, 5)
    anchors_list.append(original_anchors)

    ''' Decode the raw outputs into detections (score threshold + NMS) '''
    decode_output = decoder(im_anchor, anchors_list[-1], output[..., 5:8], output[..., 0:5], thresh=0.5, nms_thresh=0.2, test_conf=None)
    for i in range(len(decode_output)):
        print("dim({}), shape: {}".format(i, decode_output[i].shape))

    ''' Reassemble the detections and rescale boxes back to the original frame '''
    scores = decode_output[0].reshape(-1, 1)
    classes = decode_output[1].reshape(-1, 1)
    boxes = decode_output[2]
    boxes[:, :4] = boxes[:, :4] / im_scales
    if boxes.shape[1] > 5:   
        boxes[:, 5:9] = boxes[:, 5:9] / im_scales
    dets = np.concatenate([classes, scores, boxes], axis=1)

    ''' Keep only foreground detections (class id > 0) '''
    keep = np.where(classes > 0)[0]
    dets = dets[keep, :]

    ''' Convert box format ('xyxya' -> 'xyxyxyxy', i.e. four corner points) '''
    res = sort_corners(rbox_2_quad(dets[:, 2:]))

    ''' Draw the detected rotated boxes on the frame '''
    for k in range(dets.shape[0]):
        cv2.line(frame, (int(res[k, 0]), int(res[k, 1])), (int(res[k, 2]), int(res[k, 3])), (0, 255, 0), 3)
        cv2.line(frame, (int(res[k, 2]), int(res[k, 3])), (int(res[k, 4]), int(res[k, 5])), (0, 255, 0), 3)
        cv2.line(frame, (int(res[k, 4]), int(res[k, 5])), (int(res[k, 6]), int(res[k, 7])), (0, 255, 0), 3)
        cv2.line(frame, (int(res[k, 6]), int(res[k, 7])), (int(res[k, 0]), int(res[k, 1])), (0, 255, 0), 3)
    cvs.imshow(frame)

```

Finally, the demo video is shown below.

[Demo video]
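As a closing note on the geometry: the decoder emits rotated boxes in the 'xyxya' layout (x1, y1, x2, y2, angle), and `rbox_2_quad` plus `sort_corners` turn them into the four corner points used for drawing. Those helpers ship with the course repository and are not reproduced here; the sketch below only illustrates the same idea with OpenCV's `cv2.boxPoints`, and the angle convention (degrees vs. radians, sign) may differ from the project's implementation, so treat it as an illustration rather than a drop-in replacement.

```python
import cv2
import numpy as np

def xyxya_to_quad(box):
    """Illustrative only: turn (x1, y1, x2, y2, angle_deg) into 4 corner points."""
    x1, y1, x2, y2, angle = box
    cx, cy = (x1 + x2) / 2.0, (y1 + y2) / 2.0
    w, h = x2 - x1, y2 - y1
    # cv2.boxPoints takes ((center), (size), angle in degrees) and returns a (4, 2) corner array.
    corners = cv2.boxPoints(((cx, cy), (w, h), angle))
    return corners.reshape(-1)  # flatten to x0, y0, x1, y1, x2, y2, x3, y3

print(xyxya_to_quad(np.array([100, 50, 300, 150, 30.0])))
```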