RKNN3588: Running YOLOv8 Inference on an Image
1. yolov8_test.py
import os
import cv2
import numpy as np
from class_type import CLASSES
# Object confidence threshold and non-maximum suppression (NMS) threshold.
OBJ_THRESH = 0.25
NMS_THRESH = 0.45
IMG_SIZE = (640, 640)
def filter_boxes(boxes, box_confidences, box_class_probs):
    # Filter boxes: keep only candidates whose (max class score * box confidence) reaches OBJ_THRESH.
    box_confidences = box_confidences.reshape(-1)
    # candidate, class_num = box_class_probs.shape
    class_max_score = np.max(box_class_probs, axis=-1)
    classes = np.argmax(box_class_probs, axis=-1)
    _class_pos = np.where(class_max_score * box_confidences >= OBJ_THRESH)
    scores = (class_max_score * box_confidences)[_class_pos]
    boxes = boxes[_class_pos]
    classes = classes[_class_pos]
    return boxes, classes, scores
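To make the expected shapes concrete, here is a small, hypothetical check of filter_boxes with random data (the sizes are made up; in the real pipeline these arrays come from the decoded RKNN outputs):

# Hypothetical example: 5 candidate boxes and 3 classes (matching CLASSES below)
dummy_boxes = np.random.rand(5, 4) * 640   # (N, 4) boxes in xyxy pixel coordinates
dummy_conf = np.random.rand(5, 1)          # (N, 1) objectness confidence
dummy_probs = np.random.rand(5, 3)         # (N, num_classes) class probabilities
b, c, s = filter_boxes(dummy_boxes, dummy_conf, dummy_probs)
print(b.shape, c.shape, s.shape)           # only rows with score * confidence >= OBJ_THRESH remain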
def nms_boxes(boxes, scores):
    # Non-maximum suppression (NMS): drop overlapping boxes and keep the best detections.
    x = boxes[:, 0]
    y = boxes[:, 1]
    w = boxes[:, 2] - boxes[:, 0]
    h = boxes[:, 3] - boxes[:, 1]
    areas = w * h
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x[i], x[order[1:]])
        yy1 = np.maximum(y[i], y[order[1:]])
        xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
        yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])
        w1 = np.maximum(0.0, xx2 - xx1 + 0.00001)
        h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
        inter = w1 * h1
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        inds = np.where(ovr <= NMS_THRESH)[0]
        order = order[inds + 1]
    keep = np.array(keep)
    return keep

from rknnlite.api import RKNNLite
# COCO_test_helper provides the letter_box / get_real_box helpers used below; the import path may differ in your project.
from py_utils.coco_utils import COCO_test_helper

class Model:
    def __init__(self, model_path) -> None:
        self.rknn_model = model_path
        self.rknn_lite = RKNNLite()
        # Load the RKNN model
        print(f'--> Load {self.rknn_model} model')
        ret = self.rknn_lite.load_rknn(self.rknn_model)
        if ret != 0:
            print('Load RKNNLite model failed')
            exit(ret)
        print('done')
        # Initialize the runtime environment (use all three RK3588 NPU cores)
        print('--> Init runtime environment')
        ret = self.rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0_1_2)
        if ret != 0:
            print('Init runtime environment failed')
            exit(ret)
        print('done')

    def inference(self, img_src, IMG_SIZE):
        if img_src is None:
            print('Error: image read failed')
            return None
        # Letterbox the input to the model size, convert BGR->RGB and add a batch dimension
        self.co_helper = COCO_test_helper(enable_letter_box=True)
        img = self.co_helper.letter_box(im=img_src.copy(), new_shape=(IMG_SIZE[1], IMG_SIZE[0]), pad_color=(0, 0, 0))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = np.expand_dims(img, 0)
        # print(f'--> Running RKNN model')
        outputs = self.rknn_lite.inference(inputs=[img])
        return outputs

    def release(self):
        self.rknn_lite.release()

    def recover_real_box(self, boxes):
        # Map boxes from the letterboxed input back to the original image coordinates
        boxes = self.co_helper.get_real_box(boxes)
        return boxes
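A note on the core_mask passed to init_runtime above: RKNNLite.NPU_CORE_0_1_2 spreads the workload across all three NPU cores of the RK3588. rknn_toolkit_lite2 also exposes other masks; for example (constant names taken from the toolkit, verify them against your installed version):

# Alternatives inside Model.__init__ (not used in this post):
# ret = self.rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_AUTO)  # let the driver pick a core
# ret = self.rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0)     # pin the model to NPU core 0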
if __name__ == '__main__':
    yolo_model_path = 'yolov8-main/study/yolov8-240617.rknn'
    yolo_model = Model(yolo_model_path)
    img_path = r"yolov8-main/study/76_269.jpg"
    img = cv2.imread(img_path)
    yolo_result = yolo_model.inference(img, IMG_SIZE=(640, 640))
    boxes, classes, scores = yolov8_post_process(yolo_result)
    boxes = yolo_model.recover_real_box(boxes=boxes)
    after_images = draw(img, boxes, scores, classes)
    cv2.imwrite("1.jpg", after_images)
    # print(yolo_result)
    # Release the NPU runtime when done
    yolo_model.release()
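The script calls yolov8_post_process, which decodes the raw RKNN outputs into boxes, class ids and scores, and draw, which annotates the image. For reference, a minimal drawing helper along those lines could look like this (a hypothetical sketch, not the original draw(); it assumes boxes are (x1, y1, x2, y2) pixel coordinates on the original image and classes index into CLASSES):

def draw_sketch(image, boxes, scores, classes):
    # Annotate each detection with its box, class name and score
    for box, score, cl in zip(boxes, scores, classes):
        x1, y1, x2, y2 = [int(v) for v in box]
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
        label = f'{CLASSES[cl]} {score:.2f}'
        cv2.putText(image, label, (x1, max(y1 - 6, 0)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
    return image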
3. study/class_type.py
CLASSES = ("building", "building2", "statue")
coco_id_list = [1, 2, 3]
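The class id returned by post-processing is simply an index into CLASSES, so the tuple order must match the order the model was trained with. A quick hypothetical check:

from class_type import CLASSES
print(CLASSES[0])  # 'building' is the label drawn for class id 0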
