From a70e667efa05f52c3710a2731ead4782f7acdf72 Mon Sep 17 00:00:00 2001
From: vigo999
Date: Sun, 29 Sep 2024 23:05:20 +0800
Subject: [PATCH 1/3] update deploy folder with mslite infer on ascend 310

---
 deploy/MINDIR.md                          |  29 ----
 deploy/MINDX.md                           |  74 ---------
 deploy/ONNX.md                            |  45 ------
 deploy/export.py                          |   4 -
 deploy/infer_engine/__init__.py           |   6 -
 deploy/infer_engine/lite.py               |  31 ----
 deploy/infer_engine/mindir.py             |  35 ----
 deploy/infer_engine/mindx.py              |  29 ----
 deploy/infer_engine/model_base.py         |  36 -----
 deploy/infer_engine/onnxruntime.py        |  27 ----
 deploy/{predict.py => mslite_predict.py}  | 116 +++++---------
 deploy/test.py                            | 194 -----------------------
 12 files changed, 42 insertions(+), 584 deletions(-)
 delete mode 100644 deploy/MINDIR.md
 delete mode 100644 deploy/MINDX.md
 delete mode 100644 deploy/ONNX.md
 delete mode 100644 deploy/infer_engine/__init__.py
 delete mode 100644 deploy/infer_engine/lite.py
 delete mode 100644 deploy/infer_engine/mindir.py
 delete mode 100644 deploy/infer_engine/mindx.py
 delete mode 100644 deploy/infer_engine/model_base.py
 delete mode 100644 deploy/infer_engine/onnxruntime.py
 rename deploy/{predict.py => mslite_predict.py} (63%)
 delete mode 100644 deploy/test.py

diff --git a/deploy/MINDIR.md b/deploy/MINDIR.md
deleted file mode 100644
index 8fa7d774..00000000
--- a/deploy/MINDIR.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# MindIR Deployment Guide
-
-## Requirements
-mindspore>=2.1
-
-## Notes
-1. Only Predict is currently supported.
-2. In theory this should also run on Ascend 910, but that has not been tested.
-
-
-## Model Conversion
-  Convert a ckpt model to a MindIR model; this step can run on CPU:
-  ```shell
-  python ./deploy/export.py --config ./path_to_config/model.yaml --weight ./path_to_ckpt/weight.ckpt --per_batch_size 1 --file_format MINDIR --device_target CPU
-  e.g.
-  # run on CPU
-  python ./deploy/export.py --config ./configs/yolov5/yolov5n.yaml --weight yolov5n_300e_mAP273-9b16bd7b.ckpt --per_batch_size 1 --file_format MINDIR --device_target CPU
-  ```
-
-## MindIR Test
-  TODO
-
-## MindIR Predict
-  Run inference on a single image:
-  ```shell
-  python ./deploy/predict.py --model_type MindIR --model_path ./path_to_mindir/weight.mindir --config ./path_to_config/yolo.yaml --image_path ./path_to_image/image.jpg
-  e.g.
-  python deploy/predict.py --model_type MindIR --model_path ./yolov5n.mindir --config ./configs/yolov5/yolov5n.yaml --image_path ./coco/image/val2017/image.jpg
-  ```
\ No newline at end of file
diff --git a/deploy/MINDX.md b/deploy/MINDX.md
deleted file mode 100644
index 41da16d5..00000000
--- a/deploy/MINDX.md
+++ /dev/null
@@ -1,74 +0,0 @@
-# MindX Deployment Guide
-
-## Environment Setup
-See: [MindX environment preparation](https://www.hiascend.com/document/detail/zh/mind-sdk/300/quickstart/visionquickstart/visionquickstart_0003.html)
-Note: MindX currently supports Python 3.9 only; prepare a Python 3.9 environment before installing MindX.
-1. Get the [installation package](https://www.hiascend.com/software/mindx-sdk/commercial) from the MindX website; MindX 3.0.0 inference is currently supported
-2. Go to the [download page](https://support.huawei.com/enterprise/zh/ascend-computing/mindx-pid-252501207/software/255398987?idAbsPath=fixnode01%7C23710424%7C251366513%7C22892968%7C252501207) and download Ascend-mindxsdk-mxmanufacture_{version}_linux-{arch}.run
-3. Place the package in a directory on the Ascend 310 machine and unpack it
-4. If you are not the root user, add execute permission to the package:
-```shell
-chmod +x Ascend-mindxsdk-mxmanufacture_{version}_linux-{arch}.run
-```
-5. Go to the directory where the development kit was uploaded and install the mxManufacture development kit:
-```shell
-./Ascend-mindxsdk-mxmanufacture_{version}_linux-{arch}.run --install
-```
-If the following message is echoed after installation, the software was installed successfully:
-```text
-The installation is successfully
-```
-After installation, the mxManufacture directory structure looks like this:
-```text
-.
-├── bin
-├── config
-├── filelist.txt
-├── include
-├── lib
-├── opensource
-├── operators
-├── python
-├── samples
-├── set_env.sh
-├── toolkit
-└── version.info
-```
-6. Go to the mxmanufacture installation directory and run the following command to activate the MindX SDK environment variables:
-```shell
-source set_env.sh
-```
-7. Go to ./mxVision-3.0.0/python/ and install mindx-3.0.0-py3-none-any.whl:
-```shell
-pip install mindx-3.0.0-py3-none-any.whl
-```
-
-## Model Conversion
-  1. Convert a ckpt model to an AIR model; this step must run on Ascend 910:
-  ```shell
-  python ./deploy/export.py --config ./path_to_config/model.yaml --weight ./path_to_ckpt/weight.ckpt --per_batch_size 1 --file_format AIR
-  e.g.
-  python ./deploy/export.py --config ./configs/yolov5/yolov5n.yaml --weight yolov5n_300e_mAP273-9b16bd7b.ckpt --per_batch_size 1 --file_format AIR
-  ```
-  * YOLOv7 export needs to run on an Ascend 910 machine with version 2.0 or later
-
-  2. Convert the AIR model to an OM model with the [ATC conversion tool](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/63RC1alpha002/infacldevg/atctool/atlasatc_16_0005.html); this step requires the MindX environment and runs on Ascend 310:
-  ```shell
-  atc --model=./path_to_air/weight.air --framework=1 --output=yolo --soc_version=Ascend310
-  ```
-
-## MindX Test
-  Run inference on the COCO dataset:
-  ```shell
-  python ./deploy/test.py --model_type MindX --model_path ./path_to_om/weight.om --config ./path_to_config/yolo.yaml
-  e.g.
-  python ./deploy/test.py --model_type MindX --model_path ./yolov5n.om --config ./configs/yolov5/yolov5n.yaml
-  ```
-
-## MindX Predict
-  Run inference on a single image:
-  ```shell
-  python ./deploy/predict.py --model_type MindX --model_path ./path_to_om/weight.om --config ./path_to_config/yolo.yaml --image_path ./path_to_image/image.jpg
-  e.g.
-  python ./deploy/predict.py --model_type MindX --model_path ./yolov5n.om --config ./configs/yolov5/yolov5n.yaml --image_path ./coco/image/val2017/image.jpg
-  ```
\ No newline at end of file

diff --git a/deploy/ONNX.md b/deploy/ONNX.md
deleted file mode 100644
index ec88b179..00000000
--- a/deploy/ONNX.md
+++ /dev/null
@@ -1,45 +0,0 @@
-# ONNX Deployment Guide
-
-## Environment Setup
-  ```shell
-  pip install onnx>=1.9.0
-  pip install onnxruntime>=1.8.0
-  ```
-
-## Notes
-1. Not all mindyolo models currently support ONNX export and inference (YOLOv3 is used as the example here)
-2. Only Predict is currently supported
-3. ONNX export requires replacing the nn.SiLU operator with an implementation built on the sigmoid operator
-For example: add the following custom layer and substitute it for every nn.SiLU in mindyolo
-```python
-class EdgeSiLU(nn.Cell):
-    """
-    SiLU activation function: x * sigmoid(x). To support onnx export of nn.SiLU.
-    """
-
-    def __init__(self):
-        super().__init__()
-
-    def construct(self, x):
-        return x * ops.sigmoid(x)
-```
-
-## Model Conversion
-  Convert a ckpt model to an ONNX model; this step, as well as the Test step, runs on CPU only:
-  ```shell
-  python ./deploy/export.py --config ./path_to_config/model.yaml --weight ./path_to_ckpt/weight.ckpt --per_batch_size 1 --file_format ONNX --device_target CPU
-  e.g.
-  # run on CPU
-  python ./deploy/export.py --config ./configs/yolov3/yolov3.yaml --weight yolov3-darknet53_300e_mAP455-adfb27af.ckpt --per_batch_size 1 --file_format ONNX --device_target CPU
-  ```
-
-## ONNX Test
-  TODO
-
-## ONNXRuntime Predict
-  Run inference on a single image:
-  ```shell
-  python ./deploy/predict.py --model_type ONNX --model_path ./path_to_onnx_model/model.onnx --config ./path_to_config/yolo.yaml --image_path ./path_to_image/image.jpg
-  e.g.
-  python ./deploy/predict.py --model_type ONNX --model_path ./yolov3.onnx --config ./configs/yolov3/yolov3.yaml --image_path ./coco/image/val2017/image.jpg
-  ```
\ No newline at end of file

diff --git a/deploy/export.py b/deploy/export.py
index b75492c9..f7111953 100644
--- a/deploy/export.py
+++ b/deploy/export.py
@@ -20,7 +20,6 @@ def get_parser_export(parents=None):
     parser = argparse.ArgumentParser(description="Export", parents=[parents] if parents else [])
     parser.add_argument("--device_target", type=str, default="Ascend", help="device target, Ascend/GPU/CPU")
     parser.add_argument("--ms_mode", type=int, default=0, help="train mode, graph/pynative")
-    parser.add_argument("--ms_amp_level", type=str, default="O0", help="amp level, O0/O1/O2")
     parser.add_argument("--weight", type=str, default="yolov7_300.ckpt", help="model.ckpt path(s)")
     parser.add_argument("--img_size", type=int, default=640, help="inference size (pixels)")
     parser.add_argument("--per_batch_size", type=int, default=1, help="size of each image batch")
@@ -68,8 +67,6 @@ def export_weight(args):
         checkpoint_path=args.weight,
     )
     network.set_train(False)
-    ms.amp.auto_mixed_precision(network, amp_level=args.ms_amp_level)
-
     # Export
     input_arr = Tensor(np.ones([args.per_batch_size, 3, args.img_size, args.img_size]), ms.float32)
     file_name = os.path.basename(args.config)[:-5]  # delete ".yaml"
@@ -77,7 +74,6 @@ def export_weight(args):
 
     logger.info("Export completed.")
 
-
 if __name__ == "__main__":
     parser = get_parser_export()
     args = parse_args(parser)

diff --git a/deploy/infer_engine/__init__.py b/deploy/infer_engine/__init__.py
deleted file mode 100644
index 269eaf3c..00000000
--- a/deploy/infer_engine/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .lite import LiteModel
-from .mindx import MindXModel
-from .mindir import MindIRModel
-from .onnxruntime import ONNXRuntimeModel
-
-__all__ = ["LiteModel", "MindXModel", "MindIRModel", "ONNXRuntimeModel"]

diff --git a/deploy/infer_engine/lite.py b/deploy/infer_engine/lite.py
deleted file mode 100644
index 0b6215f3..00000000
--- a/deploy/infer_engine/lite.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""MindSpore Lite Inference"""
-
-from .model_base import ModelBase
-
-
-class LiteModel(ModelBase):
-    def __init__(self, model_path, device_id=0):
-        super().__init__()
-        self.model_path = model_path
-        self.device_id = device_id
-
-        self._init_model()
-
-    def _init_model(self):
-        import mindspore_lite as mslite
-
-        context = mslite.Context()
-        context.target = ["ascend"]
-        context.ascend.device_id = self.device_id
-
-        self.model = mslite.Model()
-        self.model.build_from_file(self.model_path, mslite.ModelType.MINDIR, context)
-
-    def infer(self, input):
-        inputs = self.model.get_inputs()
-        self.model.resize(inputs, [list(input.shape)])
-        inputs[0].set_data_from_numpy(input)
-
-        outputs = self.model.predict(inputs)
-        outputs = [output.get_data_to_numpy().copy() for output in outputs]
-        return outputs
diff --git a/deploy/infer_engine/mindir.py b/deploy/infer_engine/mindir.py
deleted file mode 100644
index 20f1bb6d..00000000
--- a/deploy/infer_engine/mindir.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""MindSpore Graph Mode Inference"""
-
-from .model_base import ModelBase
-import numpy as np
-
-
-class MindIRModel(ModelBase):
-    def __init__(self, model_path):
-        super().__init__()
-        self.model_path = model_path
-        self._init_model()
-
-    def _init_model(self):
-        global ms, nn, Tensor
-        from mindspore import Tensor
-        import mindspore.nn as nn
-        import mindspore as ms
-        ms.set_context(mode=ms.GRAPH_MODE)
-        self.model = nn.GraphCell(ms.load(self.model_path))
-        if not self.model:
-            raise ValueError(f"Failed to load the model file {self.model_path}.")
-
-    def infer(self, input):
-        inputs = Tensor(input)
-        outputs = self.model(inputs)
-        # extract result from func return into Tensor lists
-        output_list = []
-        for output in outputs:
-            if isinstance(output, tuple):
-                for out in output:
-                    assert not isinstance(out, tuple), 'only support level one tuple'
-                    output_list.append(out.asnumpy())
-            else:
-                output_list.append(output.asnumpy())
-        return output_list

diff --git a/deploy/infer_engine/mindx.py b/deploy/infer_engine/mindx.py
deleted file mode 100644
index abf82816..00000000
--- a/deploy/infer_engine/mindx.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""MindX SDK Inference"""
-
-import numpy as np
-from .model_base import ModelBase
-
-
-class MindXModel(ModelBase):
-    def __init__(self, model_path, device_id=0):
-        super().__init__()
-        self.model_path = model_path
-        self.device_id = device_id
-
-        self._init_model()
-
-    def _init_model(self):
-        global base, Tensor
-        from mindx.sdk import Tensor, base, visionDataFormat
-
-        base.mx_init()
-        self.model = base.model(self.model_path, self.device_id)
-        if not self.model:
-            raise ValueError(f"Failed to load the model file {self.model_path}.")
-
-    def infer(self, input):
-        inputs = Tensor(input)
-        outputs = self.model.infer(inputs)
-        list([output.to_host() for output in outputs])
-        outputs = [np.array(output) for output in outputs]
-        return outputs

diff --git a/deploy/infer_engine/model_base.py b/deploy/infer_engine/model_base.py
deleted file mode 100644
index 441976a1..00000000
--- a/deploy/infer_engine/model_base.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Inference Model Base"""
-
-from abc import ABCMeta, abstractmethod
-from typing import List
-
-import numpy as np
-
-
-class ModelBase(metaclass=ABCMeta):
-    """
-    base class for model load and infer
-    """
-
-    def __init__(self, *args, **kwargs):
-        super().__init__()
-        self.model = None
-
-    @abstractmethod
-    def _init_model(self):
-        pass
-
-    @abstractmethod
-    def infer(self, input: np.ndarray) -> List[np.ndarray]:
-        """
-        model inference, just for single input
-        Args:
-            input: np img
-
-        Returns:
-
-        """
-        pass
-
-    def __del__(self):
-        if hasattr(self, "model") and self.model:
-            del self.model

diff --git a/deploy/infer_engine/onnxruntime.py b/deploy/infer_engine/onnxruntime.py
deleted file mode 100644
index fdf6cad7..00000000
--- a/deploy/infer_engine/onnxruntime.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""ONNX Runtime Inference"""
-
-from .model_base import ModelBase
-import numpy as np
-
-
-class ONNXRuntimeModel(ModelBase):
-    def __init__(self, model_path):
-        super().__init__()
-        self.model_path = model_path
-        self._init_model()
-
-    def _init_model(self):
-        global ort
-        import onnxruntime as ort
-
-        self.model = ort.InferenceSession(self.model_path, providers=ort.get_available_providers())
-        if not self.model:
-            raise ValueError(f"Failed to load the model file {self.model_path}.")
-
-    def infer(self, input):
-        assert len(self.model.get_inputs()) == 1, \
-            "Expected exactly 1 model input but got {}".format(len(self.model.get_inputs()))
-        input_name = self.model.get_inputs()[0].name
-        # extract result from func return into Tensor lists
-        output_list = self.model.run(None, {input_name: input})
-        return output_list

diff --git a/deploy/predict.py b/deploy/mslite_predict.py
similarity index 63%
rename from deploy/predict.py
rename to deploy/mslite_predict.py
index 4421b72e..0f59088b 100644
--- a/deploy/predict.py
+++ b/deploy/mslite_predict.py
@@ -1,16 +1,16 @@
-"""MindYolo predict Script. Support evaluation of one image file"""
+"""yolo prediction example script"""
 
 import argparse
 import ast
 import os
+import sys
 import time
-
 import cv2
 import numpy as np
 import yaml
 from datetime import datetime
 
-from mindspore import context, nn
+import mindspore_lite as mslite
 
 from mindyolo.data import COCO80_TO_COCO91_CLASS
 from mindyolo.utils import logger
@@ -21,29 +21,17 @@ def get_parser_infer(parents=None):
     parser = argparse.ArgumentParser(description="Infer", parents=[parents] if parents else [])
-    parser.add_argument("--task", type=str, default="detect", choices=["detect"])
-    parser.add_argument("--device_target", type=str, default="Ascend", help="device target, Ascend/GPU/CPU")
-    parser.add_argument("--ms_mode", type=int, default=0, help="train mode, graph/pynative")
-    parser.add_argument("--ms_amp_level", type=str, default="O0", help="amp level, O0/O1/O2")
-    parser.add_argument(
-        "--ms_enable_graph_kernel", type=ast.literal_eval, default=False, help="use enable_graph_kernel or not"
-    )
-    parser.add_argument("--weight", type=str, default="yolov7_300.ckpt", help="model.ckpt path(s)")
     parser.add_argument("--img_size", type=int, default=640, help="inference size (pixels)")
     parser.add_argument("--seed", type=int, default=2, help="set global seed")
-
-    parser.add_argument("--model_type", type=str, default="MindX", help="model type MindX/Lite/MindIR/ONNX")
-    parser.add_argument("--model_path", type=str, default="./models/yolov5s.om", help="model weight path")
-
-    parser.add_argument("--save_dir", type=str, default="./runs_infer", help="save dir")
-    parser.add_argument("--log_level", type=str, default="INFO", help="save dir")
+    parser.add_argument("--mindir_path", type=str, help="mindir path")
+    parser.add_argument("--result_folder", type=str, default="./log_result", help="predicted results folder")
+    parser.add_argument("--log_level", type=str, default="INFO", help="log level")
     parser.add_argument("--conf_thres", type=float, default=0.25, help="object confidence threshold")
     parser.add_argument("--iou_thres", type=float, default=0.65, help="IOU threshold for NMS")
     parser.add_argument(
         "--conf_free", type=ast.literal_eval, default=False, help="Whether the prediction result include conf"
     )
     parser.add_argument("--nms_time_limit", type=float, default=60.0, help="time limit for NMS")
-    parser.add_argument("--image_path", type=str, help="path to image")
     parser.add_argument("--save_result", type=ast.literal_eval, default=True, help="whether save the inference result")
     parser.add_argument(
         "--single_cls", type=ast.literal_eval, default=False, help="train multi-class data as single-class"
     )
 
     return parser
 
-
 def set_default_infer(args):
-    # Set Context
-    context.set_context(mode=args.ms_mode, device_target=args.device_target, max_call_depth=2000)
-    if args.device_target == "Ascend":
-        context.set_context(device_id=int(os.getenv("DEVICE_ID", 0)))
-    elif args.device_target == "GPU" and args.ms_enable_graph_kernel:
-        context.set_context(enable_graph_kernel=True)
-    args.rank, args.rank_size = 0, 1
-
     # Set Data
     args.data.nc = 1 if args.single_cls else int(args.data.nc)  # number of classes
     args.data.names = ["item"] if args.single_cls and len(args.names) != 1 else args.data.names  # class names
     assert len(args.data.names) == args.data.nc, "%g names found for nc=%g dataset in %s" % (
         len(args.data.names),
         args.data.nc,
         args.config,
     )
     # Directories and Save run settings
-    args.save_dir = os.path.join(args.save_dir, datetime.now().strftime("%Y.%m.%d-%H:%M:%S"))
-    os.makedirs(args.save_dir, exist_ok=True)
-    if args.rank % args.rank_size == 0:
-        with open(os.path.join(args.save_dir, "cfg.yaml"), "w") as f:
-            yaml.dump(vars(args), f, sort_keys=False)
+    args.result_folder = os.path.join(args.result_folder, datetime.now().strftime("%Y.%m.%d-%H:%M:%S"))
+    os.makedirs(args.result_folder, exist_ok=True)
+    with open(os.path.join(args.result_folder, "cfg.yaml"), "w") as f:
+        yaml.dump(vars(args), f, sort_keys=False)
     # Set Logger
-    logger.setup_logging(logger_name="MindYOLO", log_level="INFO", rank_id=args.rank, device_per_servers=args.rank_size)
-    logger.setup_logging_file(log_dir=os.path.join(args.save_dir, "logs"))
+    logger.setup_logging(logger_name="MindYOLO", log_level="INFO")
+    logger.setup_logging_file(log_dir=os.path.join(args.result_folder, "logs"))
 
 
 def detect(
-    network: nn.Cell,
+    mindir_path: str,
     img: np.ndarray,
     conf_thres: float = 0.25,
     iou_thres: float = 0.65,
@@ -113,10 +91,23 @@ def detect(
     img = np.ascontiguousarray(img)
     # Run infer
     _t = time.time()
-    out = network.infer(img)[0]  # inference and training outputs
+    # init mslite model to predict
+    context = mslite.Context()
+    context.target = ["Ascend"]
+    model = mslite.Model()
+    logger.info('mslite model init...')
+    model.build_from_file(mindir_path, mslite.ModelType.MINDIR, context)
+    inputs = model.get_inputs()
+    model.resize(inputs, [list(img.shape)])
+    inputs[0].set_data_from_numpy(img)
+
+    outputs = model.predict(inputs)
+    outputs = [output.get_data_to_numpy().copy() for output in outputs]
+    out = outputs[0]
     infer_times = time.time() - _t
 
     # Run NMS
+    logger.info('perform nms...')
     t = time.time()
     out = non_max_suppression(
         out,
@@ -159,58 +150,35 @@ def detect(
     return result_dict
 
-
 def infer(args):
     # Init
     set_seed(args.seed)
     set_default_infer(args)
 
-    # Create Network
-    if args.model_type == "MindX":
-        from infer_engine.mindx import MindXModel
-        network = MindXModel(args.model_path)
-    elif args.model_type == "Lite":
-        from infer_engine.lite import LiteModel
-        network = LiteModel(args.model_path)
-    elif args.model_type == "MindIR":
-        from infer_engine.mindir import MindIRModel
-        network = MindIRModel(args.model_path)
-    elif args.model_type == "ONNX":
-        from infer_engine.onnxruntime import ONNXRuntimeModel
-        network = ONNXRuntimeModel(args.model_path)
-    else:
-        raise TypeError("only MindX/Lite/MindIR/ONNX model types are supported")
-
     # Load Image
     if isinstance(args.image_path, str) and os.path.isfile(args.image_path):
-        import cv2
-
         img = cv2.imread(args.image_path)
     else:
         raise ValueError("Detect: input image file not available.")
-
-    # Detect
     is_coco_dataset = "coco" in args.data.dataset_name
-    if args.task == "detect":
-        result_dict = detect(
-            network=network,
-            img=img,
-            conf_thres=args.conf_thres,
-            iou_thres=args.iou_thres,
-            conf_free=args.conf_free,
-            nms_time_limit=args.nms_time_limit,
-            img_size=args.img_size,
-            is_coco_dataset=is_coco_dataset,
-        )
-        if args.save_result:
-            save_path = os.path.join(args.save_dir, "detect_results")
-            draw_result(args.image_path, result_dict, args.data.names, save_path=save_path)
-    else:
+    # Detect
+    result_dict = detect(
+        mindir_path=args.mindir_path,
+        img=img,
+        conf_thres=args.conf_thres,
+        iou_thres=args.iou_thres,
+        conf_free=args.conf_free,
+        nms_time_limit=args.nms_time_limit,
+        img_size=args.img_size,
+        is_coco_dataset=is_coco_dataset,
+    )
+    if args.save_result:
+        save_path = os.path.join(args.result_folder, "detect_results")
+        draw_result(args.image_path, result_dict, args.data.names, save_path=save_path)
+    else:
         raise NotImplementedError
 
-    logger.info("Infer completed.")
-
-
+    logger.info("predict completed.")
 if __name__ == "__main__":
     parser = get_parser_infer()
     args = parse_args(parser)
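For reference, the mindspore_lite call sequence that detect() now uses can be exercised on its own. A minimal sketch, assuming an Ascend device and an exported ./yolov5n.mindir; the dummy tensor stands in for a letterboxed, normalized 640x640 image:

```python
import mindspore_lite as mslite
import numpy as np

# build the model once, outside any per-image loop
context = mslite.Context()
context.target = ["Ascend"]
model = mslite.Model()
model.build_from_file("./yolov5n.mindir", mslite.ModelType.MINDIR, context)

# a real caller letterboxes an image and converts it to NCHW float32 first
img = np.ones((1, 3, 640, 640), dtype=np.float32)

inputs = model.get_inputs()
model.resize(inputs, [list(img.shape)])  # match the graph input to this batch shape
inputs[0].set_data_from_numpy(img)

outputs = model.predict(inputs)
preds = [out.get_data_to_numpy().copy() for out in outputs]
print(preds[0].shape)  # raw predictions, ready for non_max_suppression
```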
diff --git a/deploy/test.py b/deploy/test.py
deleted file mode 100644
index b7a1369f..00000000
--- a/deploy/test.py
+++ /dev/null
@@ -1,194 +0,0 @@
-"""MindYolo Evaluation Script of COCO dataset"""
-
-import argparse
-import ast
-import os
-import time
-from pathlib import Path
-
-import numpy as np
-from pycocotools.coco import COCO
-from pycocotools.cocoeval import COCOeval
-
-from mindyolo.data import COCO80_TO_COCO91_CLASS, COCODataset, create_loader
-from mindyolo.utils import logger
-from mindyolo.utils.config import parse_args
-from mindyolo.utils.metrics import non_max_suppression, scale_coords, xyxy2xywh
-
-
-def test(args):
-    if args.task == "detect":
-        return test_detect(args)
-    else:
-        raise NotImplementedError
-
-
-def test_detect(args):
-    # Create Network
-    if args.model_type == "MindX":
-        from infer_engine.mindx import MindXModel
-        network = MindXModel(args.model_path)
-    elif args.model_type == "Lite":
-        from infer_engine.lite import LiteModel
-        network = LiteModel(args.model_path)
-    else:
-        raise TypeError("only MindX/Lite model types are supported")
-
-    dataset = COCODataset(
-        dataset_path=args.val_set,
-        img_size=args.img_size,
-        transforms_dict=args.test_transforms,
-        is_training=False,
-        augment=False,
-        rect=args.rect,
-        single_cls=args.single_cls,
-        batch_size=args.batch_size,
-        stride=max(args.network.stride),
-    )
-    dataloader = create_loader(
-        dataset=dataset,
-        batch_collate_fn=dataset.test_collate_fn,
-        column_names_getitem=dataset.column_names_getitem,
-        column_names_collate=dataset.column_names_collate,
-        batch_size=args.batch_size,
-        epoch_size=1,
-        rank=0,
-        rank_size=1,
-        shuffle=False,
-        drop_remainder=False,
-        num_parallel_workers=2,
-        python_multiprocessing=True,
-    )
-
-    loader = dataloader.create_dict_iterator(output_numpy=True, num_epochs=1)
-    dataset_dir = args.val_set[: -len(args.val_set.split("/")[-1])]
-    anno_json_path = os.path.join(dataset_dir, "annotations/instances_val2017.json")
-    coco91class = COCO80_TO_COCO91_CLASS
-
-    step_num = dataloader.get_dataset_size()
-    sample_num = 0
-    infer_times = 0.0
-    nms_times = 0.0
-    result_dicts = []
-    for i, data in enumerate(loader):
-        imgs, paths, ori_shape, pad, hw_scale = (
-            data["images"],
-            data["img_files"],
-            data["hw_ori"],
-            data["pad"],
-            data["hw_scale"],
-        )
-        nb, _, height, width = imgs.shape
-        # Run infer
-        _t = time.time()
-        out = network.infer(imgs)[0]  # inference and training outputs
-        infer_times += time.time() - _t
-        # print(f"Sample {step_num}/{i + 1}, network time cost: {(time.time() - _t) * 1000:.2f} ms.")
-
-        # Run NMS
-        t = time.time()
-        out = non_max_suppression(
-            out,
-            conf_thres=args.conf_thres,
-            iou_thres=args.iou_thres,
-            conf_free=args.conf_free,
-            multi_label=True,
-            time_limit=args.nms_time_limit,
-        )
-        nms_times += time.time() - t
-        # print(f"Sample {step_num}/{i + 1}, nms time cost: {(time.time() - t) * 1000:.2f} ms.")
-
-        # Statistics pred
-        for si, pred in enumerate(out):
-            path = Path(str(paths[si]))
-            sample_num += 1
-            if len(pred) == 0:
-                continue
-
-            # Predictions
-            predn = np.copy(pred)
-            scale_coords(
-                imgs[si].shape[1:], predn[:, :4], ori_shape[si], ratio=hw_scale[si], pad=pad[si]
-            )  # native-space pred
-
-            image_id = int(path.stem) if path.stem.isnumeric() else path.stem
-            box = xyxy2xywh(predn[:, :4])  # xywh
-            box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
-            for p, b in zip(pred.tolist(), box.tolist()):
-                result_dicts.append(
-                    {
-                        "image_id": image_id,
-                        "category_id": coco91class[int(p[5])],
-                        "bbox": [round(x, 3) for x in b],
-                        "score": round(p[4], 5),
-                    }
-                )
-        print(f"Sample {step_num}/{i + 1}, time cost: {(time.time() - _t) * 1000:.2f} ms.")
-
-    # Compute mAP
-    try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
-        anno = COCO(anno_json_path)  # init annotations api
-        pred = anno.loadRes(result_dicts)  # init predictions api
-        eval = COCOeval(anno, pred, "bbox")
-        eval.params.imgIds = [int(Path(im_file).stem) for im_file in dataset.img_files]
-        eval.evaluate()
-        eval.accumulate()
-        eval.summarize()
-        map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
-    except Exception as e:
-        logger.warning(f"pycocotools unable to run: {e}")
-        raise e
-
-    t = tuple(x / sample_num * 1e3 for x in (infer_times, nms_times, infer_times + nms_times)) + (
-        height,
-        width,
-        args.batch_size,
-    )  # tuple
-    # logger.info(f'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g;' % t)
-    print(f"Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g;" % t)
-
-    return map, map50
-
-
-def get_parser_test(parents=None):
-    parser = argparse.ArgumentParser(description="Test", parents=[parents] if parents else [])
-
-    parser.add_argument("--task", type=str, default="detect", choices=["detect"])
-    parser.add_argument("--img_size", type=int, default=640, help="inference size (pixels)")
-    parser.add_argument("--rect", type=ast.literal_eval, default=False, help="rectangular training")
-    parser.add_argument(
-        "--single_cls", type=ast.literal_eval, default=False, help="train multi-class data as single-class"
-    )
-    parser.add_argument("--batch_size", type=int, default=1, help="size of each image batch")
-
-    parser.add_argument("--model_type", type=str, default="Lite", help="model type MindX/Lite")
-    parser.add_argument("--model_path", type=str, default="./models/yolov5s.om", help="model weight path")
-
-    parser.add_argument("--nc", type=int, default=80)
-    parser.add_argument("--val_set", type=str, default="./coco/val2017.txt")
-    parser.add_argument(
-        "--test_transforms",
-        type=list,
-        default=[
-            {"func_name": "letterbox", "scaleup": False},
-            {"func_name": "label_norm", "xyxy2xywh_": True},
-            {"func_name": "label_pad", "padding_size": 160, "padding_value": -1},
-            {"func_name": "image_norm", "scale": 255.0},
-            {"func_name": "image_transpose", "bgr2rgb": True, "hwc2chw": True},
-        ],
-    )
-    parser.add_argument("--conf_thres", type=float, default=0.001)
-    parser.add_argument("--iou_thres", type=float, default=0.65)
-    parser.add_argument(
-        "--conf_free", type=ast.literal_eval, default=False, help="Whether the prediction result include conf"
-    )
-    parser.add_argument("--nms_time_limit", type=float, default=20.0)
-
-    return parser
-
-
-if __name__ == "__main__":
-    parser = get_parser_test()
-    args = parse_args(parser)
-    test(args)
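The deleted test.py computed COCO mAP through pycocotools. A condensed sketch of that evaluation flow, assuming COCO-format annotations on disk; the single result entry below is illustrative only:

```python
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

# detections in COCO result format, as accumulated by the deleted test loop
result_dicts = [
    {"image_id": 139, "category_id": 1, "bbox": [100.0, 100.0, 50.0, 80.0], "score": 0.9},
]

anno = COCO("./coco/annotations/instances_val2017.json")  # ground-truth annotations
pred = anno.loadRes(result_dicts)                          # register the detections
coco_eval = COCOeval(anno, pred, "bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
map_50_95, map_50 = coco_eval.stats[:2]  # mAP@0.5:0.95 and mAP@0.5
print(map_50_95, map_50)
```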
From cc2ea7a73fbac5799a09fc37f3b9c952a2d57352 Mon Sep 17 00:00:00 2001
From: vigo999
Date: Sun, 29 Sep 2024 23:15:19 +0800
Subject: [PATCH 2/3] update content of deploy readme

---
 deploy/README.md | 143 +++++++++++++++++++----------------------------
 1 file changed, 56 insertions(+), 87 deletions(-)

diff --git a/deploy/README.md b/deploy/README.md
index bd1cbc62..06fffc00 100644
--- a/deploy/README.md
+++ b/deploy/README.md
@@ -1,105 +1,74 @@
-# MindYOLO Deployment
+## MindYOLO Inference
 
-## Dependencies
+The following are the steps to run inference for the YOLO family of models on Ascend 310.
+
+### 1 Install dependencies
 ```shell
 pip install -r requirement.txt
 ```
 
-## MindSpore Lite Environment Preparation
-  See: [Lite environment setup](https://mindspore.cn/lite)
-  Note: MindSpore Lite requires Python 3.7; prepare a Python 3.7 environment before installing Lite.
-  1. Download the tar.gz and whl packages that match your environment
-  2. Unpack the tar.gz package and install the matching whl package
-  ```shell
-  tar -zxvf mindspore_lite-2.0.0a0-cp37-cp37m-{os}_{platform}_64.tar.gz
-  pip install mindspore_lite-2.0.0a0-cp37-cp37m-{os}_{platform}_64.whl
-  ```
-  3. Configure the Lite environment variables
-  LITE_HOME is the folder that the tar.gz unpacks to; an absolute path is recommended
+### 2 Install MindSpore Lite
+  See the official [MindSpore Lite](https://mindspore.cn/lite) page.
+  - Download the tar.gz package, unpack it, and configure the environment variables
   ```shell
-  export LITE_HOME=/path/to/mindspore-lite-{version}-{os}-{platform}
+  tar -zxvf mmindspore_lite-[xxx].tar.gz
+  export LITE_HOME=/[path_to_mindspore_lite_xxx]
   export LD_LIBRARY_PATH=$LITE_HOME/runtime/lib:$LITE_HOME/tools/converter/lib:$LD_LIBRARY_PATH
   export PATH=$LITE_HOME/tools/converter/converter:$LITE_HOME/tools/benchmark:$PATH
   ```
-
-## Quick Start
-
-### Model Conversion
-  Convert a ckpt model to a MindIR model; this step can run on CPU or Ascend 910
+  LITE_HOME is the folder that the tar.gz unpacks to; set an absolute path
+  - Install the whl package
   ```shell
-  python ./deploy/export.py --config ./path_to_config/model.yaml --weight ./path_to_ckpt/weight.ckpt --per_batch_size 1 --file_format MINDIR --device_target [CPU/Ascend]
-  e.g.
-  # run on CPU
-  python ./deploy/export.py --config ./configs/yolov5/yolov5n.yaml --weight yolov5n_300e_mAP273-9b16bd7b.ckpt --per_batch_size 1 --file_format MINDIR --device_target CPU
-  # run on Ascend
-  python ./deploy/export.py --config ./configs/yolov5/yolov5n.yaml --weight yolov5n_300e_mAP273-9b16bd7b.ckpt --per_batch_size 1 --file_format MINDIR --device_target Ascend
+  pip install mindspore_lite-[xxx]whl
   ```
-
-### Lite Test
-  ```shell
-  python deploy/test.py --model_type Lite --model_path ./path_to_mindir/weight.mindir --config ./path_to_config/yolo.yaml
-  e.g.
-  python deploy/test.py --model_type Lite --model_path ./yolov5n.mindir --config ./configs/yolov5/yolov5n.yaml
-  ```
-
-### Lite Predict
+  - Verified MindSpore Lite versions: 2.2.14/2.3.0/2.3.1
+  - Install the matching Ascend driver/firmware/ascend-toolkit
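+  A quick sanity check that the package imports and an Ascend context can be
+  constructed (a minimal sketch; it loads no model):
+  ```python
+  import mindspore_lite as mslite
+
+  context = mslite.Context()
+  context.target = ["Ascend"]
+  print("MindSpore Lite context ready:", context.target)
+  ```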
+### 3 Model conversion: ckpt -> mindir (optional)
+  Convert the trained ckpt weights to MindIR; this step must run on Ascend 910.
+  For example:
   ```shell
-  python ./deploy/predict.py --model_type Lite --model_path ./path_to_mindir/weight.mindir --config ./path_to_config/yolo.yaml --image_path ./path_to_image/image.jpg
-  e.g.
-  python deploy/predict.py --model_type Lite --model_path ./yolov5n.mindir --config ./configs/yolov5/yolov5n.yaml --image_path ./coco/image/val2017/image.jpg
+  python ./deploy/export.py --config ./configs/yolov5/yolov5n.yaml --weight yolov5.ckpt --file_format MINDIR --device_target Ascend
   ```
-## Scripts
-  - predict.py supports single-image inference
-  - test.py supports inference on the COCO dataset
-  - Note: inference is currently supported on Ascend 310 only
+### 4 Single-image inference
 
-## MindX Deployment
+  Taking YOLOv5 as an example, with /work as the working directory:
 
-See [MINDX](MINDX.md)
-
-## MindIR Deployment
-
-See [MINDIR](MINDIR.md)
-
-## ONNX Deployment
-**Note:** only some models support ONNX export and deployment with ONNXRuntime
-See [ONNX](ONNX.md)
-
-
-## Benchmark and Supported Model Zoo
-
-- [x] [YOLOv7](../configs/yolov7)
-- [x] [YOLOv5](../configs/yolov5)
-- [x] [YOLOv3](../configs/yolov3)
-- [x] [YOLOv8](../configs/yolov8)
-- [x] [YOLOv4](../configs/yolov4)
-- [x] [YOLOX](../configs/yolox)
-
-| Name | Scale | Context | ImageSize | Dataset | Box mAP (%) | Params | FLOPs | Recipe | Download |
-|--------|--------------------|----------|-----------|--------------|-------------|--------|--------|--------|----------|
-| YOLOv8 | N | D310x1-G | 640 | MS COCO 2017 | 37.2 | 3.2M | 8.7G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov8/yolov8n.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-n_500e_mAP372-cc07f5bd.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-n_500e_mAP372-cc07f5bd-36a7ffec.mindir) |
-| YOLOv8 | S | D310x1-G | 640 | MS COCO 2017 | 44.6 | 11.2M | 28.6G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov8/yolov8s.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-s_500e_mAP446-3086f0c9.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-s_500e_mAP446-3086f0c9-137e9384.mindir) |
-| YOLOv8 | M | D310x1-G | 640 | MS COCO 2017 | 50.5 | 25.9M | 78.9G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov8/yolov8m.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-m_500e_mAP505-8ff7a728.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-m_500e_mAP505-8ff7a728-e21c252b.mindir) |
-| YOLOv8 | L | D310x1-G | 640 | MS COCO 2017 | 52.8 | 43.7M | 165.2G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov8/yolov8l.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-l_500e_mAP528-6e96d6bb.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-l_500e_mAP528-6e96d6bb-55db59b4.mindir) |
-| YOLOv8 | X | D310x1-G | 640 | MS COCO 2017 | 53.7 | 68.2M | 257.8G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov8/yolov8x.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-x_500e_mAP537-b958e1c7.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-x_500e_mAP537-b958e1c7-2a034e2c.mindir) |
-| YOLOv7 | Tiny | D310x1-G | 640 | MS COCO 2017 | 37.5 | 6.2M | 13.8G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov7/yolov7-tiny.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov7/yolov7-tiny_300e_mAP375-d8972c94.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov7/yolov7-tiny_300e_mAP375-d8972c94-c550e241.mindir) |
-| YOLOv7 | L | D310x1-G | 640 | MS COCO 2017 | 50.8 | 36.9M | 104.7G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov7/yolov7.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov7/yolov7_300e_mAP508-734ac919.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov7/yolov7_300e_mAP508-734ac919-6d65d27c.mindir) |
-| YOLOv7 | X | D310x1-G | 640 | MS COCO 2017 | 52.4 | 71.3M | 189.9G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov7/yolov7-x.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov7/yolov7-x_300e_mAP524-e2f58741.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov7/yolov7-x_300e_mAP524-e2f58741-583e624b.mindir) |
-| YOLOv5 | N | D310x1-G | 640 | MS COCO 2017 | 27.3 | 1.9M | 4.5G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov5/yolov5n.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5n_300e_mAP273-9b16bd7b.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5n_300e_mAP273-9b16bd7b-bd03027b.mindir) |
-| YOLOv5 | S | D310x1-G | 640 | MS COCO 2017 | 37.6 | 7.2M | 16.5G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov5/yolov5s.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5s_300e_mAP376-860bcf3b.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5s_300e_mAP376-860bcf3b-c105deb6.mindir) |
-| YOLOv5 | M | D310x1-G | 640 | MS COCO 2017 | 44.9 | 21.2M | 49.0G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov5/yolov5m.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5m_300e_mAP449-e7bbf695.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5m_300e_mAP449-e7bbf695-b1525c76.mindir) |
-| YOLOv5 | L | D310x1-G | 640 | MS COCO 2017 | 48.5 | 46.5M | 109.1G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov5/yolov5l.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5l_300e_mAP485-a28bce73.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5l_300e_mAP485-a28bce73-d4e437c2.mindir) |
-| YOLOv5 | X | D310x1-G | 640 | MS COCO 2017 | 50.5 | 86.7M | 205.7G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov5/yolov5x.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5x_300e_mAP505-97d36ddc.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5x_300e_mAP505-97d36ddc-cae885cf.mindir) |
-| YOLOv4 | CSPDarknet53 | D310x1-G | 608 | MS COCO 2017 | 45.4 | 27.6M | 52G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov4/yolov4.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov4/yolov4-cspdarknet53_320e_map454-50172f93.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov4/yolov4-cspdarknet53_320e_map454-50172f93-cf2b8452.mindir) |
-| YOLOv4 | CSPDarknet53(silu) | D310x1-G | 640 | MS COCO 2017 | 45.8 | 27.6M | 52G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov4/yolov4-silu.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov4/yolov4-cspdarknet53_silu_320e_map458-bdfc3205.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov4/yolov4-cspdarknet53_silu_320e_map458-bdfc3205-a0844d9f.mindir) |
-| YOLOv3 | Darknet53 | D310x1-G | 640 | MS COCO 2017 | 45.5 | 61.9M | 156.4G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov3/yolov3.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolov3/yolov3-darknet53_300e_mAP455-adfb27af.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov3/yolov3-darknet53_300e_mAP455-adfb27af-335965fc.mindir) |
-| YOLOX | N | D310x1-G | 416 | MS COCO 2017 | 24.1 | 0.9M | 1.1G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-nano.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-n_300e_map241-ec9815e3.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-n_300e_map241-ec9815e3-13b3ac7f.mindir) |
-| YOLOX | Tiny | D310x1-G | 416 | MS COCO 2017 | 33.3 | 5.1M | 6.5G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-tiny.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-tiny_300e_map333-e5ae3a2e.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-tiny_300e_map333-e5ae3a2e-ff08fe48.mindir) |
-| YOLOX | S | D310x1-G | 640 | MS COCO 2017 | 40.7 | 9.0M | 26.8G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-s.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-s_300e_map407-0983e07f.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-s_300e_map407-0983e07f-2f0f7762.mindir) |
-| YOLOX | M | D310x1-G | 640 | MS COCO 2017 | 46.7 | 25.3M | 73.8G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-m.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-m_300e_map467-1db321ee.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-m_300e_map467-1db321ee-5a56d70e.mindir) |
-| YOLOX | L | D310x1-G | 640 | MS COCO 2017 | 49.2 | 54.2M | 155.6G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-l.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-l_300e_map492-52a4ab80.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-l_300e_map492-52a4ab80-e1c4f344.mindir) |
-| YOLOX | X | D310x1-G | 640 | MS COCO 2017 | 51.6 | 99.1M | 281.9G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-x.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-x_300e_map516-52216d90.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-x_300e_map516-52216d90-e5c397bc.mindir) |
-| YOLOX | Darknet53 | D310x1-G | 640 | MS COCO 2017 | 47.7 | 63.7M | 185.3G | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-darknet53.yaml) | [ckpt](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-darknet53_300e_map477-b5fcaba9.ckpt) <br> [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-darknet53_300e_map477-b5fcaba9-d3380d02.mindir) |
+  ```shell
+  cd /work
+  git clone https://github.com/mindspore-lab/mindyolo.git
+  cd mindyolo
+  export PYTHONPATH="/work/mindyolo":$PYTHONPATH
+  python ./deploy/mslite_predict.py --mindir_path yolov5n.mindir --config ./configs/yolov5/yolov5n.yaml --image_path test_img.jpg
+  ```
+  yolov5n.mindir is a MindIR file already converted from a ckpt; it can be downloaded from the MindIR support list below.
+
+### MindIR support list
+
+| model | scale | img size | dataset | map | recipe | mindir |
+|--------|:-----:|-----|--------|--------|--------|-------|
+| YOLOv8 | N | 640 | MS COCO 2017 | 37.2 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov8/yolov8n.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-n_500e_mAP372-cc07f5bd-36a7ffec.mindir) |
+| YOLOv8 | S | 640 | MS COCO 2017 | 44.6 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov8/yolov8s.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-s_500e_mAP446-3086f0c9-137e9384.mindir) |
+| YOLOv8 | M | 640 | MS COCO 2017 | 50.5 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov8/yolov8m.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-m_500e_mAP505-8ff7a728-e21c252b.mindir) |
+| YOLOv8 | L | 640 | MS COCO 2017 | 52.8 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov8/yolov8l.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-l_500e_mAP528-6e96d6bb-55db59b4.mindir) |
+| YOLOv8 | X | 640 | MS COCO 2017 | 53.7 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov8/yolov8x.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov8/yolov8-x_500e_mAP537-b958e1c7-2a034e2c.mindir) |
+| YOLOv7 | Tiny | 640 | MS COCO 2017 | 37.5 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov7/yolov7-tiny.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov7/yolov7-tiny_300e_mAP375-d8972c94-c550e241.mindir) |
+| YOLOv7 | L | 640 | MS COCO 2017 | 50.8 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov7/yolov7.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov7/yolov7_300e_mAP508-734ac919-6d65d27c.mindir) |
+| YOLOv7 | X | 640 | MS COCO 2017 | 52.4 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov7/yolov7-x.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov7/yolov7-x_300e_mAP524-e2f58741-583e624b.mindir) |
+| YOLOv5 | N | 640 | MS COCO 2017 | 27.3 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov5/yolov5n.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5n_300e_mAP273-9b16bd7b-bd03027b.mindir) |
+| YOLOv5 | S | 640 | MS COCO 2017 | 37.6 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov5/yolov5s.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5s_300e_mAP376-860bcf3b-c105deb6.mindir) |
+| YOLOv5 | M | 640 | MS COCO 2017 | 44.9 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov5/yolov5m.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5m_300e_mAP449-e7bbf695-b1525c76.mindir) |
+| YOLOv5 | L | 640 | MS COCO 2017 | 48.5 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov5/yolov5l.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5l_300e_mAP485-a28bce73-d4e437c2.mindir) |
+| YOLOv5 | X | 640 | MS COCO 2017 | 50.5 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov5/yolov5x.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov5/yolov5x_300e_mAP505-97d36ddc-cae885cf.mindir) |
+| YOLOv4 | CSPDarknet53 | 608 | MS COCO 2017 | 45.4 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov4/yolov4.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov4/yolov4-cspdarknet53_320e_map454-50172f93-cf2b8452.mindir) |
+| YOLOv4 | CSPDarknet53(silu) | 640 | MS COCO 2017 | 45.8 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov4/yolov4-silu.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov4/yolov4-cspdarknet53_silu_320e_map458-bdfc3205-a0844d9f.mindir) |
+| YOLOv3 | Darknet53 | 640 | MS COCO 2017 | 45.5 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolov3/yolov3.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolov3/yolov3-darknet53_300e_mAP455-adfb27af-335965fc.mindir) |
+| YOLOX | N | 416 | MS COCO 2017 | 24.1 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-nano.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-n_300e_map241-ec9815e3-13b3ac7f.mindir) |
+| YOLOX | Tiny | 416 | MS COCO 2017 | 33.3 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-tiny.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-tiny_300e_map333-e5ae3a2e-ff08fe48.mindir) |
+| YOLOX | S | 640 | MS COCO 2017 | 40.7 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-s.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-s_300e_map407-0983e07f-2f0f7762.mindir) |
+| YOLOX | M | 640 | MS COCO 2017 | 46.7 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-m.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-m_300e_map467-1db321ee-5a56d70e.mindir) |
+| YOLOX | L | 640 | MS COCO 2017 | 49.2 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-l.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-l_300e_map492-52a4ab80-e1c4f344.mindir) |
+| YOLOX | X | 640 | MS COCO 2017 | 51.6 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-x.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-x_300e_map516-52216d90-e5c397bc.mindir) |
+| YOLOX | Darknet53 | 640 | MS COCO 2017 | 47.7 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-darknet53.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-darknet53_300e_map477-b5fcaba9-d3380d02.mindir) |
\ No newline at end of file
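Step 3 of the README above drives deploy/export.py, which ends in MindSpore's export API. A condensed sketch of that flow; the create_model import and its arguments are assumptions for illustration, since the patch does not show how the script builds the network:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

from mindyolo.models import create_model  # assumed helper; the real script builds the net from the YAML config

# illustrative arguments; deploy/export.py derives these from --config/--weight
network = create_model(model_name="yolov5n", checkpoint_path="yolov5.ckpt")
network.set_train(False)

# a dummy NCHW input fixes the exported input shape (batch 1, 3x640x640)
input_arr = Tensor(np.ones([1, 3, 640, 640]), ms.float32)
ms.export(network, input_arr, file_name="yolov5n", file_format="MINDIR")
```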
From 7cc99182c0371be4d38416fb4b967b19cee70cac Mon Sep 17 00:00:00 2001
From: vigo999 <41234155+vigo999@users.noreply.github.com>
Date: Sun, 29 Sep 2024 22:56:35 +0800
Subject: [PATCH 3/3] fix typo of README.md

---
 deploy/README.md | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/deploy/README.md b/deploy/README.md
index 06fffc00..0794c5e4 100644
--- a/deploy/README.md
+++ b/deploy/README.md
@@ -9,9 +9,9 @@
 ### 2 Install MindSpore Lite
   See the official [MindSpore Lite](https://mindspore.cn/lite) page.
-  - Download the tar.gz package, unpack it, and configure the environment variables
+  - Download the tar.gz package, unpack it, and set the LITE_HOME, LD_LIBRARY_PATH, and PATH environment variables
   ```shell
-  tar -zxvf mmindspore_lite-[xxx].tar.gz
+  tar -zxvf mindspore_lite-[xxx].tar.gz
   export LITE_HOME=/[path_to_mindspore_lite_xxx]
   export LD_LIBRARY_PATH=$LITE_HOME/runtime/lib:$LITE_HOME/tools/converter/lib:$LD_LIBRARY_PATH
   export PATH=$LITE_HOME/tools/converter/converter:$LITE_HOME/tools/benchmark:$PATH
@@ -19,12 +19,12 @@
   LITE_HOME is the folder that the tar.gz unpacks to; set an absolute path
   - Install the whl package
   ```shell
-  pip install mindspore_lite-[xxx]whl
+  pip install mindspore_lite-[xxx].whl
   ```
   - Verified MindSpore Lite versions: 2.2.14/2.3.0/2.3.1
   - Install the matching Ascend driver/firmware/ascend-toolkit
 ### 3 Model conversion: ckpt -> mindir (optional)
-  Convert the trained ckpt weights to MindIR; this step must run on Ascend 910.
+  Convert the trained ckpt weights to MindIR
   For example:
   ```shell
   python ./deploy/export.py --config ./configs/yolov5/yolov5n.yaml --weight yolov5.ckpt --file_format MINDIR --device_target Ascend
@@ -43,7 +43,7 @@
   ```
   yolov5n.mindir is a MindIR file already converted from a ckpt; it can be downloaded from the MindIR support list below.
 
-### MindIR support list
+## MindIR support list
 
 | model | scale | img size | dataset | map | recipe | mindir |
 |--------|:-----:|-----|--------|--------|--------|-------|
@@ -71,4 +71,4 @@
 | YOLOX | L | 640 | MS COCO 2017 | 49.2 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-l.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-l_300e_map492-52a4ab80-e1c4f344.mindir) |
 | YOLOX | X | 640 | MS COCO 2017 | 51.6 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-x.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-x_300e_map516-52216d90-e5c397bc.mindir) |
 | YOLOX | Darknet53 | 640 | MS COCO 2017 | 47.7 | [yaml](https://github.com/mindspore-lab/mindyolo/blob/master/configs/yolox/yolox-darknet53.yaml) | [mindir](https://download.mindspore.cn/toolkits/mindyolo/yolox/yolox-darknet53_300e_map477-b5fcaba9-d3380d02.mindir) |
-
\ No newline at end of file
+