From d154146eb91aa31451d621240f40876a6512cc4b Mon Sep 17 00:00:00 2001 From: mjq2020 Date: Fri, 7 Jul 2023 02:07:45 +0000 Subject: [PATCH 1/3] Canonical Code Format --- edgelab/engine/apis/mmdet/train.py | 2 +- edgelab/engine/hooks/logger/text.py | 6 +-- edgelab/engine/hooks/visualization_hook.py | 1 - edgelab/engine/runner/loops.py | 18 +++++---- edgelab/models/__init__.py | 19 +++------ edgelab/models/backbones/AxesNet.py | 2 +- edgelab/models/backbones/MobileNetv2.py | 8 +++- edgelab/models/backbones/ShuffleNetV2.py | 1 - edgelab/models/backbones/SoundNet.py | 1 - edgelab/models/base/general.py | 5 +-- edgelab/models/detectors/__init__.py | 2 +- edgelab/models/heads/axes_head.py | 4 +- edgelab/models/heads/fastestdet_head.py | 1 + edgelab/models/heads/pfld_head.py | 8 ++-- edgelab/models/layers/attention.py | 2 +- edgelab/models/layers/rep.py | 28 ++++++------- edgelab/models/layers/test.py | 47 ++++++++++++++++++++++ edgelab/models/mot/bytetrack.py | 1 - edgelab/models/tf/tf_common.py | 1 - edgelab/visualization/visualizer.py | 28 +++++++------ tools/analysis/get_flops.py | 4 +- tools/dataset_converters/ei2coco.py | 2 - tools/quan_test.py | 12 +++--- tools/utils/config.py | 3 +- tools/utils/inference.py | 25 +++++++----- tools/utils/iot_camera.py | 4 +- tools/utils/quant_read.py | 2 +- 27 files changed, 140 insertions(+), 97 deletions(-) create mode 100644 edgelab/models/layers/test.py diff --git a/edgelab/engine/apis/mmdet/train.py b/edgelab/engine/apis/mmdet/train.py index 738b9e43..1f6d4a1d 100644 --- a/edgelab/engine/apis/mmdet/train.py +++ b/edgelab/engine/apis/mmdet/train.py @@ -6,7 +6,7 @@ import torch import torch.distributed as dist from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner, - Fp16OptimizerHook, OptimizerHook, build_runner,builder, + Fp16OptimizerHook, OptimizerHook, build_runner, get_dist_info) from mmdet.datasets.utils import replace_ImageToTensor diff --git a/edgelab/engine/hooks/logger/text.py b/edgelab/engine/hooks/logger/text.py index b432a803..812523ed 100644 --- a/edgelab/engine/hooks/logger/text.py +++ b/edgelab/engine/hooks/logger/text.py @@ -1,8 +1,7 @@ import logging import datetime from pathlib import Path -from collections import OrderedDict -from typing import Optional, Union, Dict +from typing import Optional, Union import torch import torch.distributed as dist @@ -154,7 +153,8 @@ def setloglevel(self, runner: Runner, handler: logging.Handler = logging.StreamHandler, level: int = logging.ERROR): - if handler in self.handltype: return + if handler in self.handltype: + return for i, hand in enumerate(runner.logger.handlers): if type(hand) is handler: hand.setLevel(level) diff --git a/edgelab/engine/hooks/visualization_hook.py b/edgelab/engine/hooks/visualization_hook.py index 316c4c2e..244634c1 100644 --- a/edgelab/engine/hooks/visualization_hook.py +++ b/edgelab/engine/hooks/visualization_hook.py @@ -85,7 +85,6 @@ def after_test_iter(self, runner: Runner, batch_idx: int, data_batch: dict, ]) out_file = f'{out_file_name}_{index}.{postfix}' out_file = os.path.join(self.out_dir, out_file) - from mmpose.visualization import PoseLocalVisualizer self._visualizer.add_datasample( os.path.basename(img_path) if self.show else 'test_img', img, diff --git a/edgelab/engine/runner/loops.py b/edgelab/engine/runner/loops.py index 360eefc4..96b55950 100644 --- a/edgelab/engine/runner/loops.py +++ b/edgelab/engine/runner/loops.py @@ -1,9 +1,11 @@ from typing import Dict, Sequence, Union, List +import onnx +import torch +from torch.utils.data import DataLoader 
from mmengine.runner import Runner from mmengine.evaluator.evaluator import Evaluator from mmengine.runner.loops import EpochBasedTrainLoop, BaseLoop -from torch.utils.data import DataLoader from edgelab.registry import LOOPS from mmengine.registry import RUNNERS @@ -56,14 +58,16 @@ def __init__( if isinstance(model, list): try: import ncnn - except: + except ImportError: raise ImportError( 'You have not installed ncnn yet, please execute the "pip install ncnn" command to install and run again' ) net = ncnn.Net() for p in model: - if p.endswith('param'): param = p - if p.endswith('bin'): bin = p + if p.endswith('param'): + param = p + if p.endswith('bin'): + bin = p net.load_param(param) net.load_model(bin) # net.opt.use_vulkan_compute = True @@ -71,14 +75,14 @@ def __init__( elif model.endswith('onnx'): try: import onnxruntime - except: + except ImportError: raise ImportError( 'You have not installed onnxruntime yet, please execute the "pip install onnxruntime" command to install and run again' ) try: net = onnx.load(model) onnx.checker.check_model(net) - except: + except ValueError: raise ValueError( 'onnx file have error,please check your onnx export code!') providers = [ @@ -89,7 +93,7 @@ def __init__( elif model.endswith('tflite'): try: import tensorflow as tf - except: + except ImportError: raise ImportError( 'You have not installed tensorflow yet, please execute the "pip install tensorflow" command to install and run again' ) diff --git a/edgelab/models/__init__.py b/edgelab/models/__init__.py index 17c09e49..ea5b7f9d 100644 --- a/edgelab/models/__init__.py +++ b/edgelab/models/__init__.py @@ -1,13 +1,6 @@ -from .backbones import * -from .detectors import * -from .classifiers import * -from .heads import * -from .losses import * -from .necks import * - -__all__ = [ - 'SoundNetRaw', 'Speechcommand', 'PFLD', 'Audio_head', 'Audio_classify', - 'LabelSmoothCrossEntropyLoss', 'PFLDLoss', 'PFLDhead', 'FastestDet', 'SPP', - 'NLLLoss', 'BCEWithLogitsLoss', 'Fomo_Head', 'CustomShuffleNetV2', - 'FomoLoss','Fomo', 'AxesNet', 'AccelerometerClassifier','FPN','MobileNetv2' -] +from .backbones import * #noqa +from .detectors import * #noqa +from .classifiers import * #noqa +from .heads import * #noqa +from .losses import * #noqa +from .necks import * #noqa \ No newline at end of file diff --git a/edgelab/models/backbones/AxesNet.py b/edgelab/models/backbones/AxesNet.py index ecdc84ab..082128ad 100644 --- a/edgelab/models/backbones/AxesNet.py +++ b/edgelab/models/backbones/AxesNet.py @@ -1,6 +1,6 @@ -import torch import torch.nn as nn import torch.nn.functional as F + from edgelab.registry import MODELS diff --git a/edgelab/models/backbones/MobileNetv2.py b/edgelab/models/backbones/MobileNetv2.py index 58993b01..d7a4380a 100644 --- a/edgelab/models/backbones/MobileNetv2.py +++ b/edgelab/models/backbones/MobileNetv2.py @@ -5,7 +5,7 @@ from mmengine.model import BaseModule from edgelab.registry import BACKBONES, MODELS from torchvision.models._utils import _make_divisible -from edgelab.models.layers.rep import RepBlock +from edgelab.models.layers.rep import RepBlock,RepConv1x1 from ..base.general import InvertedResidual, ConvNormActivation @@ -27,7 +27,8 @@ def __init__(self, if block is None and not rep: block = InvertedResidual elif rep: - block = RepBlock + # block = RepBlock + block = RepConv1x1 elif isinstance(block, dict): block = MODELS.build(rep) @@ -76,6 +77,9 @@ def __init__(self, stride=stride, groups=in_channels, norm_layer=norm_layer) + + elif block is RepConv1x1: + 
layer=block(in_channels,out_channels,stride=stride,depth=6) else: layer = block(in_channels, out_channels, diff --git a/edgelab/models/backbones/ShuffleNetV2.py b/edgelab/models/backbones/ShuffleNetV2.py index bacaf645..131c7ed3 100644 --- a/edgelab/models/backbones/ShuffleNetV2.py +++ b/edgelab/models/backbones/ShuffleNetV2.py @@ -1,4 +1,3 @@ -import torchvision import torch from torch import Tensor from typing import Optional diff --git a/edgelab/models/backbones/SoundNet.py b/edgelab/models/backbones/SoundNet.py index 5ab2ec61..ba1125e4 100644 --- a/edgelab/models/backbones/SoundNet.py +++ b/edgelab/models/backbones/SoundNet.py @@ -86,7 +86,6 @@ def __init__(self, factors=[4, 4, 4], out_channel=32): super().__init__() - base_ = 4 model = [ # nn.ReflectionPad1d(3), nn.Conv1d(1, nf, kernel_size=11, stride=6, padding=5, bias=False), diff --git a/edgelab/models/base/general.py b/edgelab/models/base/general.py index 38e72c63..91404ddb 100644 --- a/edgelab/models/base/general.py +++ b/edgelab/models/base/general.py @@ -1,10 +1,7 @@ from typing import Optional, Callable, Dict, AnyStr, Any import torch import torch.nn as nn - -from mmcv.cnn.bricks.norm import MODELS -from mmcv.cnn.bricks.activation import MODELS -from mmcv.cnn.bricks.conv import MODELS +from mmengine.registry import MODELS def get_conv(conv): diff --git a/edgelab/models/detectors/__init__.py b/edgelab/models/detectors/__init__.py index 952b7f1b..2de2b0f1 100644 --- a/edgelab/models/detectors/__init__.py +++ b/edgelab/models/detectors/__init__.py @@ -2,4 +2,4 @@ from .fomo import Fomo from .fastestdet import FastestDet -__all__ = ['PFLD','FastestDet'] +__all__ = ['PFLD', 'FastestDet', 'Fomo'] diff --git a/edgelab/models/heads/axes_head.py b/edgelab/models/heads/axes_head.py index d87d9f04..7c84cdf8 100644 --- a/edgelab/models/heads/axes_head.py +++ b/edgelab/models/heads/axes_head.py @@ -1,9 +1,7 @@ import torch -import torch.nn as nn -import torch.nn.functional as F from edgelab.registry import MODELS from mmcls.models.heads import ClsHead -from typing import List, Optional, Tuple, Union +from typing import Optional, Tuple, Union @MODELS.register_module() class AxesClsHead(ClsHead): diff --git a/edgelab/models/heads/fastestdet_head.py b/edgelab/models/heads/fastestdet_head.py index 9ec59105..ec6a1ca2 100644 --- a/edgelab/models/heads/fastestdet_head.py +++ b/edgelab/models/heads/fastestdet_head.py @@ -3,6 +3,7 @@ import torch import torchvision import torch.nn as nn +from mmcv.cnn import is_norm from mmengine.model import BaseModule from mmengine.model import normal_init, constant_init from edgelab.registry import MODELS diff --git a/edgelab/models/heads/pfld_head.py b/edgelab/models/heads/pfld_head.py index 063cfc9c..77c3d831 100644 --- a/edgelab/models/heads/pfld_head.py +++ b/edgelab/models/heads/pfld_head.py @@ -1,4 +1,4 @@ -from typing import Sequence, Optional, Union +from typing import Sequence, Union import torch import torch.nn as nn from edgelab.registry import HEADS, LOSSES @@ -40,7 +40,7 @@ def __init__( act=act_cfg) self.conv2 = CBR(feature_num[0], feature_num[1], - 7, + 2, 1, bias=False, padding=0, @@ -51,8 +51,8 @@ def __init__( self.lossFunction = LOSSES.build(loss_cfg) def forward(self, x): - if isinstance(x,(list,tuple)): - x=x[0] + if isinstance(x, (list, tuple)): + x = x[0] x1 = self.avg_pool(x) x1 = x1.view(x1.size(0), -1) diff --git a/edgelab/models/layers/attention.py b/edgelab/models/layers/attention.py index f751d9b4..010970d8 100644 --- a/edgelab/models/layers/attention.py +++ 
b/edgelab/models/layers/attention.py @@ -1,4 +1,4 @@ -from typing import List, Optional, Union, Tuple +from typing import List, Union, Tuple from mmengine.model.base_module import BaseModule from edgelab.models.base.general import ConvNormActivation import torch.nn as nn diff --git a/edgelab/models/layers/rep.py b/edgelab/models/layers/rep.py index a0a9a88b..68705d4a 100644 --- a/edgelab/models/layers/rep.py +++ b/edgelab/models/layers/rep.py @@ -8,6 +8,7 @@ # from ..base.general import ConvNormActivation,get_act from edgelab.models.base.general import ConvNormActivation, get_act from edgelab.registry import FUNCTIONS +from edgelab.models.base.general import get_act def padding_weights(weights: Optional[torch.Tensor] = None, @@ -68,9 +69,9 @@ def fuse_conv_norm(block: Union[nn.Sequential, nn.BatchNorm2d, nn.LayerNorm], std = (norm_var + norm_eps).sqrt() t = (norm_gamm / std).reshape(-1, 1, 1, 1) - return conv_weight * t, norm_beta + (0 if conv_bias is None else - conv_bias - - norm_mean) * norm_gamm / std + return conv_weight * t, norm_beta + ( + (0 if conv_bias is None else conv_bias) - + norm_mean) * norm_gamm / std elif isinstance(block, nn.BatchNorm2d): in_channels = block.num_features b = in_channels // groups @@ -102,23 +103,20 @@ def __init__(self, use_dense: bool = True, stride: int = 1, depth: int = 6, - act_cfg: dict = dict(type="LeakyReLU"), + groups: int = 1, + act_cfg: dict = dict(type="ReLU"), init_cfg: Union[dict, List[dict], None] = None): super().__init__(init_cfg) self.depth = depth self.use_res = use_res self.use_dense = use_dense - - if stride > 1: - self.down_sample = nn.MaxPool2d(2, stride=2, padding=0) - else: - self.down_sample = nn.Identity() + self.groups = groups self.conv3x3 = ConvNormActivation(in_channels, out_channels, 3, - 1, + stride, 1, bias=True, activation_layer=None) @@ -145,12 +143,11 @@ def __init__(self, out_channels, 3, padding=1, - stride=1, + stride=stride, bias=True) - self.act = MODELS.build(act_cfg) + self.act = get_act(act_cfg)() def forward(self, x) -> None: - x = self.down_sample(x) if self.training: x = self.conv3x3(x) if self.use_dense: @@ -504,8 +501,7 @@ def rep(self) -> None: try: self.fused_conv.weight.copy_(weights) self.fused_conv.bias.copy_(bias) - except: - + except Exception: self.fused_conv.weight.data = weights self.fused_conv.bias.data = bias @@ -523,7 +519,7 @@ def train(self, mode: bool = True): input = torch.rand(1, 64, 192, 192) pred1 = rep(input, True) print("pred1::", pred1.shape) - rep.eval() + # rep.eval() pred2 = rep(input, False) print("pred2::", pred2.shape) i2 = input diff --git a/edgelab/models/layers/test.py b/edgelab/models/layers/test.py new file mode 100644 index 00000000..6d071928 --- /dev/null +++ b/edgelab/models/layers/test.py @@ -0,0 +1,47 @@ +import torch +import torchvision + +def fuse(conv, bn): + + fused = torch.nn.Conv2d( + conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + bias=True + ) + + # setting weights + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps+bn.running_var))) + fused.weight.copy_( torch.mm(w_bn, w_conv).view(fused.weight.size()) ) + + # setting bias + if conv.bias is not None: + b_conv = conv.bias + else: + b_conv = torch.zeros( conv.weight.size(0) ) + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div( + torch.sqrt(bn.running_var + bn.eps) + ) + fused.bias.copy_( b_conv + b_bn ) + + return fused + +# Testing +# we need to turn off gradient calculation 
because we didn't write it +torch.set_grad_enabled(False) +x = torch.randn(16, 3, 256, 256) +resnet18 = torchvision.models.resnet18(pretrained=True) +# removing all learning variables, etc +resnet18.eval() +model = torch.nn.Sequential( + resnet18.conv1, + resnet18.bn1 +) +f1 = model.forward(x) +fused = fuse(model[0], model[1]) +f2 = fused.forward(x) +d = (f1 - f2).mean().item() +print("error:",d) \ No newline at end of file diff --git a/edgelab/models/mot/bytetrack.py b/edgelab/models/mot/bytetrack.py index 77b9b639..6b971ffb 100644 --- a/edgelab/models/mot/bytetrack.py +++ b/edgelab/models/mot/bytetrack.py @@ -1,4 +1,3 @@ -from mmcv.runner import BaseModule from mmtrack.models.builder import MODELS from mmtrack.models.mot.base import BaseMultiObjectTracker from mmdet.models.detectors.base import BaseDetector diff --git a/edgelab/models/tf/tf_common.py b/edgelab/models/tf/tf_common.py index 06212941..a939c7f9 100644 --- a/edgelab/models/tf/tf_common.py +++ b/edgelab/models/tf/tf_common.py @@ -1,6 +1,5 @@ import tensorflow as tf from tensorflow import keras -import numpy as np import torch.nn as nn diff --git a/edgelab/visualization/visualizer.py b/edgelab/visualization/visualizer.py index 4c0be1c7..d1a9d61c 100644 --- a/edgelab/visualization/visualizer.py +++ b/edgelab/visualization/visualizer.py @@ -1,4 +1,4 @@ -from typing import Dict, List, Optional, Sequence, Tuple +from typing import List, Optional, Sequence, Tuple from mmengine.dist import master_only from mmengine.structures import InstanceData from mmdet.structures import DetDataSample @@ -126,7 +126,7 @@ def _draw_fomo_instances( elif 'pred' in instances: preds = instances.pred - labelss = instances.labels + # labelss = instances.labels points = [] for pred in preds: pred = pred.permute(0, 2, 3, 1).cpu().numpy()[0] @@ -250,25 +250,28 @@ def add_datasample(self, classes = self.dataset_meta.get('classes', None) sensors = data_sample.sensors - uints = [sensor['units'] for sensor in sensors if sensor['units'] not in uints] - + uints = [] + uints = [ + sensor['units'] for sensor in sensors + if sensor['units'] not in uints + ] + # slice the data into different sensors inputs = [data[i::len(sensors)] for i in range(len(sensors))] - + _, axs = plt.subplots(len(uints), 1) - + texts = [] for j, input in enumerate(inputs): if len(uints) > 1: index = uints.index(sensors[j]['units']) - ax = axs[index] + ax = axs[index] else: ax = axs - + ax.plot(input, label=sensors[j]['name']) ax.set_ylabel(sensors[j]['units']) - - + if draw_gt and 'gt_label' in data_sample: gt_label = data_sample.gt_label idx = gt_label.label.tolist() @@ -298,7 +301,7 @@ def add_datasample(self, ] prefix = 'Prediction: ' texts.append(prefix + ('\n' + ' ' * len(prefix)).join(labels)) - + plt.set_title(texts) plt.tight_layout() plt.legend() @@ -306,7 +309,8 @@ def add_datasample(self, fig.canvas.draw() buf = fig.canvas.tostring_rgb() w, h = fig.canvas.get_width_height() - image = np.frombuffer(buf, dtype=np.uint8, count=h*w*3).reshape(h, w, 3) + image = np.frombuffer(buf, dtype=np.uint8, + count=h * w * 3).reshape(h, w, 3) self.set_image(image) drawn_img = self.get_image() diff --git a/tools/analysis/get_flops.py b/tools/analysis/get_flops.py index 68adb3cc..e2df3afa 100644 --- a/tools/analysis/get_flops.py +++ b/tools/analysis/get_flops.py @@ -75,8 +75,8 @@ def inference(args, logger): else: try: h, w = cfg.height, cfg.width - except: - raise ValueError('invalid input shape') + except Exception as e: + raise ValueError('invalid input shape') from e # model model = 
MODELS.build(cfg.model) diff --git a/tools/dataset_converters/ei2coco.py b/tools/dataset_converters/ei2coco.py index 68c171be..0d3624c9 100644 --- a/tools/dataset_converters/ei2coco.py +++ b/tools/dataset_converters/ei2coco.py @@ -1,6 +1,4 @@ -import numpy as np import argparse -import math import shutil import os import json diff --git a/tools/quan_test.py b/tools/quan_test.py index 5a160a7e..a78b19dd 100644 --- a/tools/quan_test.py +++ b/tools/quan_test.py @@ -1,5 +1,5 @@ import os -from typing import * +from typing import List,AnyStr import ncnn import numpy as np @@ -7,8 +7,8 @@ import tqdm.std from PIL import Image import onnxruntime -from torchvision.transforms import * import tensorflow as tf +from torchvision.transforms import ToTensor,Resize,Grayscale,Compose input_name = 'images' output_name = 'output' @@ -28,15 +28,17 @@ def __init__(self, model: List or AnyStr): if isinstance(model, list): net = ncnn.Net() for p in model: - if p.endswith('param'): param = p - if p.endswith('bin'): bin = p + if p.endswith('param'): + param = p + if p.endswith('bin'): + bin = p net.load_param(param) net.load_model(bin) elif model.endswith('onnx'): try: net = onnx.load(model) onnx.checker.check_model(net) - except: + except Exception: raise 'onnx file have error,please check your onnx export code!' net = onnxruntime.InferenceSession(model) elif model.endswith('tflite'): diff --git a/tools/utils/config.py b/tools/utils/config.py index 670c1aa5..6bbfe459 100644 --- a/tools/utils/config.py +++ b/tools/utils/config.py @@ -25,7 +25,8 @@ def replace(data: str, args: Optional[dict] = None) -> str: Returns: data(str): the replaced string """ - if not args: return data + if not args: + return data for key, value in args.items(): if isinstance(value, (int, float)): data = re.sub(f"^{key}\s?=\s?[^,{key}].*?[^,{key}].*?$\n", diff --git a/tools/utils/inference.py b/tools/utils/inference.py index 9de6beb0..6c86f723 100644 --- a/tools/utils/inference.py +++ b/tools/utils/inference.py @@ -11,7 +11,6 @@ from tqdm.std import tqdm from torch.utils.data import DataLoader -from mmdet.models.utils import samplelist_boxtype2tensor from mmengine.config import Config from mmengine.evaluator import Evaluator from mmengine.structures import InstanceData @@ -28,14 +27,16 @@ def __init__(self, model: List or AnyStr or Tuple): if isinstance(model, list): try: import ncnn - except: + except ImportError: raise ImportError( 'You have not installed ncnn yet, please execute the "pip install ncnn" command to install and run again' ) net = ncnn.Net() for p in model: - if p.endswith('param'): param = p - if p.endswith('bin'): bin = p + if p.endswith('param'): + param = p + if p.endswith('bin'): + bin = p net.load_param(param) net.load_model(bin) # net.opt.use_vulkan_compute = True @@ -43,14 +44,14 @@ def __init__(self, model: List or AnyStr or Tuple): elif model.endswith('onnx'): try: import onnxruntime - except: + except ImportError: raise ImportError( 'You have not installed onnxruntime yet, please execute the "pip install onnxruntime" command to install and run again' ) try: net = onnx.load(model) onnx.checker.check_model(net) - except: + except Exception: raise ValueError( 'onnx file have error,please check your onnx export code!') providers = [ @@ -66,7 +67,7 @@ def __init__(self, model: List or AnyStr or Tuple): elif model.endswith('tflite'): try: import tensorflow as tf - except: + except ImportError: raise ImportError( 'You have not installed tensorflow yet, please execute the "pip install tensorflow" command to install and 
run again' ) @@ -117,7 +118,7 @@ def __call__(self, elif self.engine == 'ncnn': # ncnn self.inter.opt.use_vulkan_compute = False extra = self.inter.create_extractor() - extra.input(input_name, ncnn.Mat(img[0])) + extra.input(input_name, ncnn.Mat(img[0])) #noqa result = extra.extract(output_name)[1] result = [result[i] for i in range(len(result))] else: # tf @@ -320,6 +321,7 @@ def test(self) -> None: img_path = None t0 = time.time() + # print(inputs.shape) preds = self.model(inputs) self.time_cost += time.time() - t0 @@ -385,7 +387,7 @@ def test(self) -> None: preds = NMS(bbox, conf, classes, - conf_thres=25, + conf_thres=50, bbox_format='xywh') # show det result and save result show_det(preds, @@ -561,14 +563,15 @@ def show_det(pred: np.ndarray, color=(0, 0, 255), thickness=1, fontScale=1) - + print(pred) if show: cv2.imshow(win_name, img) - cv2.waitKey(500) + cv2.waitKey(0) if save_path: img_name = osp.basename(img_file) cv2.imwrite(osp.join(save_path, img_name), img) + return pred diff --git a/tools/utils/iot_camera.py b/tools/utils/iot_camera.py index f12f203f..06e3a783 100644 --- a/tools/utils/iot_camera.py +++ b/tools/utils/iot_camera.py @@ -86,7 +86,7 @@ def pare_data(self, data: bytearray): try: img = Image.open(BytesIO(self.buff)) self.img = np.array(img) - except: + except Exception: self.buff = bytearray() return @@ -153,7 +153,7 @@ def disconnect(self): handle.close() print('Device has been reset!') return True - except: + except Exception: return False def get_rlease_device(self, did, get=True): diff --git a/tools/utils/quant_read.py b/tools/utils/quant_read.py index af588bb3..95ee66e6 100644 --- a/tools/utils/quant_read.py +++ b/tools/utils/quant_read.py @@ -33,7 +33,7 @@ def get_next(self) -> dict: raise StopIteration img =self.process_data(a) return {self.input_name:np.array([img])} - except: + except Exception: return None From 223a7f4cdf4c9b2095691b0816035e4b7d1acf27 Mon Sep 17 00:00:00 2001 From: mjq2020 Date: Fri, 7 Jul 2023 06:34:17 +0000 Subject: [PATCH 2/3] specification code for pass ci --- edgelab/engine/hooks/logger/pavi.py | 58 +++++++++++----------- edgelab/engine/hooks/logger/tensorboard.py | 14 +++--- edgelab/engine/runner/loops.py | 1 - edgelab/models/__init__.py | 12 ++--- edgelab/models/classifiers/image.py | 1 - edgelab/models/heads/cls_head.py | 4 -- edgelab/models/layers/rep.py | 3 -- edgelab/models/tf/tf_common.py | 2 +- edgelab/models/utils/computer_acc.py | 1 - tools/utils/inference.py | 3 +- 10 files changed, 42 insertions(+), 57 deletions(-) diff --git a/edgelab/engine/hooks/logger/pavi.py b/edgelab/engine/hooks/logger/pavi.py index 3fdfd7cc..365c1d95 100644 --- a/edgelab/engine/hooks/logger/pavi.py +++ b/edgelab/engine/hooks/logger/pavi.py @@ -3,15 +3,13 @@ import json import warnings import os.path as osp -from functools import partial from typing import Optional, Union, Dict import mmcv -import torch from edgelab.registry import HOOKS from mmengine.dist.utils import master_only # from mmcv.runner import HOOKS -from mmengine.dist.utils import master_only + # from mmcv.parallel.scatter_gather import scatter # from mmcv.parallel.utils import is_module_wrapper @@ -129,33 +127,33 @@ def _add_ckpt(self, runner, ckpt_path: str, step: int) -> None: snapshot_file_path=ckpt_path, iteration=step) - def _add_graph(self, runner, step: int) -> None: - from mmcv.runner.iter_based_runner import IterLoader - if is_module_wrapper(runner.model): - _model = runner.model.module - else: - _model = runner.model - device = next(_model.parameters()).device - # Note that 
if your sampler indices is generated in init method, your - # dataset may be one less. - if isinstance(runner.data_loader, IterLoader): - data = next(iter(runner.data_loader._dataloader)) - else: - data = next(iter(runner.data_loader)) - data = scatter(data, [device.index])[0] - img = data[self.img_key] - with torch.no_grad(): - origin_forward = _model.forward - if hasattr(_model, 'forward_dummy'): - _model.forward = _model.forward_dummy - if self.dummy_forward_kwargs: - _model.forward = partial(_model.forward, - **self.dummy_forward_kwargs) - self.writer.add_graph(_model, - img, - tag=f'{self.run_name}_{step}', - opset_version=self.opset_version) - _model.forward = origin_forward + # def _add_graph(self, runner, step: int) -> None: + # from mmcv.runner.iter_based_runner import IterLoader + # if is_module_wrapper(runner.model): + # _model = runner.model.module + # else: + # _model = runner.model + # device = next(_model.parameters()).device + # # Note that if your sampler indices is generated in init method, your + # # dataset may be one less. + # if isinstance(runner.data_loader, IterLoader): + # data = next(iter(runner.data_loader._dataloader)) + # else: + # data = next(iter(runner.data_loader)) + # data = scatter(data, [device.index])[0] + # img = data[self.img_key] + # with torch.no_grad(): + # origin_forward = _model.forward + # if hasattr(_model, 'forward_dummy'): + # _model.forward = _model.forward_dummy + # if self.dummy_forward_kwargs: + # _model.forward = partial(_model.forward, + # **self.dummy_forward_kwargs) + # self.writer.add_graph(_model, + # img, + # tag=f'{self.run_name}_{step}', + # opset_version=self.opset_version) + # _model.forward = origin_forward @master_only def log(self, runner) -> None: diff --git a/edgelab/engine/hooks/logger/tensorboard.py b/edgelab/engine/hooks/logger/tensorboard.py index 2fdfb179..7fa0c32b 100644 --- a/edgelab/engine/hooks/logger/tensorboard.py +++ b/edgelab/engine/hooks/logger/tensorboard.py @@ -1,4 +1,4 @@ -import os.path as osp +import os from typing import Optional, Dict, Union from edgelab.registry import HOOKS @@ -6,7 +6,7 @@ # from mmcv.runner import HOOKS # from mmcv.runner.dist_utils import master_only -# from mmcv.utils import TORCH_VERSION, digit_version +from mmengine.utils.dl_utils import TORCH_VERSION # from mmcv.runner.hooks.logger.text import TextLoggerHook from .text import TextLoggerHook @@ -35,8 +35,9 @@ def __init__(self, @master_only def before_run(self, runner) -> None: super().before_run(runner) - if (TORCH_VERSION == 'parrots' - or digit_version(TORCH_VERSION) < digit_version('1.1')): + if not os.path.exists(self.log_dir): + os.makedirs(self.log_dir, exist_ok=True) # type: ignore + if TORCH_VERSION == 'parrots': try: from tensorboardX import SummaryWriter except ImportError: @@ -50,10 +51,7 @@ def before_run(self, runner) -> None: 'Please run "pip install future tensorboard" to install ' 'the dependencies to use torch.utils.tensorboard ' '(applicable to PyTorch 1.1 or higher)') - - if self.log_dir is None: - self.log_dir = osp.join(runner.work_dir, 'tf_logs') - self.writer = SummaryWriter(self.log_dir) + self._tensorboard = SummaryWriter(self.log_dir) @master_only def log(self, runner) -> None: diff --git a/edgelab/engine/runner/loops.py b/edgelab/engine/runner/loops.py index 96b55950..c1255d15 100644 --- a/edgelab/engine/runner/loops.py +++ b/edgelab/engine/runner/loops.py @@ -8,7 +8,6 @@ from mmengine.runner.loops import EpochBasedTrainLoop, BaseLoop from edgelab.registry import LOOPS -from mmengine.registry import 
RUNNERS @LOOPS.register_module() diff --git a/edgelab/models/__init__.py b/edgelab/models/__init__.py index ea5b7f9d..ac7e23d4 100644 --- a/edgelab/models/__init__.py +++ b/edgelab/models/__init__.py @@ -1,6 +1,6 @@ -from .backbones import * #noqa -from .detectors import * #noqa -from .classifiers import * #noqa -from .heads import * #noqa -from .losses import * #noqa -from .necks import * #noqa \ No newline at end of file +from .backbones import * # noqa +from .detectors import * # noqa +from .classifiers import * # noqa +from .heads import * # noqa +from .losses import * # noqa +from .necks import * # noqa diff --git a/edgelab/models/classifiers/image.py b/edgelab/models/classifiers/image.py index 54c88923..b6feee45 100644 --- a/edgelab/models/classifiers/image.py +++ b/edgelab/models/classifiers/image.py @@ -2,7 +2,6 @@ from typing import List, Optional import torch -import torch.nn as nn import torch.nn.functional as F from edgelab.registry import MODELS diff --git a/edgelab/models/heads/cls_head.py b/edgelab/models/heads/cls_head.py index c9ee060b..bada41b6 100644 --- a/edgelab/models/heads/cls_head.py +++ b/edgelab/models/heads/cls_head.py @@ -1,8 +1,4 @@ -from typing import List, Optional, Tuple, Union - -import torch import torch.nn as nn -import torch.nn.functional as F from edgelab.registry import MODELS diff --git a/edgelab/models/layers/rep.py b/edgelab/models/layers/rep.py index 7f3bb89e..9f783ec6 100644 --- a/edgelab/models/layers/rep.py +++ b/edgelab/models/layers/rep.py @@ -4,10 +4,7 @@ import torch.nn as nn from mmengine.model import BaseModule -# from ..base.general import ConvNormActivation,get_act from edgelab.models.base.general import ConvNormActivation, get_act - -from edgelab.models.base.general import get_act from edgelab.registry import FUNCTIONS, MODELS diff --git a/edgelab/models/tf/tf_common.py b/edgelab/models/tf/tf_common.py index a939c7f9..856e6c4b 100644 --- a/edgelab/models/tf/tf_common.py +++ b/edgelab/models/tf/tf_common.py @@ -188,7 +188,7 @@ def __init__(self, w=None): elif isinstance(w, nn.LeakyReLU): act = keras.layers.LeakyReLU(w.negative_slope) elif isinstance(w, nn.Sigmoid): - act = lambda x: keras.activations.sigmoid(x) + act = lambda x: keras.activations.sigmoid(x) # noqa else: raise Exception(f'no matching TensorFlow activation found for PyTorch activation {w}') self.act = act diff --git a/edgelab/models/utils/computer_acc.py b/edgelab/models/utils/computer_acc.py index bceb5137..30591164 100644 --- a/edgelab/models/utils/computer_acc.py +++ b/edgelab/models/utils/computer_acc.py @@ -23,7 +23,6 @@ def pose_acc(pred, target, hw, th=10): def audio_acc(pred, target): - import numpy as np pred = pred[0] if len(pred.shape)==2 else pred # onnx shape(d,), tflite shape(1,d) pred = pred.argsort()[::-1][:5] correct = (target==pred).astype(float) diff --git a/tools/utils/inference.py b/tools/utils/inference.py index 6c86f723..3baf8af3 100644 --- a/tools/utils/inference.py +++ b/tools/utils/inference.py @@ -118,7 +118,7 @@ def __call__(self, elif self.engine == 'ncnn': # ncnn self.inter.opt.use_vulkan_compute = False extra = self.inter.create_extractor() - extra.input(input_name, ncnn.Mat(img[0])) #noqa + extra.input(input_name, ncnn.Mat(img[0])) # noqa result = extra.extract(output_name)[1] result = [result[i] for i in range(len(result))] else: # tf @@ -229,7 +229,6 @@ def build_target(pred_shape, ori_shape, gt_bboxs): bbox: xyxy """ H, W, C = pred_shape - B = len(gt_bboxs) target_data = torch.zeros(size=(1, *pred_shape)) target_data[..., 0] = 1 From 
06e4f3057d04d80e61ae96a64aa8894c28361240 Mon Sep 17 00:00:00 2001 From: mjq2020 Date: Fri, 7 Jul 2023 06:36:56 +0000 Subject: [PATCH 3/3] optimzier EfficientNet Block --- edgelab/models/backbones/EfficientNet.py | 61 ++++++++++++++---------- 1 file changed, 36 insertions(+), 25 deletions(-) diff --git a/edgelab/models/backbones/EfficientNet.py b/edgelab/models/backbones/EfficientNet.py index f28ef993..7ab25dd2 100644 --- a/edgelab/models/backbones/EfficientNet.py +++ b/edgelab/models/backbones/EfficientNet.py @@ -1,7 +1,6 @@ from typing import Optional, List, Callable from functools import partial import copy -import math from torch import nn import torch from torchvision.ops import StochasticDepth @@ -9,6 +8,7 @@ from mmdet.models.utils.make_divisible import make_divisible from mmengine.model import BaseModule from edgelab.models.base.general import ConvNormActivation, SqueezeExcitation +from edgelab.models.layers.rep import RepConv1x1 class MBConvConfig: @@ -36,12 +36,12 @@ def adjust_depth(num_layers: int, depth_mult: float): class MBConv(nn.Module): - def __init__( - self, - cnf: MBConvConfig, - stochastic_depth_prob: float, - norm_layer: Callable[..., nn.Module], - se_layer: Callable[..., nn.Module] = SqueezeExcitation) -> None: + def __init__(self, + cnf: MBConvConfig, + stochastic_depth_prob: float, + norm_layer: Callable[..., nn.Module], + se_layer: Callable[..., nn.Module] = SqueezeExcitation, + rep: bool = False) -> None: super().__init__() if not (1 <= cnf.stride <= 2): @@ -50,7 +50,7 @@ def __init__( self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels layers: List[nn.Module] = [] - activation_layer = nn.SiLU + activation_layer = nn.ReLU # expand expanded_channels = cnf.adjust_channels(cnf.input_channels, @@ -64,21 +64,29 @@ def __init__( activation_layer=activation_layer)) # depthwise - layers.append( - ConvNormActivation(expanded_channels, - expanded_channels, - kernel_size=cnf.kernel, - stride=cnf.stride, - groups=expanded_channels, - norm_layer=norm_layer, - activation_layer=activation_layer)) + if rep: + layers.append( + RepConv1x1(expanded_channels, + expanded_channels, + stride=cnf.stride, + act_cfg=activation_layer)) - # squeeze and excitation - squeeze_channels = max(1, cnf.input_channels // 4) - layers.append( - se_layer(expanded_channels, - squeeze_channels, - activation=partial(nn.SiLU, inplace=True))) + else: + layers.append( + ConvNormActivation(expanded_channels, + expanded_channels, + kernel_size=cnf.kernel, + stride=cnf.stride, + groups=expanded_channels, + norm_layer=norm_layer, + activation_layer=activation_layer)) + + # squeeze and excitation + squeeze_channels = max(1, cnf.input_channels // 4) + layers.append( + se_layer(expanded_channels, + squeeze_channels, + activation=partial(nn.ReLU, inplace=True))) # project layers.append( @@ -114,7 +122,8 @@ class EfficientNet(BaseModule): ] width_depth_mult = { - 'b0': [0.15, 0.35, 0.2], + 'bt': [0.35, 0.35, 0.2], + 'b0': [1.0, 1.0, 0.2], 'b1': [1.0, 1.1, 0.2], 'b2': [1.1, 1.2, 0.3], 'b3': [1.2, 1.4, 0.3], @@ -131,6 +140,7 @@ def __init__(self, norm_cfg='BN', frozen_stages=-1, norm_eval=False, + rep=False, init_cfg: Optional[dict] = None): super().__init__(init_cfg) @@ -156,7 +166,7 @@ def __init__(self, 3, 2, norm_layer=norm_cfg, - activation_layer='SiLU') + activation_layer='ReLU') total_stage_blocks = sum([cnf.num_layers for cnf in arch_param]) stage_block_id = 0 @@ -170,7 +180,8 @@ def __init__(self, conf.stride = 1 sd_prob = stochastic_depth_prob * float( stage_block_id) / 
total_stage_blocks - layer.append(MBConv(conf, sd_prob, norm_layer=norm_cfg)) + layer.append( + MBConv(conf, sd_prob, norm_layer=norm_cfg, rep=rep)) stage_block_id += 1 self.add_module(name, nn.Sequential(*layer))
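
Taken together, the series wires the re-parameterizable RepConv1x1 layer from edgelab/models/layers/rep.py into both the MobileNetv2 and EfficientNet backbones behind an opt-in switch. As a minimal usage sketch (illustration only, not part of the patches), an MMEngine-style config could enable the new EfficientNet rep branch roughly as below; the registry name 'EfficientNet' and the 'arch' key are assumptions inferred from the class and its width_depth_mult presets, while norm_cfg, frozen_stages, norm_eval and rep follow the constructor signature visible in PATCH 3/3.

# Hypothetical config fragment -- not part of this patch series.
# 'type' assumes the backbone is registered under its class name;
# 'arch' assumes a preset key from width_depth_mult ('bt', 'b0', ...,
# as extended by PATCH 3/3); the remaining keys mirror the constructor
# arguments shown in the EfficientNet diff.
model = dict(
    backbone=dict(
        type='EfficientNet',
        arch='b0',
        norm_cfg='BN',
        frozen_stages=-1,
        norm_eval=False,
        rep=True,  # swap MBConv's depthwise ConvNormActivation for RepConv1x1
    ),
)

At deployment the Rep* layers are presumably folded back into a single convolution, following the same conv+BN fusion idea demonstrated by the new edgelab/models/layers/test.py added in PATCH 1/3.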