From e5a8af0c917bb88b7b4beb20c418731593dba83e Mon Sep 17 00:00:00 2001
From: mjq2020
Date: Thu, 13 Jul 2023 08:41:04 +0000
Subject: [PATCH 01/13] add sensor data loading library

---
 requirements/base.txt | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/requirements/base.txt b/requirements/base.txt
index 378c65be..9f18ab0e 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
@@ -1,5 +1,8 @@
 # common
 albumentations>=1.3.0
+
+# sensor
+cbor
 numpy>=1.23.5
 
 # vision

From 9465379d7451da7c8d64d0e0a7b927d9d8730f06 Mon Sep 17 00:00:00 2001
From: mjq2020
Date: Sat, 15 Jul 2023 11:43:00 +0000
Subject: [PATCH 02/13] add LODA anomaly detection algorithm

---
 edgelab/models/classifiers/__init__.py |   3 +-
 edgelab/models/classifiers/loda.py     | 110 +++++++++++++++++++++++++
 2 files changed, 112 insertions(+), 1 deletion(-)
 create mode 100644 edgelab/models/classifiers/loda.py

diff --git a/edgelab/models/classifiers/__init__.py b/edgelab/models/classifiers/__init__.py
index b910af60..14542798 100644
--- a/edgelab/models/classifiers/__init__.py
+++ b/edgelab/models/classifiers/__init__.py
@@ -1,5 +1,6 @@
 from .accelerometer import AccelerometerClassifier
 from .Audio_speech import Audio_classify
 from .image import ImageClassifier
+from .loda import LODA
 
-__all__ = ['Audio_classify', 'AccelerometerClassifier', 'ImageClassifier']
+__all__ = ['Audio_classify', 'AccelerometerClassifier', 'ImageClassifier', 'LODA']
diff --git a/edgelab/models/classifiers/loda.py b/edgelab/models/classifiers/loda.py
new file mode 100644
index 00000000..e33e3c95
--- /dev/null
+++ b/edgelab/models/classifiers/loda.py
@@ -0,0 +1,110 @@
+import math
+from typing import List, Optional, Union
+
+import torch
+from mmcls.structures import ClsDataSample
+from mmengine.model import BaseModel
+
+from edgelab.registry import MODELS
+
+
+@MODELS.register_module()
+class LODA(BaseModel):
+    def __init__(
+        self,
+        num_bins: int = 10,
+        num_cuts: int = 100,
+        yield_rate: float = 0.9,
+        init_cfg: Union[dict, List[dict], None] = None,
+    ) -> None:
+        super().__init__(init_cfg)
+        self.num_bins = num_bins
+        self.num_cuts = num_cuts
+        self.weights = torch.ones(num_cuts, dtype=torch.float) / num_cuts
+
+        self.histograms = torch.zeros((self.num_cuts, self.num_bins))
+        self.limits = torch.zeros((self.num_cuts, self.num_bins + 1))
+        self.yield_rate = yield_rate
+        self.num = 0
+
+    def parameters(self, recurse: bool = True):
+        """Returns an iterator over module parameters.
+
+        This is typically passed to an optimizer.
+
+        Args:
+            recurse (bool): if True, then yields parameters of this module
+                and all submodules. Otherwise, yields only parameters that
+                are direct members of this module.
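+
+        Note:
+            LODA fits by random projection and histogram density estimation
+            rather than by gradient descent, so it has no real learnable
+            weights; this override appears to exist only so the optimizer
+            wrapper has a tensor to hold, which is why it yields a single
+            dummy parameter. The anomaly score follows
+            ``-(1 / num_cuts) * sum_i(w_i * log(p_i(x)))``, where ``p_i`` is
+            the histogram estimate along the ``i``-th random projection.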
+ + Yields: + Parameter: module parameter + + Example:: + + >>> # xdoctest: +SKIP("undefined vars") + >>> for param in model.parameters(): + >>> print(type(param), param.size()) + (20L,) + (20L, 1L, 5L, 5L) + """ + yield torch.nn.Parameter(torch.randn(1)) + + def forward( + self, inputs: torch.Tensor, data_samples: Optional[List[ClsDataSample]] = None, mode: str = 'tensor' + ) -> Union[dict, List]: + if mode == 'tensor': + return self.loss(inputs) + elif mode == 'loss': + return self.loss(inputs, data_samples) + elif mode == 'predict': + return self.predict(inputs, data_samples) + else: + raise ValueError(f'Invalid mode "{mode}".') + + def loss(self, inputs: torch.Tensor, data_samples: Optional[List[ClsDataSample]] = None) -> dict: + x = inputs[0] if isinstance(inputs, list) else inputs + x = x.to('cpu') + pred_scores = torch.zeros(x.shape[0], 1) + + num_features = x.shape[1] + + num_features_sqrt = int(math.sqrt(num_features)) + num_features_zero = num_features - num_features_sqrt + + self.projections_ = torch.randn(self.num_cuts, num_features) + + for i in range(self.num_cuts): + perm = torch.randperm(num_features)[:num_features_zero] + self.projections_[i, perm] = 0.0 + projected_vectors = self.projections_[i, :].unsqueeze(0).matmul(x.T).squeeze(0) + self.histograms[i, :], self.limits[i, :] = torch.histogram( + projected_vectors, bins=self.num_bins, density=False + ) + self.histograms[i, :] += 1e-12 + self.histograms[i, :] /= torch.sum(self.histograms[i, :]) + + inds = torch.searchsorted(self.limits[i, : self.num_bins - 1], projected_vectors, side='left') + pred_scores[:, 0] += -self.weights[i] * torch.log(self.histograms[i, inds]) + + decision_scores: torch.Tensor = (pred_scores / self.num_cuts).ravel() * 1.06 + self.threshold_: torch.Tensor = torch.quantile(decision_scores, self.yield_rate) + return { + 'loss': torch.nn.Parameter(self.threshold_), + 'threshold': self.threshold_, + } + + def predict(self, inputs: torch.Tensor, data_samples: Optional[List[ClsDataSample]] = None) -> List[ClsDataSample]: + x = inputs[0] if isinstance(inputs, list) else inputs + x = x.to('cpu') + pred_scores = torch.zeros(x.shape[0], 1) + for i in range(self.num_cuts): + projected_vectors = self.projections_[i, :].unsqueeze(0).matmul(x.T).squeeze(0) + inds = torch.searchsorted(self.limits[i, : self.num_bins - 1], projected_vectors, side='left') + pred_scores[:, 0] += -self.weights[i] * torch.log(self.histograms[i, inds]) + pred_scores = (pred_scores / self.num_cuts).ravel() + prediction = (pred_scores > self.threshold_).long().ravel() + data_samples[0].set_pred_label(pred_scores).set_pred_score(prediction) + self.num += sum(prediction).item() + + return [data_samples[0]] From d5eeb672ff1703b2b7828b291c166bb7a0759846 Mon Sep 17 00:00:00 2001 From: mjq2020 Date: Sat, 15 Jul 2023 11:44:00 +0000 Subject: [PATCH 03/13] add loda config --- configs/anomaly/loda.py | 77 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 configs/anomaly/loda.py diff --git a/configs/anomaly/loda.py b/configs/anomaly/loda.py new file mode 100644 index 00000000..a90a99a8 --- /dev/null +++ b/configs/anomaly/loda.py @@ -0,0 +1,77 @@ +_base_ = '../_base_/default_runtime_cls.py' + + +window_size = 30 +stride = 10 +num_axes = 3 + +model = dict(type='edgelab.LODA', num_bins=10, num_cuts=100, yield_rate=0.9) + + +# dataset settings +dataset_type = 'edgelab.SensorDataset' +data_root = './datasets/aixs-export' +batch_size = 1 +workers = 1 + +shape = [1, num_axes * window_size] + +train_pipeline = [ + 
# dict(type='edgelab.LoadSensorFromFile'), + dict(type='edgelab.PackSensorInputs'), +] + +test_pipeline = [ + # dict(type='edgelab.LoadSensorFromFile'), + dict(type='edgelab.PackSensorInputs'), +] + +train_dataloader = dict( + batch_size=batch_size, + num_workers=workers, + dataset=dict( + type=dataset_type, + data_root=data_root, + data_prefix='training', + ann_file='info.labels', + window_size=window_size, + stride=stride, + pack=True, + pipeline=train_pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=True), +) + + +val_dataloader = dict( + batch_size=batch_size, + num_workers=workers, + shuffle=False, + dataset=dict( + type=dataset_type, + data_root=data_root, + window_size=window_size, + stride=stride, + pack=False, + data_prefix='testing', + ann_file='info.labels', + pipeline=test_pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_evaluator = dict(type='mmcls.Accuracy', topk=(1)) + + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator + +lr = 0.1 +epochs = 1 +optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='Adam', lr=lr, betas=[0.9, 0.99], weight_decay=0)) + +train_cfg = dict(by_epoch=True, max_epochs=epochs) + +val_cfg = dict() +test_cfg = dict() From c6792070bbd055503688a70264a5bb1cbf21190c Mon Sep 17 00:00:00 2001 From: mjq2020 Date: Mon, 31 Jul 2023 03:02:20 +0000 Subject: [PATCH 04/13] optimizer log printing and saving --- configs/_base_/default_runtime_det.py | 2 +- configs/_base_/default_runtime_pose.py | 2 +- edgelab/engine/hooks/logger/text.py | 16 ++++++++-------- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/configs/_base_/default_runtime_det.py b/configs/_base_/default_runtime_det.py index d1e9ca56..bc3c5bf1 100644 --- a/configs/_base_/default_runtime_det.py +++ b/configs/_base_/default_runtime_det.py @@ -2,7 +2,7 @@ default_hooks = dict( timer=dict(type='IterTimerHook'), - logger=dict(type='edgelab.TextLoggerHook', interval=1), + logger=dict(type='edgelab.TextLoggerHook', interval=100), param_scheduler=dict(type='ParamSchedulerHook'), checkpoint=dict(type='CheckpointHook', interval=1), sampler_seed=dict(type='DistSamplerSeedHook'), diff --git a/configs/_base_/default_runtime_pose.py b/configs/_base_/default_runtime_pose.py index 6632edbd..505c9433 100644 --- a/configs/_base_/default_runtime_pose.py +++ b/configs/_base_/default_runtime_pose.py @@ -3,7 +3,7 @@ # hooks default_hooks = dict( timer=dict(type='IterTimerHook'), - logger=dict(type='TextLoggerHook', interval=50), + logger=dict(type='TextLoggerHook', interval=100), param_scheduler=dict(type='ParamSchedulerHook'), checkpoint=dict(type='CheckpointHook', interval=10), sampler_seed=dict(type='DistSamplerSeedHook'), diff --git a/edgelab/engine/hooks/logger/text.py b/edgelab/engine/hooks/logger/text.py index fb008d93..b45a5645 100644 --- a/edgelab/engine/hooks/logger/text.py +++ b/edgelab/engine/hooks/logger/text.py @@ -131,9 +131,14 @@ def _progress_log(self, log_dict: dict, runner: Runner, dataloader, idx: int, mo self.bar.set_description(end) - # self.bar.update(runner.val_interval if mode == 'val' else 100) - self.bar.update(self.progressInterval(idx, mode=mode)) - if self.bar.n == len(dataloader): + up_num = self.progressInterval(idx, mode=mode) + if up_num >= self.bar.total * 0.01 or (idx == self.bar.n): + self.bar.update(up_num) + if mode == 'train': + self.trainIdx = idx + else: + self.valIdx = idx + if self.bar.n == self.bar.total: del self.bar def 
setloglevel(self, runner: Runner, handler: logging.Handler = logging.StreamHandler, level: int = logging.ERROR): @@ -165,19 +170,14 @@ def _get_max_memory(self, runner: Runner) -> int: def progressInterval(self, idx: int, mode: str = 'train'): if mode == 'train': if idx < self.trainIdx: - self.trainIdx = idx res = idx else: res = idx - self.trainIdx - self.trainIdx = idx - else: if idx < self.valIdx: - self.valIdx = idx res = idx else: res = idx - self.valIdx - self.valIdx = idx return res if res else 1 def iterInterval(self, runner: Runner): From 23c7c97584ab94fcb58c7060f476036b536d92a2 Mon Sep 17 00:00:00 2001 From: mjq2020 Date: Fri, 4 Aug 2023 12:16:09 +0000 Subject: [PATCH 05/13] optimizer train time --- edgelab/models/detectors/fomo.py | 77 ++++++++++++++++++++++++++++++- edgelab/models/heads/fomo_head.py | 51 ++++++++------------ 2 files changed, 94 insertions(+), 34 deletions(-) diff --git a/edgelab/models/detectors/fomo.py b/edgelab/models/detectors/fomo.py index a42b9f96..c893c040 100644 --- a/edgelab/models/detectors/fomo.py +++ b/edgelab/models/detectors/fomo.py @@ -1,10 +1,14 @@ -from typing import Dict, Optional +from typing import Dict, List, Optional, Tuple, Union import torch from mmdet.models.detectors.single_stage import SingleStageDetector +from mmdet.structures import DetDataSample, OptSampleList +from mmengine.optim import OptimWrapper from edgelab.registry import MODELS +ForwardResults = Union[Dict[str, torch.Tensor], List[DetDataSample], Tuple[torch.Tensor], torch.Tensor] + @MODELS.register_module() class Fomo(SingleStageDetector): @@ -14,13 +18,63 @@ def __init__( neck: Optional[Dict] = None, head: Optional[Dict] = None, data_preprocessor: Optional[Dict] = None, + skip_preprocessor: bool = False, train_cfg: Optional[Dict] = None, test_cfg: Optional[Dict] = None, pretrained: Optional[Dict] = None, init_cfg: Optional[Dict] = None, ): + # data_preprocessor=None + self.skip_preprocessor = skip_preprocessor super().__init__(backbone, neck, head, train_cfg, test_cfg, data_preprocessor, init_cfg) + def forward(self, inputs: torch.Tensor, data_samples: OptSampleList = None, mode: str = 'tensor') -> ForwardResults: + """The unified entry for a forward process in both training and test. + + The method should accept three modes: "tensor", "predict" and "loss": + + - "tensor": Forward the whole network and return tensor or tuple of + tensor without any post-processing, same as a common nn.Module. + - "predict": Forward and return the predictions, which are fully + processed to a list of :obj:`DetDataSample`. + - "loss": Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle either back propagation or + parameter update, which are supposed to be done in :meth:`train_step`. + + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (list[:obj:`DetDataSample`], optional): A batch of + data samples that contain annotations and predictions. + Defaults to None. + mode (str): Return what kind of value. Defaults to 'tensor'. + + Returns: + The return type depends on ``mode``. + + - If ``mode="tensor"``, return a tensor or a tuple of tensor. + - If ``mode="predict"``, return a list of :obj:`DetDataSample`. + - If ``mode="loss"``, return a dict of tensor. 
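+
+        Example::
+
+            >>> # xdoctest: +SKIP("undefined vars")
+            >>> # Illustrative sketch only: assumes `detector` is a built
+            >>> # `Fomo` instance with a stride-8 head and 96x96 RGB inputs.
+            >>> heatmap = detector(torch.rand(1, 3, 96, 96), mode='tensor')
+            >>> heatmap.shape  # (1, 12, 12, num_classes + 1)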
+ """ + if isinstance(inputs, list): + inputs = torch.stack(inputs, dim=0).to(self.data_preprocessor.device) + if self.skip_preprocessor: + if inputs.dtype == torch.uint8: + inputs = inputs / 255 + if inputs.dtype == torch.int8: + inputs = inputs / 128 + + if mode == 'loss': + return self.loss(inputs, data_samples) + elif mode == 'predict': + return self.predict(inputs, data_samples) + elif mode == 'tensor': + return self._forward(inputs, data_samples) + else: + raise RuntimeError(f'Invalid mode "{mode}". ' 'Only supports loss, predict and tensor mode') + def _forward(self, batch_inputs, batch_data_samples): """Network forward process. Usually includes backbone, neck and head forward without any post-processing. @@ -36,4 +90,23 @@ def _forward(self, batch_inputs, batch_data_samples): """ x = self.extract_feat(batch_inputs) results = self.bbox_head.forward(x) - return [torch.softmax(pred, dim=1) for pred in results] + return torch.softmax(results[0].permute(0, 2, 3, 1), dim=-1) + + def train_step(self, data: Union[dict, tuple, list], optim_wrapper: OptimWrapper) -> Dict[str, torch.Tensor]: + with optim_wrapper.optim_context(self): + if not self.skip_preprocessor: + data = self.data_preprocessor(data, True) + losses = self._run_forward(data, mode='loss') # type: ignore + parsed_losses, log_vars = self.parse_losses(losses) # type: ignore + optim_wrapper.update_params(parsed_losses) + return log_vars + + def val_step(self, data: [tuple, dict, list]) -> list: + if not self.skip_preprocessor: + data = self.data_preprocessor(data, False) + return self._run_forward(data, 'predict') + + def test_step(self, data: Union[dict, tuple, list]) -> list: + if not self.skip_preprocessor: + data = self.data_preprocessor(data, False) + return self._run_forward(data, 'predict') diff --git a/edgelab/models/heads/fomo_head.py b/edgelab/models/heads/fomo_head.py index db20e5ca..5d048ea3 100644 --- a/edgelab/models/heads/fomo_head.py +++ b/edgelab/models/heads/fomo_head.py @@ -106,7 +106,6 @@ def forward(self, x: Tuple[torch.Tensor, ...]): def loss(self, inputs: Tuple[torch.Tensor, ...], data_samples): pred = self.forward(inputs) - gt = unpack_gt_instances(data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = gt @@ -118,50 +117,37 @@ def predict(self, features, batch_data_samples, rescale=False): preds = self.forward(features) preds = tuple([F.softmax(pred, dim=1) for pred in preds]) - img_shape = batch_data_samples[0].metainfo['img_shape'] - batch_gt_instances = [data_samples.gt_instances for data_samples in batch_data_samples] + batch_data_samples[0].metainfo['img_shape'] + # batch_gt_instances = [data_samples.gt_instances for data_samples in batch_data_samples] + batch_gt_instances = [data_samples.fomo_mask for data_samples in batch_data_samples] return [ InstanceData( pred=preds, labels=tuple( - [self.build_target(pred.shape[2:], img_shape, batch_gt_instances, pred.device) for pred in preds] + [ + torch.concat( + [torch.from_numpy(batch_gt_instances[i][0]) for i in range(len(batch_gt_instances))] + ).to(preds[0].device) + ] ), ) ] def loss_by_feat(self, preds, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) -> dict: device = preds[0].device - input_shape = batch_img_metas[0]['img_shape'] # batch_input_shape + batch_img_metas[0]['img_shape'] # batch_input_shape # Get the ground truth box that fits the fomo model - target = [self.build_target(pred.shape[2:], input_shape, batch_gt_instances, device) for pred in preds] + # target = [self.build_target(pred.shape[2:], 
input_shape, batch_gt_instances, device) for pred in preds] + target = [ + torch.concat( + [torch.from_numpy(batch_img_metas[i]['fomo_mask'][0]) for i in range(len(batch_img_metas))] + ).to(device) + ] loss, cls_loss, bg_loss, P, R, F1 = multi_apply(self.lossFunction, preds, target) return dict(loss=loss, fgnd=cls_loss, bgnd=bg_loss, P=P, R=R, F1=F1) - def build_target(self, pred_shape, ori_shape, gt_bboxs, device): - """ - The target feature map constructed according to the size - of the feature map output by the model - bbox: xyxy - """ - H, W = pred_shape - B = len(gt_bboxs) - - target_data = torch.zeros(size=(B, *pred_shape, self.num_attrib), device=device) - target_data[..., 0] = 1 - esp = 1e-5 - fw = W / (2 * ori_shape[1]) - fh = H / (2 * ori_shape[0]) - for b, bboxs in enumerate(gt_bboxs): - for idx, bbox in enumerate(bboxs.bboxes): - w = torch.mul((bbox[2] + bbox[0]), fw) - h = torch.mul((bbox[3] + bbox[1]), fh) - h, w = int(h.item() - esp), int(w.item() - esp) - target_data[b, h, w, 0] = 0 # background - target_data[b, h, w, bboxs.labels[idx] + 1] = 1 # label - return target_data - def lossFunction(self, pred_maps: torch.Tensor, data: torch.Tensor): """Calculate the loss of the model Args: @@ -176,9 +162,10 @@ def lossFunction(self, pred_maps: torch.Tensor, data: torch.Tensor): preds = pred_maps.permute(0, 2, 3, 1) B, H, W, C = preds.shape # pos_weights - weight = torch.zeros(self.num_attrib, device=preds.device) - weight[0] = 1 - self.weight_mask = torch.tile(weight, (H, W, 1)) + if not hasattr(self, 'weight_mask'): + weight = torch.zeros(self.num_attrib, device=preds.device) + weight[0] = 1 + self.weight_mask = torch.tile(weight, (H, W, 1)) # background loss bg_loss = self.loss_bg( From 294c4148b454172f57548577d43049ab93fea5d1 Mon Sep 17 00:00:00 2001 From: mjq2020 Date: Fri, 4 Aug 2023 12:25:23 +0000 Subject: [PATCH 06/13] fix evaluter meta classinfo is None bug --- tools/train.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/train.py b/tools/train.py index da067643..88442c16 100644 --- a/tools/train.py +++ b/tools/train.py @@ -187,22 +187,23 @@ def main(): from mmengine.runner import Runner runner = Runner.from_cfg(cfg) + runner.val_evaluator.dataset_meta = runner.val_dataloader.dataset.METAINFO else: from mmengine.registry import RUNNERS runner = RUNNERS.build(cfg) + runner.val_evaluator.dataset_meta = runner.val_dataloader.dataset.METAINFO model = runner.model.to('cpu') model.eval() analysis_results = get_model_complexity_info(model=model, input_shape=tuple(args.input_shape[1:])) - + runner.model.cuda() print('=' * 40) print(f"{'Input Shape':^20}:{str(args.input_shape):^20}") print(f"{'Model Flops':^20}:{analysis_results['flops_str']:^20}") print(f"{'Model Parameters':^20}:{analysis_results['params_str']:^20}") print('=' * 40) - runner.train() From 336c0f3a7f6345677e652885d06d41df494a0665 Mon Sep 17 00:00:00 2001 From: mjq2020 Date: Fri, 4 Aug 2023 12:27:17 +0000 Subject: [PATCH 07/13] add fomo mask build --- edgelab/datasets/pipelines/transforms.py | 33 +++++++++++++----------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/edgelab/datasets/pipelines/transforms.py b/edgelab/datasets/pipelines/transforms.py index 7ab34b83..024b50d0 100644 --- a/edgelab/datasets/pipelines/transforms.py +++ b/edgelab/datasets/pipelines/transforms.py @@ -1,6 +1,7 @@ +import copy from typing import Dict, List, Optional, Tuple, Union -import torch +import numpy as np from mmcv.transforms.base import BaseTransform from edgelab.registry import 
TRANSFORMS @@ -11,33 +12,35 @@ class Bbox2FomoMask(BaseTransform): def __init__( self, downsample_factor: Tuple[int, ...] = (8,), - classes_num: int = 80, + num_classes: int = 80, ) -> None: super().__init__() self.downsample_factor = downsample_factor - self.classes_num = classes_num + self.num_classes = num_classes def transform(self, results: Dict) -> Optional[Union[Dict, Tuple[List, List]]]: + results['img'] H, W = results['img_shape'] bbox = results['gt_bboxes'] - print(bbox) + labels = results['gt_bboxes_labels'] res = [] for factor in self.downsample_factor: - Dh, Dw = H / factor, W / factor - target = self.build_target(bbox, shape=(Dh, Dw)) + Dh, Dw = int(H / factor), int(W / factor) + target = self.build_target(bbox, feature_shape=(Dh, Dw), ori_shape=(W, H), labels=labels) res.append(target) - results['fomo_mask'] = res + results['fomo_mask'] = copy.deepcopy(res) return results - def build_target(self, targets, shape): - (H, W) = shape - target_data = torch.zeros(size=(H, W, self.classes_num + 1)) + def build_target(self, bboxs, feature_shape, ori_shape, labels): + (H, W) = feature_shape + # target_data = torch.zeros(size=(1,H, W, self.num_classes + 1)) + target_data = np.zeros((1, H, W, self.num_classes + 1)) target_data[..., 0] = 1 - for i in targets: - h, w = int(i[3].item() * H), int(i[2].item() * W) - target_data[int(i[0]), h, w, 0] = 0 # background - target_data[int(i[0]), h, w, int(i[1])] = 1 # label - + for idx, i in enumerate(bboxs): + w = int(i.centers[0][0] / ori_shape[0] * H) + h = int(i.centers[0][1] / ori_shape[1] * W) + target_data[0, h, w, 0] = 0 # background + target_data[0, h, w, int(labels[idx] + 1)] = 1 # label return target_data From 03a91186e8bd3ecb5f8c2727017788ec3eed8048 Mon Sep 17 00:00:00 2001 From: mjq2020 Date: Fri, 4 Aug 2023 12:35:49 +0000 Subject: [PATCH 08/13] optimizer fomo train config --- .../fomo/fomo_mobnetv2_0.35_x8_abl_coco.py | 28 +++++++++++++++---- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/configs/fomo/fomo_mobnetv2_0.35_x8_abl_coco.py b/configs/fomo/fomo_mobnetv2_0.35_x8_abl_coco.py index 91e77dc7..6d04c8ca 100644 --- a/configs/fomo/fomo_mobnetv2_0.35_x8_abl_coco.py +++ b/configs/fomo/fomo_mobnetv2_0.35_x8_abl_coco.py @@ -5,7 +5,7 @@ visualizer = dict(type='FomoLocalVisualizer', fomo=True) -num_classes = 2 +num_classes = 1 data_preprocessor = dict( type='mmdet.DetDataPreprocessor', mean=[0, 0, 0], std=[255.0, 255.0, 255.0], bgr_to_rgb=True, pad_size_divisor=32 ) @@ -22,6 +22,7 @@ loss_cls=dict(type='BCEWithLogitsLoss', reduction='none', pos_weight=40), loss_bg=dict(type='BCEWithLogitsLoss', reduction='none'), ), + skip_preprocessor=True, ) # dataset settings @@ -33,7 +34,6 @@ workers = 1 albu_train_transforms = [ - dict(type='RandomResizedCrop', height=height, width=width, scale=(0.80, 1.2), p=1), dict(type='Rotate', limit=30), dict(type='RandomBrightnessContrast', brightness_limit=0.3, contrast_limit=0.3, p=0.5), dict(type='Blur', p=0.01), @@ -46,24 +46,40 @@ dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='mmdet.LoadAnnotations', with_bbox=True), ] + train_pipeline = [ *pre_transform, + dict(type='mmdet.Resize', scale=(height, width)), dict( type='mmdet.Albu', transforms=albu_train_transforms, bbox_params=dict(type='BboxParams', format='pascal_voc', label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), keymap={'img': 'image', 'gt_bboxes': 'bboxes'}, ), + dict(type='Bbox2FomoMask', downsample_factor=(8,), num_classes=num_classes), dict( type='mmdet.PackDetInputs', - 
meta_keys=('img_path', 'img_id', 'instances', 'img_shape', 'ori_shape', 'gt_bboxes', 'gt_bboxes_labels'), + meta_keys=( + 'fomo_mask', + 'img_path', + 'img_id', + 'instances', + 'img_shape', + 'ori_shape', + 'gt_bboxes', + 'gt_bboxes_labels', + ), ), ] test_pipeline = [ *pre_transform, dict(type='mmdet.Resize', scale=(height, width)), - dict(type='mmdet.PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor')), + dict(type='Bbox2FomoMask', downsample_factor=(8,), num_classes=num_classes, ori_shape=(height, width)), + dict( + type='mmdet.PackDetInputs', + meta_keys=('fomo_mask', 'img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor'), + ), ] train_dataloader = dict( @@ -103,13 +119,14 @@ find_unused_parameters = True +# optim_wrapper = dict(type="AmpOptimWrapper",optimizer=dict(type='Adam', lr=lr, weight_decay=5e-4, eps=1e-7)) optim_wrapper = dict(optimizer=dict(type='Adam', lr=lr, weight_decay=5e-4, eps=1e-7)) # evaluator val_evaluator = dict(type='FomoMetric') test_evaluator = val_evaluator -train_cfg = dict(by_epoch=True, max_epochs=epochs) +train_cfg = dict(by_epoch=True, max_epochs=epochs, val_interval=5) # learning policy param_scheduler = [ @@ -123,3 +140,4 @@ by_epoch=True, ), ] +# cfg=dict(compile=True) From ff34e1a9ecde12db68b6a5eb5ed9ae9da4a8ef95 Mon Sep 17 00:00:00 2001 From: mjq2020 Date: Fri, 4 Aug 2023 12:38:41 +0000 Subject: [PATCH 09/13] delete old code --- edgelab/datasets/cocodataset.py | 149 +++----------------------------- 1 file changed, 11 insertions(+), 138 deletions(-) diff --git a/edgelab/datasets/cocodataset.py b/edgelab/datasets/cocodataset.py index 27baa96e..44b1f82f 100644 --- a/edgelab/datasets/cocodataset.py +++ b/edgelab/datasets/cocodataset.py @@ -1,13 +1,8 @@ import json import os.path as osp -from collections import OrderedDict -from typing import Callable, List, Optional, Sequence, Union +from typing import Optional, Sequence -import cv2 -import numpy as np -import torch from mmdet.datasets.coco import CocoDataset -from sklearn.metrics import confusion_matrix from edgelab.registry import DATASETS @@ -105,26 +100,19 @@ class CustomCocoDataset(CocoDataset): def __init__( self, + *args, + data_prefix: dict = dict(img_path=''), ann_file: str = '', metainfo: Optional[dict] = None, - data_root=None, - data_prefix: dict = dict(img_path=''), - filter_cfg: Optional[dict] = None, - indices: Optional[Union[int, Sequence[int]]] = None, - serialize_data: bool = True, - pipeline: List[Union[dict, Callable]] = [], - test_mode: bool = False, - lazy_init: bool = False, - max_refetch: int = 1000, + data_root: str = '', filter_supercat: bool = True, - file_client_args: Optional[dict] = dict(backend='disk'), - classes=None, + classes: Optional[Sequence[str]] = None, **kwargs, ): if data_root: if not (osp.isabs(ann_file) and (osp.isabs(data_prefix['img']))): data_root = check_file(data_root, data_name='coco') if data_root else data_root - if metainfo is None and not self.METAINFO['classes'] and not classes: + if metainfo is None and not self.METAINFO['classes']: if not osp.isabs(ann_file) and ann_file: self.ann_file = osp.join(data_root, ann_file) with open(self.ann_file, 'r') as f: @@ -138,125 +126,10 @@ def __init__( self.METAINFO['classes'] = classes super().__init__( - ann_file, - metainfo, - data_root, - data_prefix, - filter_cfg, - indices, - serialize_data, - pipeline, - test_mode, - lazy_init, - max_refetch, + *args, + data_prefix=data_prefix, + ann_file=ann_file, + metainfo=metainfo, + data_root=data_root, **kwargs, ) - - 
def bboxe2cell(self, bboxe, img_h, img_w, H, W): - w = (bboxe[0] + bboxe[2]) / 2 - h = (bboxe[1] + bboxe[3]) / 2 - w = w / img_w - h = h / img_h - x = int(w * (W - 1)) - y = int(h * (H - 1)) - return (x, y) - - def build_target(self, preds, targets, img_h, img_w): - B, H, W = preds.shape - target_data = torch.zeros(size=(B, H, W), device=preds.device) - target_data[..., 0] = 0 - bboxes = targets['bboxes'] - labels = targets['labels'] - - bboxes = [self.bboxe2cell(bboxe, img_h, img_w, H, W) for bboxe in bboxes] - - for bboxe, label in zip(bboxes, labels): - target_data[0, bboxe[1], bboxe[0]] = label + 1 # label - - return target_data - - def compute_FTP(self, pred, target): - confusion = confusion_matrix( - target.flatten().cpu().numpy(), pred.flatten().cpu().numpy(), labels=range(len(self.CLASSES) + 1) - ) - tn = confusion[0, 0] - tp = np.diagonal(confusion).sum() - tn - fn = np.tril(confusion, k=-1).sum() - fp = np.triu(confusion, k=1).sum() - - return tp, fp, fn - - def computer_prf(self, tp, fp, fn): - if tp == 0 and fn == 0 and fp == 0: - return 1.0, 1.0, 1.0 - - p = 0.0 if (tp + fp == 0) else tp / (tp + fp) - r = 0.0 if (tp + fn == 0) else tp / (tp + fn) - f1 = 0.0 if (p + r == 0) else 2 * (p * r) / (p + r) - return p, r, f1 - - def evaluate( - self, - results, - metric='bbox', - logger=None, - jsonfile_prefix=None, - classwise=False, - proposal_nums=..., - iou_thrs=None, - fomo=False, - metric_items=None, - ): - if fomo: # just with here evaluate for fomo data - annotations = [self.get_ann_info(i) for i in range(len(self))] - eval_results = OrderedDict() - tmp = [] - - TP, FP, FN = [], [], [] - for idx, (pred, ann) in enumerate(zip(results, annotations)): - data = self.__getitem__(idx) - B, H, W = pred.shape - img_h, img_w = data['img_metas'][0].data['ori_shape'][:2] - target = self.build_target(pred, ann, img_h, img_w) - tp, fp, fn = self.compute_FTP(pred, target) - mask = torch.eq(pred, target) - acc = torch.sum(mask) / (H * W) - tmp.append(acc) - TP.append(tp) - FP.append(fp) - FN.append(fn) - # show_result(pred,data['img_metas'][0].data['filename'],self.CLASSES) - P, R, F1 = self.computer_prf(sum(TP), sum(FP), sum(FN)) - eval_results['Acc'] = torch.mean(torch.Tensor(tmp)).cpu().item() - eval_results['Acc'] = torch.mean(torch.Tensor(tmp)).cpu().item() - eval_results['P'] = P - eval_results['R'] = R - eval_results['F1'] = F1 - return eval_results - - return super().evaluate( - results, metric, logger, jsonfile_prefix, classwise, proposal_nums, iou_thrs, metric_items - ) - - -def show_result(result, img_path, classes): - img = cv2.imread(img_path) - H, W = img.shape[:-1] - pred = result.cpu().numpy() - mask = np.argwhere(pred > 0) - for i in mask: - b, h, w = i - label = classes[pred[0, h, w] - 1] - cv2.circle( - img, (int(W / result[0].shape[1] * (w + 0.5)), int(H / result[0].shape[0] * (h + 0.5))), 5, (0, 0, 255), 1 - ) - cv2.putText( - img, - str(label), - org=(int(W / result[0].shape[1] * w), int(H / result[0].shape[0] * h)), - color=(255, 0, 0), - fontScale=1, - fontFace=cv2.FONT_HERSHEY_SIMPLEX, - ) - cv2.imshow('img', img) - cv2.waitKey(0) From e5c182447093aa1f11ba15f399af311404a2b4c8 Mon Sep 17 00:00:00 2001 From: mjq2020 Date: Mon, 7 Aug 2023 06:02:01 +0000 Subject: [PATCH 10/13] add classify visualizer --- tools/utils/inference.py | 69 ++++++---------------------------------- 1 file changed, 10 insertions(+), 59 deletions(-) diff --git a/tools/utils/inference.py b/tools/utils/inference.py index 831ba9e9..d97962c3 100644 --- a/tools/utils/inference.py +++ 
b/tools/utils/inference.py @@ -4,7 +4,6 @@ from typing import AnyStr, List, Optional, Sequence, Tuple, Union import cv2 -import mmcv import numpy as np import onnx import torch @@ -13,10 +12,10 @@ from mmengine.evaluator import Evaluator from mmengine.registry import MODELS from mmengine.structures import InstanceData +from mmengine.visualization.visualizer import Visualizer from torch.utils.data import DataLoader from tqdm.std import tqdm -from edgelab.models.utils.computer_acc import audio_acc, pose_acc from edgelab.utils.cv import NMS, load_image from .iot_camera import IoTCamera @@ -284,6 +283,8 @@ def init(self, cfg): self.evaluator.dataset_meta = self.dataloader.dataset.METAINFO if hasattr(cfg.model, 'data_preprocessor'): self.data_preprocess = MODELS.build(cfg.model.data_preprocessor) + if hasattr(cfg, 'visualizer'): + self.visualizer: Visualizer = Visualizer.get_current_instance() def post_process(self): pass @@ -291,7 +292,6 @@ def post_process(self): def test(self) -> None: self.time_cost = 0 self.preds = [] - P = [] R = [] F1 = [] @@ -309,7 +309,6 @@ def test(self) -> None: img_path = None t0 = time.time() - # print(inputs.shape) preds = self.model(inputs) self.time_cost += time.time() - t0 @@ -386,6 +385,13 @@ def test(self) -> None: self.evaluator.process(data_batch=data, data_samples=data['data_samples']) + elif self.task == 'cls': + if inputs.dtype == np.float32: + inputs = inputs * 255 + self.visualizer.set_image(inputs) + label = np.argmax(preds[0], axis=1) + self.visualizer = self.visualizer.draw_texts(str(label[0]), np.asarray([[1, 1]]), font_sizes=6) + self.visualizer.show() else: raise ValueError if not self.source: @@ -399,60 +405,6 @@ def test(self) -> None: print(f'FPS: {len(self.dataloader)/self.time_cost:2f} fram/s') -def pfld_inference(model, data_loader): - results = [] - prog_bar = mmcv.ProgressBar(len(data_loader)) - for data in data_loader: - # parse data - input = data.dataset['img'] - target = np.expand_dims(data.dataset['keypoints'], axis=0) - size = data.dataset['hw'] # .cpu().numpy() - input = input.cpu().numpy() - result = model(input) - result = np.array(result) - result = result if len(result.shape) == 2 else result[None, :] # onnx shape(2,), tflite shape(1,2) - acc = pose_acc(result.copy(), target, size) - results.append({'Acc': acc, 'pred': result, 'image_file': data.dataset['image_file'].data}) - - prog_bar.update() - return results - - -def audio_inference(model, data_loader): - results = [] - prog_bar = mmcv.ProgressBar(len(data_loader)) - for data in data_loader: - # parse data - input = data.dataset['audio'] - target = data.dataset['labels'] - input = input.cpu().numpy() - result = model(input) - # result = result if len(result.shape)==2 else np.expand_dims(result, 0) # onnx shape(d,), tflite shape(1,d) - # result = result[0] if len(result.shape)==2 else result - acc = audio_acc(result, target) - results.append({'acc': acc, 'pred': result, 'image_file': data.dataset['audio_file']}) - prog_bar.update() - return results - - -def fomo_inference(model, data_loader): - results = [] - prog_bar = mmcv.ProgressBar(len(data_loader)) - for data in data_loader: - input = data.dataset['img'] - input = input.cpu().numpy() - target = data.dataset['target'] - result = model(input) - results.append( - { - 'pred': result, - 'target': target, - } - ) - prog_bar.update() - return results - - def show_point( keypoints: Union[np.ndarray, Sequence[Sequence[int]], None] = None, img: Optional[np.ndarray] = None, @@ -508,7 +460,6 @@ def show_det( img = 
cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 1) cv2.putText(img, class_name[int(i[5])], (x1, y1), 1, color=(0, 0, 255), thickness=1, fontScale=1) cv2.putText(img, str(round(i[4].item(), 2)), (x1, y1 - 15), 1, color=(0, 0, 255), thickness=1, fontScale=1) - print(pred) if show: cv2.imshow(win_name, img) cv2.waitKey(0) From 3f2913a5b896ee466e79ee40935bd2b6913cd015 Mon Sep 17 00:00:00 2001 From: mjq2020 Date: Tue, 8 Aug 2023 11:07:32 +0000 Subject: [PATCH 11/13] add module import file --- edgelab/datasets/utils/__init__.py | 3 +++ edgelab/engine/utils/__init__.py | 3 +++ edgelab/models/base/__init__.py | 2 ++ edgelab/models/mot/__init__.py | 1 + edgelab/models/tf/__init__.py | 1 + edgelab/models/utils/__init__.py | 2 ++ 6 files changed, 12 insertions(+) create mode 100644 edgelab/datasets/utils/__init__.py create mode 100644 edgelab/engine/utils/__init__.py create mode 100644 edgelab/models/base/__init__.py create mode 100644 edgelab/models/mot/__init__.py create mode 100644 edgelab/models/tf/__init__.py create mode 100644 edgelab/models/utils/__init__.py diff --git a/edgelab/datasets/utils/__init__.py b/edgelab/datasets/utils/__init__.py new file mode 100644 index 00000000..d30d257f --- /dev/null +++ b/edgelab/datasets/utils/__init__.py @@ -0,0 +1,3 @@ +from .audio_augs import * # noqa +from .download import * # noqa +from .functions import * # noqa diff --git a/edgelab/engine/utils/__init__.py b/edgelab/engine/utils/__init__.py new file mode 100644 index 00000000..24831635 --- /dev/null +++ b/edgelab/engine/utils/__init__.py @@ -0,0 +1,3 @@ +from .batch_augs import * # noqa +from .helper_funcs import * # noqa +from .resample import * # noqa diff --git a/edgelab/models/base/__init__.py b/edgelab/models/base/__init__.py new file mode 100644 index 00000000..9458d5c2 --- /dev/null +++ b/edgelab/models/base/__init__.py @@ -0,0 +1,2 @@ +from .data_preprocessor import * # noqa +from .general import * # noqa diff --git a/edgelab/models/mot/__init__.py b/edgelab/models/mot/__init__.py new file mode 100644 index 00000000..1f487b53 --- /dev/null +++ b/edgelab/models/mot/__init__.py @@ -0,0 +1 @@ +from .bytetrack import * # noqa diff --git a/edgelab/models/tf/__init__.py b/edgelab/models/tf/__init__.py new file mode 100644 index 00000000..358d2189 --- /dev/null +++ b/edgelab/models/tf/__init__.py @@ -0,0 +1 @@ +from .tf_common import * # noqa diff --git a/edgelab/models/utils/__init__.py b/edgelab/models/utils/__init__.py new file mode 100644 index 00000000..507604a2 --- /dev/null +++ b/edgelab/models/utils/__init__.py @@ -0,0 +1,2 @@ +from .computer_acc import * # noqa +from .metrics import * # noqa From dbf4545df3141d1c52f4c6b10fc0fd48fd75c25e Mon Sep 17 00:00:00 2001 From: mjq2020 Date: Tue, 8 Aug 2023 11:55:22 +0000 Subject: [PATCH 12/13] fix model params grad is false bug --- edgelab/models/layers/__init__.py | 2 ++ edgelab/models/layers/test.py | 43 ------------------------------- 2 files changed, 2 insertions(+), 43 deletions(-) create mode 100644 edgelab/models/layers/__init__.py delete mode 100644 edgelab/models/layers/test.py diff --git a/edgelab/models/layers/__init__.py b/edgelab/models/layers/__init__.py new file mode 100644 index 00000000..a63089ad --- /dev/null +++ b/edgelab/models/layers/__init__.py @@ -0,0 +1,2 @@ +from .attention import * # noqa +from .rep import * # noqa diff --git a/edgelab/models/layers/test.py b/edgelab/models/layers/test.py deleted file mode 100644 index 15da71ee..00000000 --- a/edgelab/models/layers/test.py +++ /dev/null @@ -1,43 +0,0 @@ -import torch 
-import torchvision - - -def fuse(conv, bn): - fused = torch.nn.Conv2d( - conv.in_channels, - conv.out_channels, - kernel_size=conv.kernel_size, - stride=conv.stride, - padding=conv.padding, - bias=True, - ) - - # setting weights - w_conv = conv.weight.clone().view(conv.out_channels, -1) - w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) - fused.weight.copy_(torch.mm(w_bn, w_conv).view(fused.weight.size())) - - # setting bias - if conv.bias is not None: - b_conv = conv.bias - else: - b_conv = torch.zeros(conv.weight.size(0)) - b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) - fused.bias.copy_(b_conv + b_bn) - - return fused - - -# Testing -# we need to turn off gradient calculation because we didn't write it -torch.set_grad_enabled(False) -x = torch.randn(16, 3, 256, 256) -resnet18 = torchvision.models.resnet18(pretrained=True) -# removing all learning variables, etc -resnet18.eval() -model = torch.nn.Sequential(resnet18.conv1, resnet18.bn1) -f1 = model.forward(x) -fused = fuse(model[0], model[1]) -f2 = fused.forward(x) -d = (f1 - f2).mean().item() -print('error:', d) From 6e6bd9881be5b1805ace6eec5ad2937d56defa32 Mon Sep 17 00:00:00 2001 From: mjq2020 Date: Wed, 9 Aug 2023 07:09:21 +0000 Subject: [PATCH 13/13] Fix environment inconsistency bug --- .../Google-Colab-YOLOv5-A1101-Example.ipynb | 126 ++++-------------- 1 file changed, 26 insertions(+), 100 deletions(-) diff --git a/notebooks/Google-Colab-YOLOv5-A1101-Example.ipynb b/notebooks/Google-Colab-YOLOv5-A1101-Example.ipynb index cffb8a24..18af0885 100644 --- a/notebooks/Google-Colab-YOLOv5-A1101-Example.ipynb +++ b/notebooks/Google-Colab-YOLOv5-A1101-Example.ipynb @@ -1,13 +1,12 @@ { "cells": [ { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "mNESTCLfvSrY" }, "source": [ - "# Welcom to EdgeLab for Google Colab Training Example 🔥 \n", + "# Welcom to EdgeLab for Google Colab Training Example 🔥\n", "\n", "\"Open **[🚀🚀🚀 One-Click to Deploy to Google Colab 🚀🚀🚀](https://colab.research.google.com/github/Seeed-Studio/EdgeLab/blob/main/notebooks/Google-Colab-YOLOv5-A1101-Example.ipynb)**\n", "\n", @@ -28,7 +27,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "PRGi8-2jvxzR" @@ -38,7 +36,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "Ikzx6C6wrpos" @@ -55,77 +52,36 @@ }, "outputs": [], "source": [ - "!git clone https://github.com/Seeed-Studio/EdgeLab.git # currently we're using experimental 2.0 version branch" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "-4H83YK8waNA" - }, - "outputs": [], - "source": [ + "!git clone https://github.com/Seeed-Studio/EdgeLab.git # currently we're using experimental 2.0 version branch\n", "%cd /content/EdgeLab" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "LWAE--J9sAmW" }, "source": [ - "**Step 1:** Use `python3.8` as default python in colab, then install corresponding version python and pip package." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "STOKRHwXs70f" - }, - "outputs": [], - "source": [ - "%env PYTHON_EXEC=python3.8" + "**Step 1:** Install python third-party library" ] }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "xF4nReUbqbUA" - }, - "outputs": [], "source": [ - "!sudo apt-get update\n", - "!sudo apt-get install ${PYTHON_EXEC}-dev python3-pip -y" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", + "!pip install -r requirements/base.txt\n", + "!pip install -r requirements/export.txt\n", + "!pip install -r requirements/inference.txt\n", + "!pip install -r requirements/pytorch_cuda.txt\n", + "!mim install -r requirements/mmlab.txt\n", + "!pip install -e ." + ], "metadata": { - "id": "zX3aW3Rys1Zr" + "id": "EPSObA30OADp" }, - "source": [ - "**Step 2:** Use `scripts/setup_colab.sh` to automatically setup EdgeLab dependencies." - ] - }, - { - "cell_type": "code", "execution_count": null, - "metadata": { - "id": "k1HIXVgCzrCz" - }, - "outputs": [], - "source": [ - "!chmod +x scripts/setup_colab.sh\n", - "!scripts/setup_colab.sh $(which ${PYTHON_EXEC})" - ] + "outputs": [] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "v47-pxMe2DP1" @@ -148,7 +104,9 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "id": "KzjPv7VQL42Q" + }, "outputs": [], "source": [ "!mkdir -p datasets\n", @@ -157,7 +115,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "2HMAOOWR2sf6" @@ -167,7 +124,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "zmWPp53tcLpB" @@ -192,7 +148,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "Y9sSOwrZdaLo" @@ -219,16 +174,15 @@ }, "outputs": [], "source": [ - "!${PYTHON_EXEC} tools/train.py \\\n", + "!python tools/train.py \\\n", " configs/yolov5/yolov5_tiny_1xb16_300e_coco.py \\\n", " --cfg-options \\\n", - " epochs=50 \\\n", + " epochs=10 \\\n", " num_classes=11 \\\n", " data_root='datasets/digital_meter/'" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "tGl9VBZavSmM" @@ -249,7 +203,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "m4Y6DM16vgLF" @@ -270,7 +223,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "G7KqGi2OxnaM" @@ -289,32 +241,16 @@ }, "outputs": [], "source": [ - "!${PYTHON_EXEC} tools/inference.py \\\n", + "!python tools/inference.py \\\n", " configs/yolov5/yolov5_tiny_1xb16_300e_coco.py \\\n", " \"$(cat work_dirs/yolov5_tiny_1xb16_300e_coco/last_checkpoint)\" \\\n", " --dump work_dirs/yolov5_tiny_1xb16_300e_coco/last_checkpoint.pkl \\\n", " --cfg-options \\\n", - " data_root='datasets/digital_meter'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "5vk0o2HAyaz7" - }, - "outputs": [], - "source": [ - "import pickle\n", - "\n", - "with open('work_dirs/yolov5_tiny_1xb16_300e_coco/last_checkpoint.pkl', 'rb') as f:\n", - " data = pickle.load(f)\n", - " for k in data.keys():\n", - " print(f'{k}: {data[k]:.5f}')" + " data_root='datasets/digital_meter/'\\\n", + " num_classes=11" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "5np32KYq8JL4" @@ -324,7 +260,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "QF9ObhTrjwI2" @@ -343,7 +278,7 @@ }, "outputs": [], "source": [ - "!${PYTHON_EXEC} tools/export.py \\\n", + "!python tools/export.py \\\n", " 
configs/yolov5/yolov5_tiny_1xb16_300e_coco.py \\\n", " $(cat work_dirs/yolov5_tiny_1xb16_300e_coco/last_checkpoint) \\\n", " tflite \\\n", @@ -353,7 +288,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "akUdGWE-1Z-8" @@ -367,7 +301,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "1huhlfmt3np4" @@ -415,7 +348,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "ZMSi7Ie--O1h" @@ -443,17 +375,16 @@ }, "outputs": [], "source": [ - "!${PYTHON_EXEC} -m pip install numpy requests colorama serial pyserial" + "!python -m pip install numpy requests colorama serial pyserial" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "IGHvs_2n--gP" }, "source": [ - "### Build Firmware " + "### Build Firmware" ] }, { @@ -469,7 +400,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "6ISXUUG2_uKY" @@ -487,7 +417,7 @@ "outputs": [], "source": [ "!cd example/grove && \\\n", - " ${PYTHON_EXEC} tools/ufconv/uf2conv.py \\\n", + " python tools/ufconv/uf2conv.py \\\n", " -t 0 \\\n", " -c tools/image_gen_cstm/output/output.img \\\n", " -o firmware.uf2" @@ -518,7 +448,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "2zrBJSg0FST3" @@ -536,7 +465,7 @@ "outputs": [], "source": [ "!cd example/grove && \\\n", - " ${PYTHON_EXEC} tools/ufconv/uf2conv.py \\\n", + " python tools/ufconv/uf2conv.py \\\n", " -t 18 \\\n", " -c \"$(cat ../../work_dirs/yolov5_tiny_1xb16_300e_coco/last_checkpoint | sed -e 's/.pth/_int8.tflite/g')\" \\\n", " -o model.uf2" @@ -565,7 +494,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "_4cVSi76EsFg" @@ -579,7 +507,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "id": "qasiYT2qJWPm" @@ -597,7 +524,6 @@ "accelerator": "GPU", "colab": { "gpuType": "T4", - "include_colab_link": true, "provenance": [] }, "gpuClass": "standard", @@ -620,4 +546,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file
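
A quick local sanity check of the int8 TFLite file produced by the notebook's
export step above (a sketch under assumptions: the checkpoint path below is
hypothetical, and TensorFlow must be installed on the host):

    import numpy as np
    import tensorflow as tf

    # Adjust the path to whatever tools/export.py wrote for your run.
    interpreter = tf.lite.Interpreter(
        model_path='work_dirs/yolov5_tiny_1xb16_300e_coco/epoch_10_int8.tflite'
    )
    interpreter.allocate_tensors()

    inp = interpreter.get_input_details()[0]
    out = interpreter.get_output_details()[0]

    # Feed a zero tensor matching the quantized input spec; a real test would
    # use a preprocessed image from the digital-meter dataset.
    dummy = np.zeros(inp['shape'], dtype=inp['dtype'])
    interpreter.set_tensor(inp['index'], dummy)
    interpreter.invoke()
    print('output shape:', interpreter.get_tensor(out['index']).shape)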