From 01260bbec5de947d2d924b8e032028c6d178cf56 Mon Sep 17 00:00:00 2001
From: zhangyubo0722
Date: Tue, 7 Jan 2025 08:52:06 +0000
Subject: [PATCH] fix hpi config

---
 ppdet/engine/export_utils.py | 35 +++++++++++++++++++++++------------
 ppdet/engine/trainer.py      | 12 ++++++------
 2 files changed, 29 insertions(+), 18 deletions(-)

diff --git a/ppdet/engine/export_utils.py b/ppdet/engine/export_utils.py
index 62acd1db67..a6ae337239 100644
--- a/ppdet/engine/export_utils.py
+++ b/ppdet/engine/export_utils.py
@@ -245,14 +245,12 @@ def _parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):
     fuse_normalize = reader_cfg.get('fuse_normalize', False)
 
     sample_transforms = reader_cfg['sample_transforms']
-    hpi_dynamic_shape = None
     for st in sample_transforms[1:]:
         for key, value in st.items():
             p = {'type': key}
             if key == 'Resize':
                 if int(image_shape[1]) != -1:
                     value['target_size'] = image_shape[1:]
-                    hpi_dynamic_shape = image_shape[1:]
                 value['interp'] = value.get('interp', 1)  # cv2.INTER_LINEAR
             if fuse_normalize and key == 'NormalizeImage':
                 continue
@@ -277,7 +275,7 @@ def _parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):
             preprocess_list.append(p)
             break
 
-    return preprocess_list, label_list, hpi_dynamic_shape
+    return preprocess_list, label_list
 
 
 def _parse_tracker(tracker_cfg):
@@ -287,7 +285,7 @@
     return tracker_params
 
 
-def _dump_infer_config(config, path, image_shape, model):
+def _dump_infer_config(config, path, image_shape, model, input_spec):
     arch_state = False
     from ppdet.core.config.yaml_helpers import setup_orderdict
     setup_orderdict()
@@ -381,34 +379,47 @@ def _dump_infer_config(config, path, image_shape, model):
     reader_cfg = config['TestReader']
     dataset_cfg = config['TestDataset']
 
-    infer_cfg['Preprocess'], infer_cfg['label_list'], hpi_dynamic_shape = _parse_reader(
+    infer_cfg['Preprocess'], infer_cfg['label_list'] = _parse_reader(
         reader_cfg, dataset_cfg, config['metric'], label_arch, image_shape[1:])
 
     if config.get("uniform_output_enabled", None):
+        for d in input_spec:
+            if 'image' in d:
+                hpi_dynamic_shape = list(d['image'].shape[2:])
+
         def get_dynamic_shapes(hpi_shape):
             return [[1, 3] + hpi_shape, [1, 3] + hpi_shape, [8, 3] + hpi_shape]
 
-        dynamic_shapes = get_dynamic_shapes(hpi_dynamic_shape) if hpi_dynamic_shape else [
+        dynamic_shapes = get_dynamic_shapes(hpi_dynamic_shape) if hpi_dynamic_shape != [-1, -1] else [
            [1, 3, 320, 320], [1, 3, 640, 640], [8, 3, 1280, 1280]
        ]
        shapes = {
            "image": dynamic_shapes,
-           "im_shape": [[1, 2], [1, 2], [8, 2]],
            "scale_factor": [[1, 2], [1, 2], [8, 2]]
        }
-       trt_dynamic_shape = [
-           [dim for _ in range(shape[0]) for dim in shape[2:]]
-           for shape in dynamic_shapes
-       ]
        trt_dynamic_shape_input_data = {
-           "im_shape": trt_dynamic_shape,
            "scale_factor": [
                [2, 2], [1, 1],
                [0.67 for _ in range(2 * shapes["scale_factor"][-1][0])]
            ]
        }
+       model_names_required_imgsize = [
+           "DETR",
+           "DINO",
+           "RCNN",
+           "YOLOv3",
+           "CenterNet",
+           "BlazeFace",
+           "BlazeFace-FPN-SSH",
+       ]
+       if any(name in config.get('pdx_model_name', None) for name in model_names_required_imgsize):
+           shapes["im_shape"] = [[1, 2], [1, 2], [8, 2]]
+           trt_dynamic_shape = [
+               [dim for _ in range(shape[0]) for dim in shape[2:]]
+               for shape in dynamic_shapes
+           ]
+           trt_dynamic_shape_input_data["im_shape"] = trt_dynamic_shape
        hpi_config = OrderedDict({
            "backend_configs": OrderedDict({
                "paddle_infer": OrderedDict({
diff --git a/ppdet/engine/trainer.py b/ppdet/engine/trainer.py
index a9f310629e..3300e967d2 100644
--- a/ppdet/engine/trainer.py
+++ b/ppdet/engine/trainer.py
@@ -1217,11 +1217,6 @@ def _get_infer_cfg_and_input_spec(self,
         if export_post_process and not export_benchmark:
             image_shape = [None] + image_shape[1:]
 
-        # Save infer cfg
-        _dump_infer_config(self.cfg,
-                           os.path.join(save_dir, yaml_name), image_shape,
-                           model)
-
         input_spec = [{
             "image": InputSpec(
                 shape=image_shape, name='image'),
@@ -1263,6 +1258,11 @@ def _get_infer_cfg_and_input_spec(self,
                 "image": InputSpec(
                     shape=image_shape, name='image')
             }]
+
+        # Save infer cfg
+        _dump_infer_config(self.cfg,
+                           os.path.join(save_dir, yaml_name), image_shape,
+                           model, input_spec)
 
         return static_model, pruned_input_spec, input_spec
 
@@ -1299,7 +1299,7 @@ def export(self, output_dir='output_inference', for_fd=False):
         try:
             import encryption
         except ModuleNotFoundError:
-            print("failed to import encryption")
+            logger.info("Skipping import of the encryption module.")
         paddle_version = version.parse(paddle.__version__)
         if self.cfg.get("export_with_pir", False):
             assert (paddle_version >= version.parse(
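
For reviewers, here is a minimal standalone sketch of the shape-derivation behaviour this patch introduces in _dump_infer_config: the exported height/width is now read from the 'image' entry of input_spec instead of the reader's Resize transform, with a preset shape range as the fallback for fully dynamic exports. The derive_dynamic_shapes helper and the namedtuple stand-in for paddle.static.InputSpec below are hypothetical, used only so the sketch runs without Paddle installed; it is an illustration of the patched logic, not code from the patch.

    from collections import namedtuple

    # Hypothetical stand-in for paddle.static.InputSpec so the sketch runs without Paddle.
    InputSpec = namedtuple("InputSpec", ["shape", "name"])


    def derive_dynamic_shapes(input_spec):
        """Mirror the patched logic: take H/W from the 'image' spec and fall back
        to a preset min/opt/max range when the export is fully dynamic ([-1, -1])."""
        hpi_dynamic_shape = None
        for d in input_spec:
            if 'image' in d:
                # shape is [N, C, H, W]; keep only the spatial dims
                hpi_dynamic_shape = list(d['image'].shape[2:])

        if hpi_dynamic_shape and hpi_dynamic_shape != [-1, -1]:
            # fixed-size export: min/opt/max shapes pinned to the exported size
            return [[1, 3] + hpi_dynamic_shape,
                    [1, 3] + hpi_dynamic_shape,
                    [8, 3] + hpi_dynamic_shape]
        # dynamic export: fall back to the preset shape range
        return [[1, 3, 320, 320], [1, 3, 640, 640], [8, 3, 1280, 1280]]


    # Example: a model exported with a fixed 640x640 input
    spec = [{"image": InputSpec(shape=[None, 3, 640, 640], name="image")}]
    print(derive_dynamic_shapes(spec))
    # -> [[1, 3, 640, 640], [1, 3, 640, 640], [8, 3, 640, 640]]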