Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix hpi config #9268

Open
wants to merge 1 commit into
base: develop
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 23 additions & 12 deletions ppdet/engine/export_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -245,14 +245,12 @@ def _parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):

fuse_normalize = reader_cfg.get('fuse_normalize', False)
sample_transforms = reader_cfg['sample_transforms']
hpi_dynamic_shape = None
for st in sample_transforms[1:]:
for key, value in st.items():
p = {'type': key}
if key == 'Resize':
if int(image_shape[1]) != -1:
value['target_size'] = image_shape[1:]
hpi_dynamic_shape = image_shape[1:]
value['interp'] = value.get('interp', 1) # cv2.INTER_LINEAR
if fuse_normalize and key == 'NormalizeImage':
continue
Expand All @@ -277,7 +275,7 @@ def _parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):
preprocess_list.append(p)
break

return preprocess_list, label_list, hpi_dynamic_shape
return preprocess_list, label_list


def _parse_tracker(tracker_cfg):
Expand All @@ -287,7 +285,7 @@ def _parse_tracker(tracker_cfg):
return tracker_params


def _dump_infer_config(config, path, image_shape, model):
def _dump_infer_config(config, path, image_shape, model, input_spec):
arch_state = False
from ppdet.core.config.yaml_helpers import setup_orderdict
setup_orderdict()
Expand Down Expand Up @@ -381,34 +379,47 @@ def _dump_infer_config(config, path, image_shape, model):
reader_cfg = config['TestReader']
dataset_cfg = config['TestDataset']

infer_cfg['Preprocess'], infer_cfg['label_list'], hpi_dynamic_shape = _parse_reader(
infer_cfg['Preprocess'], infer_cfg['label_list'] = _parse_reader(
reader_cfg, dataset_cfg, config['metric'], label_arch, image_shape[1:])
if config.get("uniform_output_enabled", None):
for d in input_spec:
if 'image' in d:
hpi_dynamic_shape = list(d['image'].shape[2:])
def get_dynamic_shapes(hpi_shape):
return [[1, 3] + hpi_shape, [1, 3] + hpi_shape, [8, 3] + hpi_shape]

dynamic_shapes = get_dynamic_shapes(hpi_dynamic_shape) if hpi_dynamic_shape else [
dynamic_shapes = get_dynamic_shapes(hpi_dynamic_shape) if hpi_dynamic_shape != [-1, -1] else [
[1, 3, 320, 320],
[1, 3, 640, 640],
[8, 3, 1280, 1280]
]
shapes = {
"image": dynamic_shapes,
"im_shape": [[1, 2], [1, 2], [8, 2]],
"scale_factor": [[1, 2], [1, 2], [8, 2]]
}
trt_dynamic_shape = [
[dim for _ in range(shape[0]) for dim in shape[2:]]
for shape in dynamic_shapes
]
trt_dynamic_shape_input_data = {
"im_shape": trt_dynamic_shape,
"scale_factor": [
[2, 2],
[1, 1],
[0.67 for _ in range(2 * shapes["scale_factor"][-1][0])]
]
}
model_names_required_imgsize = [
"DETR",
"DINO",
"RCNN",
"YOLOv3",
"CenterNet",
"BlazeFace",
"BlazeFace-FPN-SSH",
]
if any(name in config.get('pdx_model_name', None) for name in model_names_required_imgsize):
shapes["im_shape"] = [[1, 2], [1, 2], [8, 2]]
trt_dynamic_shape = [
[dim for _ in range(shape[0]) for dim in shape[2:]]
for shape in dynamic_shapes
]
trt_dynamic_shape_input_data["im_shape"] = trt_dynamic_shape
hpi_config = OrderedDict({
"backend_configs": OrderedDict({
"paddle_infer": OrderedDict({
Expand Down
12 changes: 6 additions & 6 deletions ppdet/engine/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -1217,11 +1217,6 @@ def _get_infer_cfg_and_input_spec(self,
if export_post_process and not export_benchmark:
image_shape = [None] + image_shape[1:]

# Save infer cfg
_dump_infer_config(self.cfg,
os.path.join(save_dir, yaml_name), image_shape,
model)

input_spec = [{
"image": InputSpec(
shape=image_shape, name='image'),
Expand Down Expand Up @@ -1263,6 +1258,11 @@ def _get_infer_cfg_and_input_spec(self,
"image": InputSpec(
shape=image_shape, name='image')
}]

# Save infer cfg
_dump_infer_config(self.cfg,
os.path.join(save_dir, yaml_name), image_shape,
model, input_spec)

return static_model, pruned_input_spec, input_spec

Expand Down Expand Up @@ -1299,7 +1299,7 @@ def export(self, output_dir='output_inference', for_fd=False):
try:
import encryption
except ModuleNotFoundError:
print("failed to import encryption")
logger.info("Skipping import of the encryption module.")
paddle_version = version.parse(paddle.__version__)
if self.cfg.get("export_with_pir", False):
assert (paddle_version >= version.parse(
Expand Down