
fix hardmax dt #15

GitHub Actions / Test Results failed Aug 25, 2023 in 0s

3 fail, 3 563 pass in 1m 59s

3 566 tests   3 563 ✔️ passed   0 💤 skipped   3 ❌ failed
1 suites      1 files           1m 59s ⏱️

Results for commit 7dad301.

Annotations

Check warning on line 0 in tests.importer.onnx_.basic.test_hardmax


test_hardmax[0-in_shape0] (tests.importer.onnx_.basic.test_hardmax) failed

test_results/onnx_basic.xml [took 0s]
Raw output
AssertionError: Fault result in infer + Fail [ infer cpu ptq ] Output 0:cosine similarity = 0.0, threshold = 0.98
assert False
in_shape = [1, 3, 16, 16], axis = 0
request = <FixtureRequest for <Function test_hardmax[0-in_shape0]>>

    @pytest.mark.parametrize('in_shape', in_shapes)
    @pytest.mark.parametrize('axis', axes)
    def test_hardmax(in_shape, axis, request):
        model_def = _make_module(in_shape, axis)
    
        runner = OnnxTestRunner(request.node.name)
        model_file = runner.from_onnx_helper(model_def)
>       runner.run(model_file)

tests/importer/onnx_/basic/test_hardmax.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/onnx_test_runner.py:58: in run
    super().run(model_file)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <onnx_test_runner.OnnxTestRunner object at 0x7fb510414b90>
model_file = 'tests_output/test_hardmax_0-in_shape0_/simplified.onnx'

    def run(self, model_file: Union[List[str], str]):
        if not self.inputs:
            self.parse_model(model_file)
    
        self.generate_all_data()
        self.write_compile_opt()
    
        expected = self.cpu_infer(model_file)
        targets = self.cfg['target']
        model_content = self.read_model_file(model_file)
        import_options = nncase.ImportOptions()
    
        compiler = None
        dump_hist = self.cfg['dump_hist']
        for k_target, v_target in targets.items():
            tmp_dir = os.path.join(self.case_dir, 'tmp')
            if v_target['eval'] or v_target['infer']:
                compile_options = self.get_compile_options(k_target, tmp_dir)
                compiler = nncase.Compiler(compile_options)
                self.import_model(compiler, model_content, import_options)
    
            for stage in ['eval', 'infer']:
                if v_target[stage]:
                    for k_mode, v_mode in v_target['mode'].items():
                        if v_mode['enabled']:
                            os.makedirs(tmp_dir, exist_ok=True)
                            if stage == 'eval':
                                actual = self.run_evaluator(compiler, tmp_dir)
                            else:
                                actual = self.run_inference(
                                    compiler, k_target, v_mode['enabled'], tmp_dir)
                            target_dir = os.path.join(self.case_dir, stage, k_target)
                            os.makedirs(target_dir, exist_ok=True)
                            mode_dir = os.path.join(target_dir, k_mode)
                            shutil.move(tmp_dir, mode_dir)
                            judge, result = self.compare_results(
                                expected, actual, stage, k_target, v_target['similarity_name'], k_mode, v_mode['threshold'], dump_hist, mode_dir)
    
                            if stage == 'infer' and self.cfg['dump_infer']:
                                self.infer_dict['result'] = 'Pass' if judge else 'Fail'
                                self.infer_dict['remark'] = result.replace('\n', ' ')
                                dump_dict_to_json(self.infer_dict, self.infer_file)
                            if not judge:
                                if test_utils.in_ci():
                                    self.clear(self.case_dir)
>                               assert (judge), f"Fault result in {stage} + {result}"
E                               AssertionError: Fault result in infer + Fail [ infer cpu ptq ] Output 0:cosine similarity = 0.0, threshold = 0.98
E                               assert False

tests/test_runner.py:275: AssertionError
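
All three failures exercise the ONNX Hardmax importer, differing only in the axis attribute (0, -2, -4). Per the ONNX spec, Hardmax writes 1.0 at the argmax position along the chosen axis and 0.0 everywhere else. A minimal NumPy reference (an illustrative sketch, not the runner's code) shows what the cpu_infer expected output looks like:

    import numpy as np

    def hardmax_reference(x: np.ndarray, axis: int = -1) -> np.ndarray:
        # ONNX Hardmax: 1.0 at the argmax along `axis`, 0.0 elsewhere.
        # Ties resolve to the first maximal index, matching np.argmax.
        out = np.zeros_like(x)
        idx = np.argmax(x, axis=axis)
        np.put_along_axis(out, np.expand_dims(idx, axis), 1.0, axis=axis)
        return out

    x = np.random.rand(1, 3, 16, 16).astype(np.float32)  # in_shape0 from the failing cases
    for axis in (0, -2, -4):                             # the three failing parametrizations
        assert hardmax_reference(x, axis).sum(axis=axis).max() == 1.0

Because every element of the expected tensor is 0.0 or 1.0, an importer bug that zeroes the output (for example a wrong element datatype, the "dt" this PR addresses) plausibly yields exactly the reported cosine similarity of 0.0.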

Check warning on line 0 in tests.importer.onnx_.basic.test_hardmax


test_hardmax[-2-in_shape0] (tests.importer.onnx_.basic.test_hardmax) failed

test_results/onnx_basic.xml [took 0s]
Raw output
AssertionError: Fault result in infer + Fail [ infer cpu ptq ] Output 0:cosine similarity = 0.0, threshold = 0.98
assert False
in_shape = [1, 3, 16, 16], axis = -2
request = <FixtureRequest for <Function test_hardmax[-2-in_shape0]>>

(Traceback identical to the first failure above, differing only in the parametrization: model_file = 'tests_output/test_hardmax_-2-in_shape0_/simplified.onnx', failing at the same assert in tests/test_runner.py:275.)
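
The verdict "cosine similarity = 0.0, threshold = 0.98" comes from compare_results. Its actual implementation is not shown in this log; the metric itself is standard, and a sketch of the check (hypothetical helper name, assuming flattened float tensors) is:

    import numpy as np

    def cosine_similarity(expected: np.ndarray, actual: np.ndarray) -> float:
        # dot(a, b) / (||a|| * ||b||) over the flattened tensors.
        a = expected.ravel().astype(np.float64)
        b = actual.ravel().astype(np.float64)
        denom = np.linalg.norm(a) * np.linalg.norm(b)
        if denom == 0.0:
            return 0.0  # degenerate case: an all-zero output scores 0.0
        return float(np.dot(a, b) / denom)

A one-hot expected tensor compared against an all-zero actual tensor scores 0.0, far below the 0.98 ptq threshold in the failure message.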

Check warning on line 0 in tests.importer.onnx_.basic.test_hardmax


test_hardmax[-4-in_shape0] (tests.importer.onnx_.basic.test_hardmax) failed

test_results/onnx_basic.xml [took 0s]
Raw output
AssertionError: Fault result in infer + Fail [ infer cpu ptq ] Output 0:cosine similarity = 0.0, threshold = 0.98
assert False
in_shape = [1, 3, 16, 16], axis = -4
request = <FixtureRequest for <Function test_hardmax[-4-in_shape0]>>

(Traceback identical to the first failure above, differing only in the parametrization: model_file = 'tests_output/test_hardmax_-4-in_shape0_/simplified.onnx', failing at the same assert in tests/test_runner.py:275.)
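
For context, _make_module in test_hardmax.py builds the single-op ONNX model under test. Its body is not part of this log; a plausible reconstruction with onnx.helper (everything beyond the _make_module name and its (in_shape, axis) signature is an assumption) is:

    import onnx
    from onnx import helper, TensorProto

    def _make_module(in_shape, axis):
        # One Hardmax node; `axis` becomes the node attribute that the
        # failing parametrizations (0, -2, -4) exercise. Per the ONNX
        # spec the output shape equals the input shape.
        inp = helper.make_tensor_value_info('input', TensorProto.FLOAT, in_shape)
        out = helper.make_tensor_value_info('output', TensorProto.FLOAT, in_shape)
        node = helper.make_node('Hardmax', inputs=['input'],
                                outputs=['output'], axis=axis)
        graph = helper.make_graph([node], 'hardmax_test', [inp], [out])
        return helper.make_model(graph)

OnnxTestRunner.from_onnx_helper apparently serializes and simplifies this model before running it (note the simplified.onnx paths above); run then drives the eval/infer comparison that failed here.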