Commit
Move Convolution and GroupConvolution layer tests to API 2.0
Showing 7 changed files with 2,555 additions and 3,864 deletions.
.../nvidia_plugin/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp
4,112 changes: 1,264 additions & 2,848 deletions (large diff not rendered)
...lugin/tests/functional/shared_tests_instances/single_layer_tests/convolution_asym_pad.cpp
48 changes: 0 additions & 48 deletions (file deleted)
...a_plugin/tests/functional/shared_tests_instances/single_layer_tests/group_convolution.cpp
840 changes: 805 additions & 35 deletions (large diff not rendered)
.../functional/shared_tests_instances/single_layer_tests/group_convolution_autogenerated.cpp
933 changes: 0 additions & 933 deletions (file deleted)
...a_plugin/tests/functional/shared_tests_instances/single_layer_tests/ov_average_finder.hpp
137 changes: 137 additions & 0 deletions (new file)
@@ -0,0 +1,137 @@
// Copyright (C) 2021-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <error.hpp>

#include <cmath>
#include <cstddef>
#include <iostream>
#include <limits>
#include <numeric>
#include <type_traits>
#include <vector>

#include "shared_test_classes/base/ov_subgraph.hpp"

namespace ov {
namespace test {

namespace details {

template <typename T>
static T find_average(const T* ptr, const size_t size) {
    // Accumulate absolute values in float to avoid overflow for narrow types.
    const auto abs_sum = std::accumulate(
        ptr, ptr + size, 0.0f, [](float a, T b) { return std::abs(a) + std::abs(static_cast<float>(b)); });
    const T average = static_cast<T>(abs_sum / size);
    std::cout << "average absolute: " << average << '\n';
    return average;
}

struct BlobLimits {
    float min = std::numeric_limits<float>::max();
    float max = std::numeric_limits<float>::lowest();
    float avg = 0.0f;
    float abs_min = std::numeric_limits<float>::max();
    float abs_max = 0.0f;
    float abs_avg = 0.0f;
};

template <typename T>
static BlobLimits find_limits(const T* output, const size_t size, BlobLimits& bl) {
    bl = BlobLimits{};
    const auto* ptr = output;
    float sum = 0.0f;
    float abs_sum = 0.0f;
    for (size_t i = 0; i < size; ++i) {
        const auto el = static_cast<float>(ptr[i]);
        const auto abs_el = std::abs(el);
        bl.min = el < bl.min ? el : bl.min;
        bl.max = el > bl.max ? el : bl.max;
        bl.abs_min = abs_el < bl.abs_min ? abs_el : bl.abs_min;
        bl.abs_max = abs_el > bl.abs_max ? abs_el : bl.abs_max;
        sum += el;
        abs_sum += abs_el;
    }
    bl.avg = sum / size;
    bl.abs_avg = abs_sum / size;

    std::cout << "min = " << bl.min << ", max = " << bl.max << ", avg = " << bl.avg << '\n';
    std::cout << "abs_min = " << bl.abs_min << ", abs_max = " << bl.abs_max << ", abs_avg = " << bl.abs_avg << '\n';

    return bl;
}

}  // namespace details

/**
 * @brief Base class for the AverageFinder class.
 * It sets the threshold for the SubgraphBaseTest::compare() functions
 * according to the average absolute value of the reference output of a single-layer test class.
 * To use it, threshold_base should be set in the derived class:
 * threshold = average * threshold_base
 * For now it can only be used for operations with a single output.
 */
class AverageFinderBase : virtual public SubgraphBaseTest {
    virtual std::vector<ov::Tensor> calculate_refs() override {
        using namespace details;
        const auto ref_outputs = SubgraphBaseTest::calculate_refs();
        if (ref_outputs.size() == 1) {
            const auto& type = ref_outputs[0].get_element_type();
            float average;
            if (type == ov::element::Type_t::f32) {
                average = find_average(ref_outputs[0].data<float>(), ref_outputs[0].get_size());
            } else if (type == ov::element::Type_t::f16) {
                average = find_average(ref_outputs[0].data<ov::float16>(), ref_outputs[0].get_size());
            } else {
                ov::nvidia_gpu::throw_ov_exception(std::string{"Unsupported type: "} + type.get_type_name());
            }
            if (!std::isinf(average)) {
                abs_threshold = average * threshold_base;
            }
            std::cout << "threshold = " << abs_threshold << '\n';
        }
        return ref_outputs;
    }

protected:
    float threshold_base = 0.0f;
};

/**
 * @brief Wrapper that combines a layer test class with AverageFinderBase.
 * Derive the actual test class from this template.
 */
template <typename BaseLayerTest>
class AverageFinder : public BaseLayerTest, public AverageFinderBase {
    static_assert(std::is_base_of_v<SubgraphBaseTest, BaseLayerTest>,
                  "BaseLayerTest should be derived from ov::test::SubgraphBaseTest");
};

/**
 * @brief Base class for the MinMaxAvgFinder class.
 * It finds and prints the min, max, average, absolute min, absolute max and absolute average
 * values of the reference output for a single-layer test class with one output.
 */
class MinMaxAvgFinderBase : virtual public SubgraphBaseTest {
    virtual std::vector<ov::Tensor> calculate_refs() override {
        using namespace details;
        const auto ref_outputs = SubgraphBaseTest::calculate_refs();
        if (ref_outputs.size() == 1) {
            const auto& type = ref_outputs[0].get_element_type();
            BlobLimits bl;
            if (type == ov::element::Type_t::f32) {
                find_limits(ref_outputs[0].data<float>(), ref_outputs[0].get_size(), bl);
            } else if (type == ov::element::Type_t::f16) {
                find_limits(ref_outputs[0].data<ov::float16>(), ref_outputs[0].get_size(), bl);
            } else {
                ov::nvidia_gpu::throw_ov_exception(std::string{"Unsupported type: "} + type.get_type_name());
            }
        }
        return ref_outputs;
    }
};

/**
 * @brief Wrapper that combines a layer test class with MinMaxAvgFinderBase.
 * Derive the actual test class from this template.
 */
template <typename BaseLayerTest>
class MinMaxAvgFinder : public BaseLayerTest, public MinMaxAvgFinderBase {
    static_assert(std::is_base_of_v<SubgraphBaseTest, BaseLayerTest>,
                  "BaseLayerTest should be derived from ov::test::SubgraphBaseTest");
};

}  // namespace test
}  // namespace ov
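
For reference, a minimal usage sketch of the new wrappers, assuming a hypothetical MyConvolutionLayerTest that derives from ov::test::SubgraphBaseTest and is registered as a GoogleTest parameterized fixture; the class names and the 0.01f base value are illustrative only and are not taken from this commit:

// "MyConvolutionLayerTest" is a hypothetical base test class; not part of this commit.
class ConvolutionAverageTest : public ov::test::AverageFinder<MyConvolutionLayerTest> {
protected:
    void SetUp() override {
        MyConvolutionLayerTest::SetUp();
        // The effective abs_threshold becomes average(|reference output|) * threshold_base.
        threshold_base = 0.01f;  // illustrative base value
    }
};

TEST_P(ConvolutionAverageTest, Inference) { run(); }

MinMaxAvgFinder<MyConvolutionLayerTest> would be wired up the same way; it only prints the min/max/average statistics of the reference output and leaves the comparison threshold unchanged.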
..._plugin/tests/functional/shared_tests_instances/single_layer_tests/ov_finite_comparer.cpp
153 changes: 153 additions & 0 deletions (new file)
@@ -0,0 +1,153 @@
// Copyright (C) 2022-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "ov_finite_comparer.hpp"

#include "ov_models/utils/ov_helpers.hpp"

using namespace ov::test;

void ov::test::FiniteLayerComparer::compare(const std::vector<ov::Tensor>& expected_outputs,
                                            const std::vector<ov::Tensor>& actual_outputs,
                                            float threshold,
                                            bool to_check_nans,
                                            std::optional<double> infinity_value) {
    for (std::size_t output_index = 0; output_index < expected_outputs.size(); ++output_index) {
        const auto& expected = expected_outputs[output_index];
        const auto& actual = actual_outputs[output_index];
        FiniteLayerComparer::compare(expected, actual, threshold, to_check_nans, infinity_value);
    }
}

template <typename T_IE>
inline void call_compare(const ov::Tensor& expected,
                         const T_IE* actual_buffer,
                         size_t size,
                         float threshold,
                         bool to_check_nans,
                         std::optional<double> infinity_value) {
    const auto& precision = expected.get_element_type();
    switch (precision) {
        case ov::element::Type_t::i64:
            FiniteLayerComparer::compare<T_IE>(
                expected.data<int64_t>(), actual_buffer, size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::Type_t::i32:
            FiniteLayerComparer::compare<T_IE>(
                expected.data<int32_t>(), actual_buffer, size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::Type_t::i16:
            FiniteLayerComparer::compare<T_IE>(
                expected.data<int16_t>(), actual_buffer, size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::Type_t::i8:
            FiniteLayerComparer::compare<T_IE>(
                expected.data<int8_t>(), actual_buffer, size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::Type_t::u64:
            FiniteLayerComparer::compare<T_IE>(
                expected.data<uint64_t>(), actual_buffer, size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::Type_t::u32:
            FiniteLayerComparer::compare<T_IE>(
                expected.data<uint32_t>(), actual_buffer, size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::Type_t::u16:
            FiniteLayerComparer::compare<T_IE>(
                expected.data<uint16_t>(), actual_buffer, size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::Type_t::boolean:
        case ov::element::Type_t::u8:
            FiniteLayerComparer::compare<T_IE>(
                expected.data<uint8_t>(), actual_buffer, size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::Type_t::f64:
            FiniteLayerComparer::compare<T_IE>(
                expected.data<double>(), actual_buffer, size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::Type_t::f32:
            FiniteLayerComparer::compare<T_IE>(
                expected.data<float>(), actual_buffer, size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::Type_t::f16:
            FiniteLayerComparer::compare<T_IE>(
                expected.data<ov::float16>(), actual_buffer, size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::Type_t::bf16:
            FiniteLayerComparer::compare<T_IE>(
                expected.data<ov::bfloat16>(), actual_buffer, size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::Type_t::dynamic:
        case ov::element::Type_t::undefined:
            FiniteLayerComparer::compare<T_IE, T_IE>(
                expected.data<T_IE>(), actual_buffer, size, threshold, to_check_nans, infinity_value);
            break;
        default:
            FAIL() << "Comparator for " << precision << " precision isn't supported";
    }
}

void FiniteLayerComparer::compare(const ov::Tensor& expected,
                                  const ov::Tensor& actual,
                                  float threshold,
                                  bool to_check_nans,
                                  std::optional<double> infinity_value) {
    const auto& precision = actual.get_element_type();
    auto k = static_cast<float>(expected.get_element_type().size()) / precision.size();
    // W/A for int4, uint4
    if (expected.get_element_type() == ov::element::Type_t::u4 ||
        expected.get_element_type() == ov::element::Type_t::i4) {
        k /= 2;
    } else if (expected.get_element_type() == ov::element::Type_t::undefined ||
               expected.get_element_type() == ov::element::Type_t::dynamic) {
        k = 1;
    }
    ASSERT_EQ(expected.get_byte_size(), actual.get_byte_size() * k);

    const auto& size = actual.get_size();
    switch (precision) {
        case ov::element::f32:
            call_compare(expected, actual.data<float>(), size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::i32:
            call_compare(expected, actual.data<int32_t>(), size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::u32:
            call_compare(expected, actual.data<uint32_t>(), size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::i64:
            call_compare(expected, actual.data<int64_t>(), size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::i8:
            call_compare(expected, actual.data<int8_t>(), size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::u16:
            call_compare(expected, actual.data<uint16_t>(), size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::i16:
            call_compare(expected, actual.data<int16_t>(), size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::boolean:
        case ov::element::u8:
            call_compare(expected, actual.data<uint8_t>(), size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::u64:
            call_compare(expected, actual.data<uint64_t>(), size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::bf16:
            call_compare(expected, actual.data<ov::bfloat16>(), size, threshold, to_check_nans, infinity_value);
            break;
        case ov::element::f16:
            call_compare(expected, actual.data<ov::float16>(), size, threshold, to_check_nans, infinity_value);
            break;
        default:
            FAIL() << "Comparator for " << precision << " precision isn't supported";
    }
}

void ov::test::FiniteLayerComparer::compare(const std::vector<ov::Tensor>& expected_outputs,
                                            const std::vector<ov::Tensor>& actual_outputs) {
    FiniteLayerComparer::compare(
        expected_outputs, actual_outputs, abs_threshold, this->to_check_nans, this->infinity_value);
}
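
As a side note on the byte-size check in FiniteLayerComparer::compare() above: the factor k rescales the actual tensor's byte size so it can be compared against the expected (reference) tensor when the two use element types of different widths. Below is a tiny standalone sketch of the same arithmetic, using illustrative f32-reference versus f16-output sizes; it is independent of the test framework and not part of this commit.

#include <cassert>
#include <cstddef>

int main() {
    const std::size_t elements = 8;
    const std::size_t expected_byte_size = elements * 4;  // f32 reference output
    const std::size_t actual_byte_size = elements * 2;    // f16 plugin output
    const float k = 4.0f / 2.0f;                          // expected element width / actual element width
    // Mirrors ASSERT_EQ(expected.get_byte_size(), actual.get_byte_size() * k)
    assert(expected_byte_size == static_cast<std::size_t>(actual_byte_size * k));
    return 0;
}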