diff --git a/examples/classify_image.py b/examples/classify_image.py
index a67acd0..29ac91d 100644
--- a/examples/classify_image.py
+++ b/examples/classify_image.py
@@ -76,7 +76,7 @@ def main():
     raise ValueError('Only support uint8 input type.')
 
   size = common.input_size(interpreter)
-  image = Image.open(args.input).convert('RGB').resize(size, Image.ANTIALIAS)
+  image = Image.open(args.input).convert('RGB').resize(size, Image.LANCZOS)
 
   # Image data must go through two transforms before running inference:
   # 1. normalization: f = (input - mean) / std
diff --git a/examples/detect_image.py b/examples/detect_image.py
index 9862f9e..82c5851 100644
--- a/examples/detect_image.py
+++ b/examples/detect_image.py
@@ -75,7 +75,7 @@ def main():
 
   image = Image.open(args.input)
   _, scale = common.set_resized_input(
-      interpreter, image.size, lambda size: image.resize(size, Image.ANTIALIAS))
+      interpreter, image.size, lambda size: image.resize(size, Image.LANCZOS))
 
   print('----INFERENCE TIME----')
   print('Note: The first inference is slow because it includes',
diff --git a/examples/model_pipelining_classify_image.py b/examples/model_pipelining_classify_image.py
index 03cb3b8..4c4be70 100644
--- a/examples/model_pipelining_classify_image.py
+++ b/examples/model_pipelining_classify_image.py
@@ -134,7 +134,7 @@ def main():
   size = common.input_size(runner.interpreters()[0])
   name = common.input_details(runner.interpreters()[0], 'name')
   image = np.array(
-      Image.open(args.input).convert('RGB').resize(size, Image.ANTIALIAS))
+      Image.open(args.input).convert('RGB').resize(size, Image.LANCZOS))
 
   def producer():
     for _ in range(args.count):
diff --git a/examples/movenet_pose_estimation.py b/examples/movenet_pose_estimation.py
index 7a9bea6..4b5f68b 100644
--- a/examples/movenet_pose_estimation.py
+++ b/examples/movenet_pose_estimation.py
@@ -58,7 +58,7 @@ def main():
   interpreter.allocate_tensors()
 
   img = Image.open(args.input)
-  resized_img = img.resize(common.input_size(interpreter), Image.ANTIALIAS)
+  resized_img = img.resize(common.input_size(interpreter), Image.LANCZOS)
   common.set_input(interpreter, resized_img)
 
   interpreter.invoke()
diff --git a/examples/semantic_segmentation.py b/examples/semantic_segmentation.py
index 8c99d28..747a0c0 100644
--- a/examples/semantic_segmentation.py
+++ b/examples/semantic_segmentation.py
@@ -109,9 +109,9 @@ def main():
   img = Image.open(args.input)
   if args.keep_aspect_ratio:
     resized_img, _ = common.set_resized_input(
-        interpreter, img.size, lambda size: img.resize(size, Image.ANTIALIAS))
+        interpreter, img.size, lambda size: img.resize(size, Image.LANCZOS))
   else:
-    resized_img = img.resize((width, height), Image.ANTIALIAS)
+    resized_img = img.resize((width, height), Image.LANCZOS)
   common.set_input(interpreter, resized_img)
 
   interpreter.invoke()
diff --git a/tests/detect_test.py b/tests/detect_test.py
index f5189c5..b840090 100644
--- a/tests/detect_test.py
+++ b/tests/detect_test.py
@@ -33,7 +33,7 @@ def get_objects(model_file, delegate, image_file, score_threshold=0.0):
   interpreter.allocate_tensors()
   image = Image.open(test_utils.test_data_path(image_file))
   _, scale = common.set_resized_input(
-      interpreter, image.size, lambda size: image.resize(size, Image.ANTIALIAS))
+      interpreter, image.size, lambda size: image.resize(size, Image.LANCZOS))
   interpreter.invoke()
   return detect.get_objects(
       interpreter, score_threshold=score_threshold, image_scale=scale)
diff --git a/tests/multiple_tpus_test.py b/tests/multiple_tpus_test.py
index 8fed115..5e93d3b 100644
--- a/tests/multiple_tpus_test.py
+++ b/tests/multiple_tpus_test.py
@@ -64,7 +64,7 @@ def detection_task(num_inferences):
 
     _, scale = common.set_resized_input(
         interpreter, img.size,
-        lambda size, image=img: image.resize(size, Image.ANTIALIAS))
+        lambda size, image=img: image.resize(size, Image.LANCZOS))
     interpreter.invoke()
     ret = detect.get_objects(
         interpreter, score_threshold=0.7, image_scale=scale)
diff --git a/tests/segment_test.py b/tests/segment_test.py
index 7aae040..634fe81 100644
--- a/tests/segment_test.py
+++ b/tests/segment_test.py
@@ -47,7 +47,7 @@ def segment_image(model_file, delegate, image_file, mask_file):
 
   interpreter.allocate_tensors()
   image = Image.open(test_utils.test_data_path(image_file)).resize(
-      common.input_size(interpreter), Image.ANTIALIAS)
+      common.input_size(interpreter), Image.LANCZOS)
   common.set_input(interpreter, image)
 
   interpreter.invoke()
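
Note on the change: Image.ANTIALIAS was deprecated in Pillow 9.1.0 and removed in Pillow 10.0.0. Image.LANCZOS has been an alias for the same Lanczos resampling filter since Pillow 2.7.0, so this rename produces identical output while working on both old and new Pillow releases. Pillow 9.1+ also exposes the filter as Image.Resampling.LANCZOS; a minimal compatibility sketch (not part of this patch; the input path and target size below are illustrative) could resolve the constant once at import time:

    # Sketch: pick the Lanczos filter constant across Pillow versions.
    # Image.Resampling was added in Pillow 9.1.0; Image.LANCZOS has existed
    # since Pillow 2.7.0 and still works in Pillow 10+.
    from PIL import Image

    try:
        LANCZOS = Image.Resampling.LANCZOS  # Pillow >= 9.1
    except AttributeError:
        LANCZOS = Image.LANCZOS  # older Pillow

    # Illustrative usage (hypothetical file name and size):
    image = Image.open('input.jpg').convert('RGB').resize((224, 224), LANCZOS)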