Refactor image and video classes to support parallel and ordered flags #31

Open · wants to merge 1 commit into master
6 changes: 4 additions & 2 deletions Katna/image_selector.py
@@ -30,9 +30,10 @@ class ImageSelector(object):
     :type object: class:`Object`
     """

-    def __init__(self, n_processes=1):
+    def __init__(self, n_processes=1, ordered=False):
         # Setting number of processes for Multiprocessing Pool Object
         self.n_processes = n_processes
+        self.ordered = ordered

         # Setting for optimum Brightness values
         self.min_brightness_value = config.ImageSelector.min_brightness_value
@@ -297,7 +298,8 @@ def select_best_frames(self, input_key_frames, number_of_frames):
         selected_images_index = self.__get_best_images_index_from_each_cluster__(
             filtered_key_frames, files_clusters_index_array
         )
-
+        if self.ordered:
+            selected_images_index = sorted(selected_images_index)
         for index in selected_images_index:
             img = filtered_key_frames[index]
             filtered_images_list.append(img)
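For review context, here is a minimal, self-contained sketch of what the new `ordered` flag is meant to do; the index values and frame names are made up for illustration and are not produced by Katna itself:

```python
# Hypothetical illustration: the per-cluster selection step can return indices
# out of chronological order (e.g. grouped by cluster, not by frame position).
filtered_key_frames = [f"frame_{i}" for i in range(10)]  # stand-in for decoded frames
selected_images_index = [7, 2, 5]                        # assumed cluster-selection output

ordered = True
if ordered:
    # sorted() restores the original (chronological) frame order before lookup
    selected_images_index = sorted(selected_images_index)

filtered_images_list = [filtered_key_frames[i] for i in selected_images_index]
print(filtered_images_list)  # ['frame_2', 'frame_5', 'frame_7'] when ordered=True
```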
29 changes: 23 additions & 6 deletions Katna/video.py
@@ -39,12 +39,24 @@ class Video(object):
     :type object: class:`Object`
     """

-    def __init__(self, autoflip_build_path=None, autoflip_model_path=None):
+    def __init__(self, autoflip_build_path=None, autoflip_model_path=None, parallel: bool = True, ordered: bool = False):
+        """
+        :param autoflip_build_path: path to the mediapipe autoflip build
+        :type autoflip_build_path: str
+        :param autoflip_model_path: path to the mediapipe autoflip model
+        :type autoflip_model_path: str
+        :param parallel: if True, keyframe extraction runs in a multiprocessing pool, defaults to True
+        :type parallel: bool, optional
+        :param ordered: if True, keyframes are returned in their original (chronological) order, defaults to False
+        :type ordered: bool, optional
+        """
         # Find out location of ffmpeg binary on system
         helper._set_ffmpeg_binary_path()
         # If the duration of the clipped video is less than **min_video_duration**
         # then the clip will be appended to the previous clip
         self._min_video_duration = config.Video.min_video_duration
+        self.parallel = parallel
+        self.ordered = ordered

         # Calculating optimum number of processes for multiprocessing
         self.n_processes = cpu_count() // 2 - 1
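A hedged usage sketch of the two new constructor flags; it assumes the public `extract_video_keyframes(no_of_frames, file_path)` wrapper around the private `_extract_keyframes_from_video` shown below, and `sample.mp4` is a placeholder path:

```python
from Katna.video import Video

# Sequential, order-preserving extraction; handy for debugging or low-core hosts.
# Leaving the flags at their defaults (parallel=True, ordered=False) keeps the
# current behaviour unchanged.
vd = Video(parallel=False, ordered=True)

# Placeholder inputs: request 5 keyframes from a local video file.
keyframes = vd.extract_video_keyframes(no_of_frames=5, file_path="sample.mp4")
print(f"extracted {len(keyframes)} keyframes")
```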
@@ -176,15 +188,20 @@ def _extract_keyframes_from_video(self, no_of_frames, file_path):

         # Passing all the clipped videos for frame extraction using the map
         # function of the multiprocessing pool
-        with self.pool_extractor:
-            extracted_candidate_frames = self.pool_extractor.map(
-                frame_extractor.extract_candidate_frames, chunked_videos
-            )
+        if self.parallel:
+            with self.pool_extractor:
+                extracted_candidate_frames = self.pool_extractor.map(
+                    frame_extractor.extract_candidate_frames, chunked_videos
+                )
+        else:
+            extracted_candidate_frames = []
+            for chunked_video in chunked_videos:
+                extracted_candidate_frames.append(frame_extractor.extract_candidate_frames(chunked_video))
         # Converting the nested list of extracted frames into a flat 1D list
         extracted_candidate_frames = functools.reduce(operator.iconcat, extracted_candidate_frames, [])

         self._remove_clips(chunked_videos)
-        image_selector = ImageSelector(self.n_processes)
+        image_selector = ImageSelector(self.n_processes, self.ordered)

         top_frames = image_selector.select_best_frames(
             extracted_candidate_frames, no_of_frames
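For reviewers, a minimal standalone sketch (with a toy `extract` function standing in for `frame_extractor.extract_candidate_frames`) showing that the sequential branch feeds the same flattening step as the pool-based `map`, which is the invariant this change relies on:

```python
import functools
import operator
from multiprocessing import Pool


def extract(chunk):
    # Toy stand-in: returns a small list of "frames" per chunked video.
    return [f"{chunk}-frame{i}" for i in range(2)]


if __name__ == "__main__":
    chunked_videos = ["clip0", "clip1", "clip2"]

    # Parallel branch: Pool.map preserves the input order of chunked_videos.
    with Pool(processes=2) as pool:
        parallel_result = pool.map(extract, chunked_videos)

    # Sequential branch: plain loop, same nested-list shape.
    sequential_result = [extract(chunk) for chunk in chunked_videos]

    # Both shapes flatten identically, exactly as in the diff above.
    flat_parallel = functools.reduce(operator.iconcat, parallel_result, [])
    flat_sequential = functools.reduce(operator.iconcat, sequential_result, [])
    assert flat_parallel == flat_sequential
    print(flat_parallel)
```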