From 31731ee18d07e83a8be6322306be97bc6c20dcc5 Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Thu, 20 Jun 2024 18:55:49 +0200 Subject: [PATCH 01/20] very much work in progress --- .../acquisition/acq_eng_py/internal/engine.py | 427 ++------- .../acq_eng_py/main/AcqEngPy_Acquisition.py | 267 ------ .../acq_eng_py/main/acq_eng_metadata.py | 4 +- .../acq_eng_py/main/acquisition_event.py | 902 +++++++++--------- .../acq_eng_py/mm_device_implementations.py | 85 ++ .../acquisition/acquisition_superclass.py | 62 +- .../acquisition/java_backend_acquisitions.py | 9 +- .../python_backend_acquisitions.py | 203 +++- pycromanager/headless.py | 35 +- 9 files changed, 849 insertions(+), 1145 deletions(-) delete mode 100644 pycromanager/acquisition/acq_eng_py/main/AcqEngPy_Acquisition.py create mode 100644 pycromanager/acquisition/acq_eng_py/mm_device_implementations.py diff --git a/pycromanager/acquisition/acq_eng_py/internal/engine.py b/pycromanager/acquisition/acq_eng_py/internal/engine.py index 6018d783..e535a2a3 100644 --- a/pycromanager/acquisition/acq_eng_py/internal/engine.py +++ b/pycromanager/acquisition/acq_eng_py/internal/engine.py @@ -1,14 +1,13 @@ import traceback -from concurrent.futures import Future from concurrent.futures import ThreadPoolExecutor import time import datetime -from pycromanager.acquisition.acq_eng_py.main.acquisition_event import AcquisitionEvent -from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata +from pycromanager.acquisition.new.acq_events import AcquisitionEvent from pycromanager.acquisition.acq_eng_py.internal.hardware_sequences import HardwareSequences import pymmcore from pycromanager.acquisition.acq_eng_py.main.acq_notification import AcqNotification +from pycromanager.acquisition.python_backend_acquisitions import PythonBackendAcquisition HARDWARE_ERROR_RETRIES = 6 DELAY_BETWEEN_RETRIES_MS = 5 @@ -33,7 +32,7 @@ def shutdown(self): @staticmethod def 
get_core(): - return Engine.singleton.core + return Engine.singleton._core @staticmethod def get_instance(): @@ -41,54 +40,22 @@ def get_instance(): def finish_acquisition(self, acq): def finish_acquisition_inner(): - if acq.is_debug_mode(): - Engine.get_core().logMessage("recieved acquisition finished signal") self.sequenced_events.clear() - if acq.is_debug_mode(): - Engine.get_core().logMessage("creating acquisition finished event") - self.execute_acquisition_event(AcquisitionEvent.create_acquisition_finished_event(acq)) + self.execute_acquisition_event(acq, None) acq.block_until_events_finished() return self.event_generator_executor.submit(finish_acquisition_inner) - def submit_event_iterator(self, event_iterator): - def submit_event_iterator_inner(): - acq = None - while True: - try: - event = next(event_iterator, None) - except StopIteration: - traceback.print_exc() - break - if event is None: - break # iterator exhausted - acq = event.acquisition_ - if acq.is_debug_mode(): - Engine.get_core().logMessage("got event: " + event.to_string()) - for h in event.acquisition_.get_event_generation_hooks(): - event = h.run(event) - if event is None: - return - while event.acquisition_.is_paused(): - time.sleep(0.005) - try: - if acq.is_abort_requested(): - if acq.is_debug_mode(): - Engine.get_core().logMessage("acquisition aborted") - return - image_acquired_future = self.process_acquisition_event(event) - image_acquired_future.result() - - except Exception as ex: - traceback.print_exc() - acq.abort(ex) - raise ex + def submit_event_iterator(self, acquisition, event_generator): - last_image_future = self.process_acquisition_event(AcquisitionEvent.create_acquisition_sequence_end_event(acq)) - last_image_future.result() + for event in event_generator: + image_acquired_future = self.acq_executor.submit(lambda: self.execute_acquisition_event(acquisition, event)) + # TODO: before, this used to use the event generator thread to do any transpiling (i.e. 
checking for sequenceing) + # in order to (theoretically) improve speed. Now we're just returning the image acquired future directly. + # Probably doesn't matter becuase this is suppoed to be async anyway - return self.event_generator_executor.submit(submit_event_iterator_inner) + # return self.event_generator_executor.submit(submit_event_iterator_inner) def check_for_default_devices(self, event: AcquisitionEvent): @@ -99,134 +66,65 @@ def check_for_default_devices(self, event: AcquisitionEvent): if event.get_x_position() is not None and (xy_stage is None or xy_stage == ""): raise Exception("Event requires an x position, but no Core-XYStage device is set") - def process_acquisition_event(self, event: AcquisitionEvent) -> Future: - def process_acquisition_event_inner(): - try: - self.check_for_default_devices(event) - if event.acquisition_.is_debug_mode(): - self.core.logMessage("Processing event: " + str(event)) - self.core.logMessage("checking for sequencing") - if not self.sequenced_events and not event.is_acquisition_sequence_end_event(): - self.sequenced_events.append(event) - elif self.is_sequencable(self.sequenced_events, event, len(self.sequenced_events) + 1): - # merge event into the sequence - self.sequenced_events.append(event) - else: - # all events - sequence_event = self.merge_sequence_event(self.sequenced_events) - self.sequenced_events.clear() - # Add in the start of the new sequence - if not event.is_acquisition_sequence_end_event(): - self.sequenced_events.append(event) - if event.acquisition_.is_debug_mode(): - self.core.logMessage("executing acquisition event") - try: - self.execute_acquisition_event(sequence_event) - except HardwareControlException as e: - raise e - except Exception as e: - traceback.print_exc() - if self.core.is_sequence_running(): - self.core.stop_sequence_acquisition() - raise e - - - return self.acq_executor.submit(process_acquisition_event_inner) - - def execute_acquisition_event(self, event: AcquisitionEvent): + # def 
process_acquisition_event(self, acquisition: PythonBackendAcquisition, + # event: AcquisitionEvent) -> Future: + + # TODO + # def process_acquisition_event_inner(): + # try: + # self.check_for_default_devices(event) + # if event.acquisition_.is_debug_mode(): + # self.core.logMessage("Processing event: " + str(event)) + # self.core.logMessage("checking for sequencing") + # if not self.sequenced_events and not event.is_acquisition_sequence_end_event(): + # self.sequenced_events.append(event) + # elif self.is_sequencable(self.sequenced_events, event, len(self.sequenced_events) + 1): + # # merge event into the sequence + # self.sequenced_events.append(event) + # else: + # # all events + # sequence_event = self.merge_sequence_event(self.sequenced_events) + # self.sequenced_events.clear() + # # Add in the start of the new sequence + # if not event.is_acquisition_sequence_end_event(): + # self.sequenced_events.append(event) + # if event.acquisition_.is_debug_mode(): + # self.core.logMessage("executing acquisition event") + # try: + # self.execute_acquisition_event(sequence_event) + # except HardwareControlException as e: + # raise e + # except Exception as e: + # traceback.print_exc() + # if self.core.is_sequence_running(): + # self.core.stop_sequence_acquisition() + # raise e + # + # + # return self.acq_executor.submit(process_acquisition_event_inner) + + def execute_acquisition_event(self, acquisition: PythonBackendAcquisition,event: AcquisitionEvent): # check if we should pause until the minimum start time of the event has occured - while event.get_minimum_start_time_absolute() is not None and \ - time.time() * 1000 < event.get_minimum_start_time_absolute(): - wait_time = event.get_minimum_start_time_absolute() - time.time() * 1000 - event.acquisition_.block_unless_aborted(wait_time) - - if event.is_acquisition_finished_event(): + # while event.get_minimum_start_time_absolute() is not None and \ + # time.time() * 1000 < event.get_minimum_start_time_absolute(): + # 
wait_time = event.get_minimum_start_time_absolute() - time.time() * 1000 + # event.acquisition_.block_unless_aborted(wait_time) + + if event is not None: + # execute the event + for instruction in event.device_instructions: + instruction.execute() + else: # signal to finish saving thread and mark acquisition as finished - if event.acquisition_.are_events_finished(): + if acquisition._are_events_finished(): return # Duplicate finishing event, possibly from x-ing out viewer + acquisition._add_to_output(None) + acquisition._post_notification(AcqNotification.create_acq_events_finished_notification()) - # send message acquisition finished message so things shut down properly - for h in event.acquisition_.get_event_generation_hooks(): - h.run(event) - h.close() - for h in event.acquisition_.get_before_hardware_hooks(): - h.run(event) - h.close() - for h in event.acquisition_.get_after_hardware_hooks(): - h.run(event) - h.close() - for h in event.acquisition_.get_after_camera_hooks(): - h.run(event) - h.close() - for h in event.acquisition_.get_after_exposure_hooks(): - h.run(event) - h.close() - event.acquisition_.add_to_output(self.core.TaggedImage(None, None)) - event.acquisition_.post_notification(AcqNotification.create_acq_events_finished_notification()) - - else: - event.acquisition_.post_notification(AcqNotification( - AcqNotification.Hardware, event.axisPositions_, AcqNotification.Hardware.PRE_HARDWARE)) - for h in event.acquisition_.get_before_hardware_hooks(): - event = h.run(event) - if event is None: - return # The hook cancelled this event - self.abort_if_requested(event, None) - hardware_sequences_in_progress = HardwareSequences() - try: - self.prepare_hardware(event, hardware_sequences_in_progress) - except HardwareControlException as e: - self.stop_hardware_sequences(hardware_sequences_in_progress) - raise e - - event.acquisition_.post_notification(AcqNotification( - AcqNotification.Hardware, event.axisPositions_, AcqNotification.Hardware.PRE_Z_DRIVE)) - 
for h in event.acquisition_.get_before_z_hooks(): - event = h.run(event) - if event is None: - return # The hook cancelled this event - self.abort_if_requested(event, None) - - try: - self.start_z_drive(event, hardware_sequences_in_progress) - except HardwareControlException as e: - self.stop_hardware_sequences(hardware_sequences_in_progress) - raise e - - event.acquisition_.post_notification(AcqNotification( - AcqNotification.Hardware, event.axisPositions_, AcqNotification.Hardware.POST_HARDWARE)) - for h in event.acquisition_.get_after_hardware_hooks(): - event = h.run(event) - if event is None: - return # The hook cancelled this event - self.abort_if_requested(event, hardware_sequences_in_progress) - # Hardware hook may have modified wait time, so check again if we should - # pause until the minimum start time of the event has occurred. - while event.get_minimum_start_time_absolute() is not None and \ - time.time() * 1000 < event.get_minimum_start_time_absolute(): - try: - self.abort_if_requested(event, hardware_sequences_in_progress) - wait_time = event.get_minimum_start_time_absolute() - time.time() * 1000 - event.acquisition_.block_unless_aborted(wait_time) - except Exception: - # Abort while waiting for next time point - return - - if event.should_acquire_image(): - if event.acquisition_.is_debug_mode(): - self.core.logMessage("acquiring image(s)") - try: - self.acquire_images(event, hardware_sequences_in_progress) - except TimeoutError: - # Don't abort on a timeout - # TODO: this could probably be an option added to the acquisition in the future - print("Timeout while acquiring images") - - # if the acquisition was aborted, make sure everything shuts down properly - self.abort_if_requested(event, hardware_sequences_in_progress) - def acquire_images(self, event: AcquisitionEvent, hardware_sequences_in_progress: HardwareSequences) -> None: + def acquire_images(self, acquisition : PythonBackendAcquisition, + event: AcquisitionEvent, 
hardware_sequences_in_progress: HardwareSequences) -> None: """ Acquire 1 or more images in a sequence, add some metadata, then put them into an output queue. @@ -234,176 +132,25 @@ def acquire_images(self, event: AcquisitionEvent, hardware_sequences_in_progress If the event is a sequence and a sequence acquisition is started in the core, It should be completed by the time this method returns. """ - camera_image_counts = event.get_camera_image_counts(self.core.get_camera_device()) - if event.get_sequence() is not None and len(event.get_sequence()) > 1: - # start sequences on one or more cameras - for camera_device_name, image_count in camera_image_counts.items(): - event.acquisition_.post_notification(AcqNotification( - AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.PRE_SEQUENCE_STARTED)) - self.core.start_sequence_acquisition( - camera_device_name, camera_image_counts[camera_device_name], 0, True) - else: - # snap one image with no sequencing - event.acquisition_.post_notification(AcqNotification( - AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.PRE_SNAP)) - if event.get_camera_device_name() is not None: - current_camera = self.core.get_camera_device() - width = self.core.get_image_width() - height = self.core.get_image_height() - self.core.set_camera_device(event.get_camera_device_name()) - self.core.snap_image() - self.core.set_camera_device(current_camera) - else: - # Unlike MMCoreJ, pymmcore does not automatically add this metadata when snapping, so need to do it manually - width = self.core.get_image_width() - height = self.core.get_image_height() - self.core.snap_image() - event.acquisition_.post_notification(AcqNotification( - AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.POST_SNAP)) - for h in event.acquisition_.get_after_exposure_hooks(): - h.run(event) - - # get elapsed time - current_time_ms = time.time() * 1000 - if event.acquisition_.get_start_time_ms() == -1: - # first image, 
initialize - event.acquisition_.set_start_time_ms(current_time_ms) - - # need to assign events to images as they come out, assuming they might be in arbitrary order, - # but that each camera itself is ordered - multi_cam_adapter_camera_event_lists = None - if event.get_sequence() is not None: - multi_cam_adapter_camera_event_lists = {} - for cam_index in range(self.core.get_number_of_camera_channels()): - multi_cam_adapter_camera_event_lists[cam_index] = [] - for e in event.get_sequence(): - multi_cam_adapter_camera_event_lists[cam_index].append(e) - - # Run a hook after the camera sequence acquisition has started. This can be used for - # external triggering of the camera (when it is in sequence mode). - # note: SnapImage will block until exposure finishes. - # If it is desired that AfterCameraHooks trigger cameras - # in Snap mode, one possibility is that those hooks (or SnapImage) should run - # in a separate thread, started after snapImage is started. But there is no - # guarantee that the camera will be ready to accept a trigger at that point. - for h in event.acquisition_.get_after_camera_hooks(): - h.run(event) - - if event.acquisition_.is_debug_mode(): - self.core.log_message("images acquired, copying from core") - start_copy_time = time.time() - # Loop through and collect all acquired images. 
There will be - # (# of images in sequence) x (# of camera channels) of them - timeout = False - for i in range(0, 1 if event.get_sequence() is None else len(event.get_sequence())): - if timeout: - # Cancel the rest of the sequence - self.stop_hardware_sequences(hardware_sequences_in_progress) - break - try: - exposure = self.core.get_exposure() if event.get_exposure() is None else event.get_exposure() - except Exception as ex: - raise Exception("Couldnt get exposure form core") - num_cam_channels = self.core.get_number_of_camera_channels() - - need_to_run_after_exposure_hooks = len(event.acquisition_.get_after_exposure_hooks()) > 0 - for cam_index in range(num_cam_channels): - ti = None - camera_name = None - while ti is None: - if event.acquisition_.is_abort_requested(): - return - try: - if event.get_sequence() is not None and len(event.get_sequence()) > 1: - if self.core.is_buffer_overflowed(): - raise Exception("Sequence buffer overflow") - try: - ti = self.core.pop_next_tagged_image() - camera_name = ti.tags["Camera"] - except Exception as e: - # continue waiting - if not self.core.is_sequence_running() and self.core.get_remaining_image_count() == 0: - raise Exception("Expected images did not arrive in circular buffer") - # check if timeout has been exceeded. This is used in the case of a - # camera waiting for a trigger that never comes. 
- if event.get_sequence()[i].get_timeout_ms() is not None: - if time.time() - start_copy_time > event.get_sequence()[i].get_timeout_ms(): - timeout = True - self.core.stop_sequence_acquisition() - while self.core.is_sequence_running(): - time.sleep(0.001) - break - else: - try: - # TODO: probably there should be a timeout here too, but I'm - # not sure the snap_image system supports it (as opposed to sequences) - # This is a little different from the java version due to differences in metadata - # handling in the SWIG wrapper - camera_name = self.core.get_camera_device() - ti = self.core.get_tagged_image(cam_index, camera_name, height, width) - except Exception as e: - # continue waiting - pass - except Exception as ex: - # Sequence buffer overflow - e = HardwareControlException(str(ex)) - event.acquisition_.abort(e) - raise e - if need_to_run_after_exposure_hooks: - for camera_device_name in camera_image_counts.keys(): - if self.core.is_sequence_running(camera_device_name): - # all of the sequences are not yet done, so this will need to be handled - # on another iteration of the loop - break - event.acquisition_.post_notification(AcqNotification( - AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.POST_EXPOSURE)) - for h in event.acquisition_.get_after_exposure_hooks(): - h.run(event) - need_to_run_after_exposure_hooks = False - - if timeout: - break - # Doesn't seem to be a version in the API in which you don't have to do this - actual_cam_index = cam_index - if "Multi Camera-CameraChannelIndex" in ti.tags.keys() : - actual_cam_index = ti.tags["Multi Camera-CameraChannelIndex"] - if num_cam_channels == 1: - # probably a mistake in the core.... - actual_cam_index = 0 # Override index because not using multi cam mode right now - - corresponding_event = event - if event.get_sequence() is not None: - # Find the event that corresponds to the camera that captured this image. 
- # This assumes that the images from a single camera are in order - # in the sequence, though different camera images may be interleaved - if event.get_sequence()[0].get_camera_device_name() is not None: - # camera is specified in the acquisition event. Find the first event that matches - # this camera name. - the_camera_name = camera_name - corresponding_event = next(filter(lambda - e: e.get_camera_device_name() is not None and e.get_camera_device_name() == the_camera_name, - multi_cam_adapter_camera_event_lists.get(actual_cam_index))) - multi_cam_adapter_camera_event_lists.get(actual_cam_index).remove(corresponding_event) - else: - # multi camera adapter or just using the default camera - corresponding_event = multi_cam_adapter_camera_event_lists.get(actual_cam_index).pop(0) - # add standard metadata - AcqEngMetadata.add_image_metadata(self.core, ti.tags, corresponding_event, - current_time_ms - corresponding_event.acquisition_.get_start_time_ms(), - exposure) - # add user metadata specified in the event - corresponding_event.acquisition_.add_tags_to_tagged_image(ti.tags, corresponding_event.get_tags()) - corresponding_event.acquisition_.add_to_image_metadata(ti.tags) - corresponding_event.acquisition_.add_to_output(ti) - - self.stop_hardware_sequences(hardware_sequences_in_progress) - - if event.get_sequence() is not None: - event.acquisition_.post_notification(AcqNotification( - AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.POST_SEQUENCE_STOPPED)) - - if timeout: - raise TimeoutError("Timeout waiting for images to arrive in circular buffer") + + acquisition.post_notification(AcqNotification( + AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.PRE_SEQUENCE_STARTED)) + + # add standard metadata + # TODO + # AcqEngMetadata.add_image_metadata(self.core, ti.tags, corresponding_event, + # current_time_ms - corresponding_event.acquisition_.get_start_time_ms(), + # exposure) + # add user metadata specified in the event + # 
acquisition.add_tags_to_tagged_image(ti.tags, corresponding_event.get_tags()) + + + + acquisition._add_to_output(ti) + + # TODO stop sequences + # TODO: exceptiopn handling + # TODO: shutdown def abort_if_requested(self, event: AcquisitionEvent, hardware_sequences_in_progress: HardwareSequences) -> None: if event.acquisition_.is_abort_requested(): @@ -440,7 +187,7 @@ def move_xy_stage(event): if event.is_xy_sequenced(): self.core.start_xy_stage_sequence(xy_stage) else: - # Could be sequenced over other devices, in that case get xy position from first in sequence + # Could be sequenced over other devices.py, in that case get xy position from first in sequence prev_x_position = None if self.last_event is None else None if self.last_event.get_sequence() is None else \ self.last_event.get_sequence()[0].get_x_position() x_position = event.get_sequence()[ @@ -630,8 +377,8 @@ def change_additional_properties(event): self.last_event = None # Update all hardware if switching to a new acquisition - # Other stage devices - loop_hardware_command_retries(lambda: move_other_stage_devices(event), "Moving other stage devices") + # Other stage devices.py + loop_hardware_command_retries(lambda: move_other_stage_devices(event), "Moving other stage devices.py") # XY Stage loop_hardware_command_retries(lambda: move_xy_stage(event), "Moving XY stage") # Channels @@ -746,7 +493,7 @@ def is_sequencable(self, previous_events, next_event, new_seq_length): return False # arbitrary z stages - # TODO implement sequences along arbitrary other stage devices + # TODO implement sequences along arbitrary other stage devices.py for stage_device in previous_event.get_stage_device_names(): return False diff --git a/pycromanager/acquisition/acq_eng_py/main/AcqEngPy_Acquisition.py b/pycromanager/acquisition/acq_eng_py/main/AcqEngPy_Acquisition.py deleted file mode 100644 index 6bc6fa90..00000000 --- a/pycromanager/acquisition/acq_eng_py/main/AcqEngPy_Acquisition.py +++ /dev/null @@ -1,267 +0,0 @@ 
-import json -import queue -import traceback -import threading - -from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata -from pycromanager.acquisition.acq_eng_py.internal.engine import Engine -from pycromanager.acquisition.acq_eng_py.main.acq_notification import AcqNotification -from pycromanager.acquisition.acq_eng_py.internal.notification_handler import NotificationHandler - - -class Acquisition(): - - EVENT_GENERATION_HOOK = 0 - # This hook runs before changes to the hardware (corresponding to the instructions in the - # event) are made - BEFORE_HARDWARE_HOOK = 1 - # This hook runs after all changes to the hardware except dor setting th Z drive have been - # made. This is useful for things such as autofocus. - BEFORE_Z_DRIVE = 2 - # This hook runs after changes to the hardware took place, but before camera exposure - # (either a snap or a sequence) is started - AFTER_HARDWARE_HOOK = 3 - # Hook runs after the camera sequence acquisition has started. This can be used for - # external triggering of the camera - AFTER_CAMERA_HOOK = 4 - # Hook runs after the camera exposure ended (when possible, before readout of the camera - # and availability of the images in memory). 
- AFTER_EXPOSURE_HOOK = 5 - - IMAGE_QUEUE_SIZE = 30 - - def __init__(self, sink, summary_metadata_processor=None, initialize=True): - self.xy_stage_ = None - self.events_finished_ = threading.Event() - self.abort_requested_ = threading.Event() - self.start_time_ms_ = -1 - self.paused_ = False - self.event_generation_hooks_ = [] - self.before_hardware_hooks_ = [] - self.before_z_hooks_ = [] - self.after_hardware_hooks_ = [] - self.after_camera_hooks_ = [] - self.after_exposure_hooks_ = [] - self.image_processors_ = [] - self.first_dequeue_ = queue.Queue(maxsize=self.IMAGE_QUEUE_SIZE) - self.processor_output_queues_ = {} - self.debug_mode_ = False - self.abort_exception_ = None - self.image_metadata_processor_ = None - self.notification_handler_ = NotificationHandler() - self.started_ = False - self.core_ = Engine.get_core() - self.summary_metadata_processor_ = summary_metadata_processor - self.data_sink_ = sink - if initialize: - self.initialize() - - def post_notification(self, notification): - self.notification_handler_.post_notification(notification) - - def add_acq_notification_listener(self, post_notification_fn): - self.notification_handler_.add_listener(post_notification_fn) - - def get_data_sink(self): - return self.data_sink_ - - def set_debug_mode(self, debug): - self.debug_mode_ = debug - - def is_debug_mode(self): - return self.debug_mode_ - - def is_abort_requested(self): - return self.abort_requested_.is_set() - - def abort(self, e=None): - if e: - self.abort_exception_ = e - if self.abort_requested_.is_set(): - return - self.abort_requested_.set() - if self.is_paused(): - self.set_paused(False) - Engine.get_instance().finish_acquisition(self) - - def check_for_exceptions(self): - if self.abort_exception_: - raise self.abort_exception_ - - def add_to_summary_metadata(self, summary_metadata): - if self.summary_metadata_processor_: - self.summary_metadata_processor_(summary_metadata) - - def add_to_image_metadata(self, tags): - if 
self.image_metadata_processor_: - self.image_metadata_processor_(tags) - - def add_tags_to_tagged_image(self, tags, more_tags): - if not more_tags: - return - more_tags_object = json.loads(json.dumps(more_tags)) - tags['AcqEngMetadata.TAGS'] = more_tags_object - - def submit_event_iterator(self, evt): - if not self.started_: - self.start() - return Engine.get_instance().submit_event_iterator(evt) - - def start_saving_thread(self): - def saving_thread(acq): - try: - while True: - if acq.debug_mode_: - acq.core_.log_message(f"Image queue size: {len(acq.first_dequeue_)}") - if not acq.image_processors_: - if acq.debug_mode_: - acq.core_.log_message("waiting for image to save") - img = acq.first_dequeue_.get() - if acq.debug_mode_: - acq.core_.log_message("got image to save") - acq.save_image(img) - if img.tags is None and img.pix is None: - break - else: - img = acq.processor_output_queues_[acq.image_processors_[-1]].get() - if acq.data_sink_: - if acq.debug_mode_: - acq.core_.log_message("Saving image") - if img.tags is None and img.pix is None: - break - acq.save_image(img) - if acq.debug_mode_: - acq.core_.log_message("Finished saving image") - except Exception as ex: - traceback.print_exc() - acq.abort(ex) - finally: - acq.save_image(acq.core_.TaggedImage(None, None)) - - threading.Thread(target=saving_thread, args=(self,)).start() - - def add_image_processor(self, p): - if self.started_: - raise RuntimeError("Cannot add processor after acquisition started") - self.image_processors_.append(p) - self.processor_output_queues_[p] = queue.Queue(maxsize=self.IMAGE_QUEUE_SIZE) - if len(self.image_processors_) == 1: - p.set_acq_and_queues(self, self.first_dequeue_, self.processor_output_queues_[p]) - else: - p.set_acq_and_queues(self, self.processor_output_queues_[self.image_processors_[-2]], - self.processor_output_queues_[self.image_processors_[-1]]) - - def add_hook(self, h, type_): - if self.started_: - raise RuntimeError("Cannot add hook after acquisition started") 
- if type_ == self.EVENT_GENERATION_HOOK: - self.event_generation_hooks_.append(h) - elif type_ == self.BEFORE_HARDWARE_HOOK: - self.before_hardware_hooks_.append(h) - elif type_ == self.BEFORE_Z_HOOK: - self.before_z_hooks_.append(h) - elif type_ == self.AFTER_HARDWARE_HOOK: - self.after_hardware_hooks_.append(h) - elif type_ == self.AFTER_CAMERA_HOOK: - self.after_camera_hooks_.append(h) - elif type_ == self.AFTER_EXPOSURE_HOOK: - self.after_exposure_hooks_.append(h) - - def initialize(self): - summary_metadata = AcqEngMetadata.make_summary_metadata(self.core_, self) - self.add_to_summary_metadata(summary_metadata) - if self.data_sink_: - self.data_sink_.initialize(summary_metadata) - - def start(self): - if self.data_sink_: - self.start_saving_thread() - self.post_notification(AcqNotification.create_acq_started_notification()) - self.started_ = True - - def save_image(self, image): - if image.tags is None and image.pix is None: - self.data_sink_.finish() - self.post_notification(AcqNotification.create_data_sink_finished_notification()) - else: - pixels, metadata = image.pix, image.tags - axes = AcqEngMetadata.get_axes(metadata) - self.data_sink_.put_image(axes, pixels, metadata) - self.post_notification(AcqNotification.create_image_saved_notification(axes)) - - def get_start_time_ms(self): - return self.start_time_ms_ - - def set_start_time_ms(self, time): - self.start_time_ms_ = time - - def is_paused(self): - return self.paused_ - - def is_started(self): - return self.started_ - - def set_paused(self, pause): - self.paused_ = pause - - def get_summary_metadata(self): - return self.summary_metadata_ - - # perhaps not needed in python like it is in java - # def anything_acquired(self): - # return not self.data_sink_ or self.data_sink_.anything_acquired() - - def add_image_metadata_processor(self, processor): - if not self.image_metadata_processor_: - self.image_metadata_processor_ = processor - else: - raise RuntimeError("Multiple metadata processors not 
supported") - - def get_event_generation_hooks(self): - return self.event_generation_hooks_ - - def get_before_hardware_hooks(self): - return self.before_hardware_hooks_ - - def get_before_z_hooks(self): - return self.before_z_hooks_ - - def get_after_hardware_hooks(self): - return self.after_hardware_hooks_ - - def get_after_camera_hooks(self): - return self.after_camera_hooks_ - - def get_after_exposure_hooks(self): - return self.after_exposure_hooks_ - - def add_to_output(self, ti): - try: - if ti.tags is None and ti.pix is None: - self.events_finished_.set() - self.first_dequeue_.put(ti) - except Exception as ex: - raise RuntimeError(ex) - - def finish(self): - Engine.get_instance().finish_acquisition(self) - - def are_events_finished(self): - return self.events_finished_.is_set() - - def block_until_events_finished(self, timeout=None): - """Blocks until all events have been processed.""" - self.events_finished_.wait(timeout) - - def block_unless_aborted(self, timeout_ms=None): - """Blocks until acquisition is aborted.""" - self.abort_requested_.wait(timeout_ms / 1000) - - - def get_image_transfer_queue_size(self): - return self.IMAGE_QUEUE_SIZE - - def get_image_transfer_queue_count(self): - return len(self.first_dequeue_) - - diff --git a/pycromanager/acquisition/acq_eng_py/main/acq_eng_metadata.py b/pycromanager/acquisition/acq_eng_py/main/acq_eng_metadata.py index aef8807e..f5048137 100644 --- a/pycromanager/acquisition/acq_eng_py/main/acq_eng_metadata.py +++ b/pycromanager/acquisition/acq_eng_py/main/acq_eng_metadata.py @@ -114,7 +114,7 @@ def make_summary_metadata(core, acq): AcqEngMetadata.set_pixel_type_from_byte_depth(summary, byte_depth) AcqEngMetadata.set_pixel_size_um(summary, core.get_pixel_size_um()) - # Info about core devices + # Info about core devices.py try: AcqEngMetadata.set_core_xy(summary, core.get_xy_stage_device()) AcqEngMetadata.set_core_focus(summary, core.get_focus_device()) @@ -125,7 +125,7 @@ def make_summary_metadata(core, acq): 
AcqEngMetadata.set_core_slm(summary, core.get_slm_device()) AcqEngMetadata.set_core_shutter(summary, core.get_shutter_device()) except Exception as e: - raise RuntimeError("couldn't get info from core about devices") + raise RuntimeError("couldn't get info from core about devices.py") # TODO restore # # Affine transform diff --git a/pycromanager/acquisition/acq_eng_py/main/acquisition_event.py b/pycromanager/acquisition/acq_eng_py/main/acquisition_event.py index 22d7601d..cb5f6289 100644 --- a/pycromanager/acquisition/acq_eng_py/main/acquisition_event.py +++ b/pycromanager/acquisition/acq_eng_py/main/acquisition_event.py @@ -1,451 +1,451 @@ -from collections import namedtuple -import json -from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata - -class AcquisitionEvent: - class SpecialFlag: - ACQUISITION_FINISHED = "AcqusitionFinished" - ACQUISITION_SEQUENCE_END = "AcqusitionSequenceEnd" - - def __init__(self, acq, sequence=None): - self.acquisition_ = acq - self.axisPositions_ = {} - self.camera_ = None - self.timeout_ms_ = None - self.configGroup_ = None - self.configPreset_ = None - self.exposure_ = None - self.miniumumStartTime_ms_ = None - self.zPosition_ = None - self.xPosition_ = None - self.yPosition_ = None - self.stageCoordinates_ = {} - self.stageDeviceNamesToAxisNames_ = {} - self.tags_ = {} - self.acquireImage_ = None - self.slmImage_ = None - self.properties_ = set() - self.sequence_ = None - self.xySequenced_ = False - self.zSequenced_ = False - self.exposureSequenced_ = False - self.configGroupSequenced_ = False - self.specialFlag_ = None - - if sequence: - self.acquisition_ = sequence[0].acquisition_ - self.miniumumStartTime_ms_ = sequence[0].miniumumStartTime_ms_ - self.sequence_ = list(sequence) - zPosSet = set() - xPosSet = set() - yPosSet = set() - exposureSet = set() - configSet = set() - for event in self.sequence_: - if event.zPosition_ is not None: - zPosSet.add(event.get_z_position()) - if event.xPosition_ is 
not None: - xPosSet.add(event.get_x_position()) - if event.yPosition_ is not None: - yPosSet.add(event.get_y_position()) - if event.exposure_ is not None: - exposureSet.add(event.get_exposure()) - if event.configPreset_ is not None: - configSet.add(event.get_config_preset()) - self.exposureSequenced_ = len(exposureSet) > 1 - self.configGroupSequenced_ = len(configSet) > 1 - self.xySequenced_ = len(xPosSet) > 1 and len(yPosSet) > 1 - self.zSequenced_ = len(zPosSet) > 1 - if sequence[0].exposure_ and not self.exposureSequenced_: - self.exposure_ = sequence[0].exposure_ - - - def copy(self): - e = AcquisitionEvent(self.acquisition_) - e.axisPositions_ = self.axisPositions_.copy() - e.configPreset_ = self.configPreset_ - e.configGroup_ = self.configGroup_ - e.stageCoordinates_ = self.stageCoordinates_.copy() - e.stageDeviceNamesToAxisNames_ = self.stageDeviceNamesToAxisNames_.copy() - e.xPosition_ = self.xPosition_ - e.yPosition_ = self.yPosition_ - e.zPosition_ = self.zPosition_ - e.miniumumStartTime_ms_ = self.miniumumStartTime_ms_ - e.slmImage_ = self.slmImage_ - e.acquireImage_ = self.acquireImage_ - e.properties_ = set(self.properties_) - e.camera_ = self.camera_ - e.timeout_ms_ = self.timeout_ms_ - e.setTags(self.tags_) # Assuming setTags is a method in the class - return e - - @staticmethod - def event_to_json(e): - data = {} - - if e.is_acquisition_finished_event(): - data["special"] = "acquisition-end" - return data - elif e.is_acquisition_sequence_end_event(): - data["special"] = "sequence-end" - return data - - if e.miniumumStartTime_ms_: - data["min_start_time"] = e.miniumumStartTime_ms_ / 1000 - - if e.has_config_group(): - data["config_group"] = [e.configGroup_, e.configPreset_] - - if e.exposure_ is not None: - data["exposure"] = e.exposure_ - - if e.slmImage_: - data["slm_pattern"] = e.slmImage_ - - if e.timeout_ms_ is not None: - data["timeout_ms"] = e.timeout_ms_ - - axes = {axis: e.axisPositions_[axis] for axis in e.axisPositions_} - if axes: - 
data["axes"] = axes - - stage_positions = [[stageDevice, e.get_stage_single_axis_stage_position(stageDevice)] - for stageDevice in e.get_stage_device_names()] - if stage_positions: - data["stage_positions"] = stage_positions - - if e.zPosition_ is not None: - data["z"] = e.zPosition_ - - if e.xPosition_ is not None: - data["x"] = e.xPosition_ - - if e.yPosition_ is not None: - data["y"] = e.yPosition_ - - if e.camera_: - data["camera"] = e.camera_ - - if e.get_tags() and e.get_tags(): # Assuming getTags is a method in the class - data["tags"] = {key: value for key, value in e.getTags().items()} - - props = [[t.dev, t.prop, t.val] for t in e.properties_] - if props: - data["properties"] = props - - return data - - @staticmethod - def event_from_json(data, acq): - if "special" in data: - if data["special"] == "acquisition-end": - return AcquisitionEvent.create_acquisition_finished_event(acq) - elif data["special"] == "sequence-end": - return AcquisitionEvent.create_acquisition_sequence_end_event(acq) - - event = AcquisitionEvent(acq) - - if "axes" in data: - for axisLabel, value in data["axes"].items(): - event.axisPositions_[axisLabel] = value - - if "min_start_time" in data: - event.miniumumStartTime_ms_ = int(data["min_start_time"] * 1000) - - if "timeout_ms" in data: - event.timeout_ms_ = float(data["timeout_ms"]) - - if "config_group" in data: - event.configGroup_ = data["config_group"][0] - event.configPreset_ = data["config_group"][1] - - if "exposure" in data: - event.exposure_ = float(data["exposure"]) - - # if "timeout_ms" in data: - # event.slmImage_ = float(data["timeout_ms"]) - - if "stage_positions" in data: - for stagePos in data["stage_positions"]: - event.set_stage_coordinate(stagePos[0], stagePos[1]) - - if "z" in data: - event.zPosition_ = float(data["z"]) - - if "stage" in data: - deviceName = data["stage"]["device_name"] - position = data["stage"]["position"] - event.axisPositions_[deviceName] = float(position) - if "axis_name" in data["stage"]: 
- axisName = data["stage"]["axis_name"] - event.stageDeviceNamesToAxisNames_[deviceName] = axisName - - # # Assuming XYTiledAcquisition is a class and AcqEngMetadata is a class or module with constants - # if isinstance(event.acquisition_, XYTiledAcquisition): - # posIndex = event.acquisition_.getPixelStageTranslator().getPositionIndices( - # [int(event.axisPositions_[AcqEngMetadata.AXES_GRID_ROW])], - # [int(event.axisPositions_[AcqEngMetadata.AXES_GRID_COL])])[0] - # xyPos = event.acquisition_.getPixelStageTranslator().getXYPosition(posIndex).getCenter() - # event.xPosition_ = xyPos.x - # event.yPosition_ = xyPos.y - - if "x" in data: - event.xPosition_ = float(data["x"]) - - if "y" in data: - event.yPosition_ = float(data["y"]) - - if "slm_pattern" in data: - event.slmImage_ = data["slm_pattern"] - - if "camera" in data: - event.camera_ = data["camera"] - - if "tags" in data: - tags = {key: value for key, value in data["tags"].items()} - event.setTags(tags) - - if "properties" in data: - for trip in data["properties"]: - t = ThreeTuple(trip[0], trip[1], trip[2]) - event.properties_.add(t) - - return event - - def to_json(self): - if self.sequence_: - events = [self.event_to_json(e) for e in self.sequence_] - return events - else: - return self.event_to_json(self) - - @staticmethod - def from_json(data, acq): - if not isinstance(data, list): - return AcquisitionEvent.event_from_json(data, acq) - else: - sequence = [AcquisitionEvent.event_from_json(event, acq) for event in data] - return AcquisitionEvent(acq, sequence=sequence) - - def get_camera_device_name(self): - return self.camera_ - - def set_camera_device_name(self, camera): - self.camera_ = camera - - def get_additional_properties(self): - return [(t.dev, t.prop, t.val) for t in self.properties_] - - def should_acquire_image(self): - if self.sequence_: - return True - return self.configPreset_ is not None or self.axisPositions_ is not None - - def has_config_group(self): - return self.configPreset_ is not 
None and self.configGroup_ is not None - - def get_config_preset(self): - return self.configPreset_ - - def get_config_group(self): - return self.configGroup_ - - def set_config_preset(self, config): - self.configPreset_ = config - - def set_config_group(self, group): - self.configGroup_ = group - - def get_exposure(self): - return self.exposure_ - - def set_exposure(self, exposure): - self.exposure_ = exposure - - def set_property(self, device, property, value): - self.properties_.add(ThreeTuple(device, property, value)) - - def set_minimum_start_time(self, l): - self.miniumumStartTime_ms_ = l - - def get_defined_axes(self): - return set(self.axisPositions_.keys()) - - def set_axis_position(self, label, position): - if position is None: - raise Exception("Cannot set axis position to null") - self.axisPositions_[label] = position - - def set_stage_coordinate(self, deviceName, v, axisName=None): - self.stageCoordinates_[deviceName] = v - self.stageDeviceNamesToAxisNames_[deviceName] = deviceName if axisName is None else axisName - - def get_stage_single_axis_stage_position(self, deviceName): - return self.stageCoordinates_.get(deviceName) - - def get_axis_positions(self): - return self.axisPositions_ - - def get_axis_position(self, label): - return self.axisPositions_.get(label) - - def get_timeout_ms(self): - return self.timeout_ms_ - - def set_time_index(self, index): - self.set_axis_position(AcqEngMetadata.TIME_AXIS, index) - - def set_channel_name(self, name): - self.set_axis_position(AcqEngMetadata.CHANNEL_AXIS, name) - - def get_slm_image(self): - return self.slmImage_ - - def set_z(self, index, position): - if index is not None: - self.set_axis_position(AcqEngMetadata.Z_AXIS, index) - self.zPosition_ = position - - def get_t_index(self): - return self.get_axis_position(AcqEngMetadata.TIME_AXIS) - - def get_z_index(self): - return self.get_axis_position(AcqEngMetadata.Z_AXIS) - - def get_device_axis_name(self, deviceName): - if deviceName not in 
self.stageDeviceNamesToAxisNames_: - raise Exception(f"No axis name for device {deviceName}. call setStageCoordinate first") - return self.stageDeviceNamesToAxisNames_[deviceName] - - def get_stage_device_names(self): - return set(self.stageDeviceNamesToAxisNames_.keys()) - - @staticmethod - def create_acquisition_finished_event(acq): - evt = AcquisitionEvent(acq) - evt.specialFlag_ = AcquisitionEvent.SpecialFlag.ACQUISITION_FINISHED - return evt - - def is_acquisition_finished_event(self): - return self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_FINISHED - - @staticmethod - def create_acquisition_sequence_end_event(acq): - evt = AcquisitionEvent(acq) - evt.specialFlag_ = AcquisitionEvent.SpecialFlag.ACQUISITION_SEQUENCE_END - return evt - - def is_acquisition_sequence_end_event(self): - return self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_SEQUENCE_END - - def get_z_position(self): - return self.zPosition_ - - def get_minimum_start_time_absolute(self): - if self.miniumumStartTime_ms_ is None: - return None - return self.acquisition_.get_start_time_ms() + self.miniumumStartTime_ms_ - - def get_sequence(self): - return self.sequence_ - - def is_exposure_sequenced(self): - return self.exposureSequenced_ - - def is_config_group_sequenced(self): - return self.configGroupSequenced_ - - def is_xy_sequenced(self): - return self.xySequenced_ - - def is_z_sequenced(self): - return self.zSequenced_ - - def get_x_position(self): - return self.xPosition_ - - def get_camera_image_counts(self, default_camera_device_name): - """ - Get the number of images to be acquired on each camera in a sequence event. - For a non-sequence event, the number of images is 1, and the camera is the core camera. - This is passed in as an argument in order to avoid this class talking to the core directly. - - Args: - default_camera_device_name (str): Default camera device name. 
- - Returns: - defaultdict: Dictionary containing the camera device names as keys and image counts as values. - """ - # Figure out how many images on each camera and start sequence with appropriate number on each - camera_image_counts = {} - camera_device_names = set() - if self.get_sequence() is None: - camera_image_counts[default_camera_device_name] = 1 - return camera_image_counts - - for event in self.get_sequence(): - camera_device_names.add(event.get_camera_device_name() if event.get_camera_device_name() is not None else - default_camera_device_name) - if None in camera_device_names: - camera_device_names.remove(None) - camera_device_names.add(default_camera_device_name) - - for camera_device_name in camera_device_names: - camera_image_counts[camera_device_name] = sum(1 for event in self.get_sequence() - if event.get_camera_device_name() == camera_device_name) - - if len(camera_device_names) == 1 and camera_device_name == default_camera_device_name: - camera_image_counts[camera_device_name] = len(self.get_sequence()) - - return camera_image_counts - - def get_y_position(self): - return self.yPosition_ - - def get_position_name(self): - axisPosition_ = self.get_axis_position(AcqEngMetadata.POSITION_AXIS) - if isinstance(axisPosition_, str): - return axisPosition_ - return None - - def set_x(self, x): - self.xPosition_ = x - - def set_y(self, y): - self.yPosition_ = y - - def set_tags(self, tags): - self.tags_.clear() - if tags: - self.tags_.update(tags) - - def get_tags(self): - return dict(self.tags_) - - def __str__(self): - if self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_FINISHED: - return "Acq finished event" - elif self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_SEQUENCE_END: - return "Acq sequence end event" - - builder = [] - for deviceName in self.stageDeviceNamesToAxisNames_.keys(): - builder.append(f"\t{deviceName}: {self.get_stage_single_axis_stage_position(deviceName)}") - - if self.zPosition_ is not None: - 
builder.append(f"z {self.zPosition_}") - if self.xPosition_ is not None: - builder.append(f"x {self.xPosition_}") - if self.yPosition_ is not None: - builder.append(f"y {self.yPosition_}") - - for axis in self.axisPositions_.keys(): - builder.append(f"\t{axis}: {self.axisPositions_[axis]}") - - if self.camera_ is not None: - builder.append(f"\t{self.camera_}: {self.camera_}") - - return ' '.join(builder) - - -ThreeTuple = namedtuple('ThreeTuple', ['dev', 'prop', 'val']) +# from collections import namedtuple +# import json +# from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata +# +# class AcquisitionEvent: +# class SpecialFlag: +# ACQUISITION_FINISHED = "AcqusitionFinished" +# ACQUISITION_SEQUENCE_END = "AcqusitionSequenceEnd" +# +# def __init__(self, acq, sequence=None): +# self.acquisition_ = acq +# self.axisPositions_ = {} +# self.camera_ = None +# self.timeout_ms_ = None +# self.configGroup_ = None +# self.configPreset_ = None +# self.exposure_ = None +# self.miniumumStartTime_ms_ = None +# self.zPosition_ = None +# self.xPosition_ = None +# self.yPosition_ = None +# self.stageCoordinates_ = {} +# self.stageDeviceNamesToAxisNames_ = {} +# self.tags_ = {} +# self.acquireImage_ = None +# self.slmImage_ = None +# self.properties_ = set() +# self.sequence_ = None +# self.xySequenced_ = False +# self.zSequenced_ = False +# self.exposureSequenced_ = False +# self.configGroupSequenced_ = False +# self.specialFlag_ = None +# +# if sequence: +# self.acquisition_ = sequence[0].acquisition_ +# self.miniumumStartTime_ms_ = sequence[0].miniumumStartTime_ms_ +# self.sequence_ = list(sequence) +# zPosSet = set() +# xPosSet = set() +# yPosSet = set() +# exposureSet = set() +# configSet = set() +# for event in self.sequence_: +# if event.zPosition_ is not None: +# zPosSet.add(event.get_z_position()) +# if event.xPosition_ is not None: +# xPosSet.add(event.get_x_position()) +# if event.yPosition_ is not None: +# yPosSet.add(event.get_y_position()) 
+# if event.exposure_ is not None: +# exposureSet.add(event.get_exposure()) +# if event.configPreset_ is not None: +# configSet.add(event.get_config_preset()) +# self.exposureSequenced_ = len(exposureSet) > 1 +# self.configGroupSequenced_ = len(configSet) > 1 +# self.xySequenced_ = len(xPosSet) > 1 and len(yPosSet) > 1 +# self.zSequenced_ = len(zPosSet) > 1 +# if sequence[0].exposure_ and not self.exposureSequenced_: +# self.exposure_ = sequence[0].exposure_ +# +# +# def copy(self): +# e = AcquisitionEvent(self.acquisition_) +# e.axisPositions_ = self.axisPositions_.copy() +# e.configPreset_ = self.configPreset_ +# e.configGroup_ = self.configGroup_ +# e.stageCoordinates_ = self.stageCoordinates_.copy() +# e.stageDeviceNamesToAxisNames_ = self.stageDeviceNamesToAxisNames_.copy() +# e.xPosition_ = self.xPosition_ +# e.yPosition_ = self.yPosition_ +# e.zPosition_ = self.zPosition_ +# e.miniumumStartTime_ms_ = self.miniumumStartTime_ms_ +# e.slmImage_ = self.slmImage_ +# e.acquireImage_ = self.acquireImage_ +# e.properties_ = set(self.properties_) +# e.camera_ = self.camera_ +# e.timeout_ms_ = self.timeout_ms_ +# e.setTags(self.tags_) # Assuming setTags is a method in the class +# return e +# +# @staticmethod +# def event_to_json(e): +# data = {} +# +# if e.is_acquisition_finished_event(): +# data["special"] = "acquisition-end" +# return data +# elif e.is_acquisition_sequence_end_event(): +# data["special"] = "sequence-end" +# return data +# +# if e.miniumumStartTime_ms_: +# data["min_start_time"] = e.miniumumStartTime_ms_ / 1000 +# +# if e.has_config_group(): +# data["config_group"] = [e.configGroup_, e.configPreset_] +# +# if e.exposure_ is not None: +# data["exposure"] = e.exposure_ +# +# if e.slmImage_: +# data["slm_pattern"] = e.slmImage_ +# +# if e.timeout_ms_ is not None: +# data["timeout_ms"] = e.timeout_ms_ +# +# axes = {axis: e.axisPositions_[axis] for axis in e.axisPositions_} +# if axes: +# data["axes"] = axes +# +# stage_positions = [[stageDevice, 
e.get_stage_single_axis_stage_position(stageDevice)] +# for stageDevice in e.get_stage_device_names()] +# if stage_positions: +# data["stage_positions"] = stage_positions +# +# if e.zPosition_ is not None: +# data["z"] = e.zPosition_ +# +# if e.xPosition_ is not None: +# data["x"] = e.xPosition_ +# +# if e.yPosition_ is not None: +# data["y"] = e.yPosition_ +# +# if e.camera_: +# data["camera"] = e.camera_ +# +# if e.get_tags() and e.get_tags(): # Assuming getTags is a method in the class +# data["tags"] = {key: value for key, value in e.getTags().items()} +# +# props = [[t.dev, t.prop, t.val] for t in e.properties_] +# if props: +# data["properties"] = props +# +# return data +# +# @staticmethod +# def event_from_json(data, acq): +# if "special" in data: +# if data["special"] == "acquisition-end": +# return AcquisitionEvent.create_acquisition_finished_event(acq) +# elif data["special"] == "sequence-end": +# return AcquisitionEvent.create_acquisition_sequence_end_event(acq) +# +# event = AcquisitionEvent(acq) +# +# if "axes" in data: +# for axisLabel, value in data["axes"].items(): +# event.axisPositions_[axisLabel] = value +# +# if "min_start_time" in data: +# event.miniumumStartTime_ms_ = int(data["min_start_time"] * 1000) +# +# if "timeout_ms" in data: +# event.timeout_ms_ = float(data["timeout_ms"]) +# +# if "config_group" in data: +# event.configGroup_ = data["config_group"][0] +# event.configPreset_ = data["config_group"][1] +# +# if "exposure" in data: +# event.exposure_ = float(data["exposure"]) +# +# # if "timeout_ms" in data: +# # event.slmImage_ = float(data["timeout_ms"]) +# +# if "stage_positions" in data: +# for stagePos in data["stage_positions"]: +# event.set_stage_coordinate(stagePos[0], stagePos[1]) +# +# if "z" in data: +# event.zPosition_ = float(data["z"]) +# +# if "stage" in data: +# deviceName = data["stage"]["device_name"] +# position = data["stage"]["position"] +# event.axisPositions_[deviceName] = float(position) +# if "axis_name" in 
data["stage"]: +# axisName = data["stage"]["axis_name"] +# event.stageDeviceNamesToAxisNames_[deviceName] = axisName +# +# # # Assuming XYTiledAcquisition is a class and AcqEngMetadata is a class or module with constants +# # if isinstance(event.acquisition_, XYTiledAcquisition): +# # posIndex = event.acquisition_.getPixelStageTranslator().getPositionIndices( +# # [int(event.axisPositions_[AcqEngMetadata.AXES_GRID_ROW])], +# # [int(event.axisPositions_[AcqEngMetadata.AXES_GRID_COL])])[0] +# # xyPos = event.acquisition_.getPixelStageTranslator().getXYPosition(posIndex).getCenter() +# # event.xPosition_ = xyPos.x +# # event.yPosition_ = xyPos.y +# +# if "x" in data: +# event.xPosition_ = float(data["x"]) +# +# if "y" in data: +# event.yPosition_ = float(data["y"]) +# +# if "slm_pattern" in data: +# event.slmImage_ = data["slm_pattern"] +# +# if "camera" in data: +# event.camera_ = data["camera"] +# +# if "tags" in data: +# tags = {key: value for key, value in data["tags"].items()} +# event.setTags(tags) +# +# if "properties" in data: +# for trip in data["properties"]: +# t = ThreeTuple(trip[0], trip[1], trip[2]) +# event.properties_.add(t) +# +# return event +# +# def to_json(self): +# if self.sequence_: +# events = [self.event_to_json(e) for e in self.sequence_] +# return events +# else: +# return self.event_to_json(self) +# +# @staticmethod +# def from_json(data, acq): +# if not isinstance(data, list): +# return AcquisitionEvent.event_from_json(data, acq) +# else: +# sequence = [AcquisitionEvent.event_from_json(event, acq) for event in data] +# return AcquisitionEvent(acq, sequence=sequence) +# +# def get_camera_device_name(self): +# return self.camera_ +# +# def set_camera_device_name(self, camera): +# self.camera_ = camera +# +# def get_additional_properties(self): +# return [(t.dev, t.prop, t.val) for t in self.properties_] +# +# def should_acquire_image(self): +# if self.sequence_: +# return True +# return self.configPreset_ is not None or self.axisPositions_ 
is not None +# +# def has_config_group(self): +# return self.configPreset_ is not None and self.configGroup_ is not None +# +# def get_config_preset(self): +# return self.configPreset_ +# +# def get_config_group(self): +# return self.configGroup_ +# +# def set_config_preset(self, config): +# self.configPreset_ = config +# +# def set_config_group(self, group): +# self.configGroup_ = group +# +# def get_exposure(self): +# return self.exposure_ +# +# def set_exposure(self, exposure): +# self.exposure_ = exposure +# +# def set_property(self, device, property, value): +# self.properties_.add(ThreeTuple(device, property, value)) +# +# def set_minimum_start_time(self, l): +# self.miniumumStartTime_ms_ = l +# +# def get_defined_axes(self): +# return set(self.axisPositions_.keys()) +# +# def set_axis_position(self, label, position): +# if position is None: +# raise Exception("Cannot set axis position to null") +# self.axisPositions_[label] = position +# +# def set_stage_coordinate(self, deviceName, v, axisName=None): +# self.stageCoordinates_[deviceName] = v +# self.stageDeviceNamesToAxisNames_[deviceName] = deviceName if axisName is None else axisName +# +# def get_stage_single_axis_stage_position(self, deviceName): +# return self.stageCoordinates_.get(deviceName) +# +# def get_axis_positions(self): +# return self.axisPositions_ +# +# def get_axis_position(self, label): +# return self.axisPositions_.get(label) +# +# def get_timeout_ms(self): +# return self.timeout_ms_ +# +# def set_time_index(self, index): +# self.set_axis_position(AcqEngMetadata.TIME_AXIS, index) +# +# def set_channel_name(self, name): +# self.set_axis_position(AcqEngMetadata.CHANNEL_AXIS, name) +# +# def get_slm_image(self): +# return self.slmImage_ +# +# def set_z(self, index, position): +# if index is not None: +# self.set_axis_position(AcqEngMetadata.Z_AXIS, index) +# self.zPosition_ = position +# +# def get_t_index(self): +# return self.get_axis_position(AcqEngMetadata.TIME_AXIS) +# +# def 
get_z_index(self): +# return self.get_axis_position(AcqEngMetadata.Z_AXIS) +# +# def get_device_axis_name(self, deviceName): +# if deviceName not in self.stageDeviceNamesToAxisNames_: +# raise Exception(f"No axis name for device {deviceName}. call setStageCoordinate first") +# return self.stageDeviceNamesToAxisNames_[deviceName] +# +# def get_stage_device_names(self): +# return set(self.stageDeviceNamesToAxisNames_.keys()) +# +# @staticmethod +# def create_acquisition_finished_event(acq): +# evt = AcquisitionEvent(acq) +# evt.specialFlag_ = AcquisitionEvent.SpecialFlag.ACQUISITION_FINISHED +# return evt +# +# def is_acquisition_finished_event(self): +# return self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_FINISHED +# +# @staticmethod +# def create_acquisition_sequence_end_event(acq): +# evt = AcquisitionEvent(acq) +# evt.specialFlag_ = AcquisitionEvent.SpecialFlag.ACQUISITION_SEQUENCE_END +# return evt +# +# def is_acquisition_sequence_end_event(self): +# return self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_SEQUENCE_END +# +# def get_z_position(self): +# return self.zPosition_ +# +# def get_minimum_start_time_absolute(self): +# if self.miniumumStartTime_ms_ is None: +# return None +# return self.acquisition_.get_start_time_ms() + self.miniumumStartTime_ms_ +# +# def get_sequence(self): +# return self.sequence_ +# +# def is_exposure_sequenced(self): +# return self.exposureSequenced_ +# +# def is_config_group_sequenced(self): +# return self.configGroupSequenced_ +# +# def is_xy_sequenced(self): +# return self.xySequenced_ +# +# def is_z_sequenced(self): +# return self.zSequenced_ +# +# def get_x_position(self): +# return self.xPosition_ +# +# def get_camera_image_counts(self, default_camera_device_name): +# """ +# Get the number of images to be acquired on each camera in a sequence event. +# For a non-sequence event, the number of images is 1, and the camera is the core camera. 
+# This is passed in as an argument in order to avoid this class talking to the core directly. +# +# Args: +# default_camera_device_name (str): Default camera device name. +# +# Returns: +# defaultdict: Dictionary containing the camera device names as keys and image counts as values. +# """ +# # Figure out how many images on each camera and start sequence with appropriate number on each +# camera_image_counts = {} +# camera_device_names = set() +# if self.get_sequence() is None: +# camera_image_counts[default_camera_device_name] = 1 +# return camera_image_counts +# +# for event in self.get_sequence(): +# camera_device_names.add(event.get_camera_device_name() if event.get_camera_device_name() is not None else +# default_camera_device_name) +# if None in camera_device_names: +# camera_device_names.remove(None) +# camera_device_names.add(default_camera_device_name) +# +# for camera_device_name in camera_device_names: +# camera_image_counts[camera_device_name] = sum(1 for event in self.get_sequence() +# if event.get_camera_device_name() == camera_device_name) +# +# if len(camera_device_names) == 1 and camera_device_name == default_camera_device_name: +# camera_image_counts[camera_device_name] = len(self.get_sequence()) +# +# return camera_image_counts +# +# def get_y_position(self): +# return self.yPosition_ +# +# def get_position_name(self): +# axisPosition_ = self.get_axis_position(AcqEngMetadata.POSITION_AXIS) +# if isinstance(axisPosition_, str): +# return axisPosition_ +# return None +# +# def set_x(self, x): +# self.xPosition_ = x +# +# def set_y(self, y): +# self.yPosition_ = y +# +# def set_tags(self, tags): +# self.tags_.clear() +# if tags: +# self.tags_.update(tags) +# +# def get_tags(self): +# return dict(self.tags_) +# +# def __str__(self): +# if self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_FINISHED: +# return "Acq finished event" +# elif self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_SEQUENCE_END: +# return "Acq sequence end 
event" +# +# builder = [] +# for deviceName in self.stageDeviceNamesToAxisNames_.keys(): +# builder.append(f"\t{deviceName}: {self.get_stage_single_axis_stage_position(deviceName)}") +# +# if self.zPosition_ is not None: +# builder.append(f"z {self.zPosition_}") +# if self.xPosition_ is not None: +# builder.append(f"x {self.xPosition_}") +# if self.yPosition_ is not None: +# builder.append(f"y {self.yPosition_}") +# +# for axis in self.axisPositions_.keys(): +# builder.append(f"\t{axis}: {self.axisPositions_[axis]}") +# +# if self.camera_ is not None: +# builder.append(f"\t{self.camera_}: {self.camera_}") +# +# return ' '.join(builder) +# +# +# ThreeTuple = namedtuple('ThreeTuple', ['dev', 'prop', 'val']) diff --git a/pycromanager/acquisition/acq_eng_py/mm_device_implementations.py b/pycromanager/acquisition/acq_eng_py/mm_device_implementations.py new file mode 100644 index 00000000..9cb36eb6 --- /dev/null +++ b/pycromanager/acquisition/acq_eng_py/mm_device_implementations.py @@ -0,0 +1,85 @@ +""" +Implementation of Micro-Manager devices.py in terms of the AcqEng bottom API +""" + +from pycromanager.acquisition.acq_eng_py.device_api import SingleAxisMovable, DoubleAxisMovable, Camera +from pycromanager.core import Core +import numpy as np +import pymmcore +import time + + + +class MicroManagerCamera(Camera): + + def __init__(self, device_name=None): + """ + :param device_name: Name of the camera device in Micro-Manager. If None, and there is only one camera, that camera + will be used. If None and there are multiple cameras, an error will be raised + """ + self._core = Core() + camera_names = self._core.get_loaded_devices_of_type(2) # 2 means camera... 
+ if not camera_names: + raise ValueError("No cameras found") + if device_name is None and len(camera_names) > 1: + raise ValueError("Multiple cameras found, must specify device name") + + if device_name is None: + self.device_name = camera_names[0] + else: + if device_name not in camera_names: + raise ValueError(f"Camera {device_name} not found") + self.device_name = device_name + + + def set_exposure(self, exposure: float) -> None: + self._core.set_exposure(self.device_name, exposure) + + def get_exposure(self) -> float: + return self._core.get_exposure(self.device_name) + + def arm(self, frame_count=None) -> None: + if frame_count == 1: + # nothing to prepare because snap will be called + pass + elif frame_count is None: + # No need to prepare for continuous sequence acquisition + pass + else: + self._core.prepare_acquisition() + self._frame_count = 1 + + def start(self) -> None: + if self._frame_count == 1: + # TODO: put this on a different thread so it can return immediately + self._core.snap_image() + elif self._frame_count is None: + # set core camera to this camera because there's no version of this call where you specify the camera + self._core.set_camera_device(self.device_name) + self._core.start_continuous_sequence_acquisition(0) + else: + self._core.start_sequence_acquisition(self._frame_count, 0, True) + + def stop(self) -> None: + self._core.stop_acquisition() + + def pop_image(self, timeout=None) -> (np.ndarray, dict): + if self._frame_count != 1: + md = pymmcore.Metadata() + start_time = time.time() + while True: + pix = self._core.pop_next_image_md(0, 0, md) + if pix is not None: + break + # sleep for the shortest possible time, only to allow the thread to be interrupted and prevent + # GIL weirdness. 
But perhaps this is not necessary + time.sleep(0.000001) + if timeout is not None and time.time() - start_time > timeout: + return None, None + + metadata = {key: md.GetSingleTag(key).GetValue() for key in md.GetKeys()} + return pix, metadata + else: + # Is there no metadata when calling snapimage? + metadata = {} + return self._core.get_image(), metadata \ No newline at end of file diff --git a/pycromanager/acquisition/acquisition_superclass.py b/pycromanager/acquisition/acquisition_superclass.py index ccf00db9..047972b1 100644 --- a/pycromanager/acquisition/acquisition_superclass.py +++ b/pycromanager/acquisition/acquisition_superclass.py @@ -5,22 +5,20 @@ import copy import types import numpy as np -from typing import Union, List, Iterable +from typing import List, Iterable import warnings from abc import ABCMeta, abstractmethod from docstring_inheritance import NumpyDocstringInheritanceMeta import queue import weakref from pycromanager.acq_future import AcqNotification, AcquisitionFuture -import os import threading from inspect import signature -from typing import Generator from types import GeneratorType -import time from queue import Queue from typing import Generator, Dict, Union +from pycromanager.acquisition.new.acq_events import AcquisitionEvent class EventQueue(Queue): @@ -30,13 +28,14 @@ class EventQueue(Queue): """ def __init__(self, maxsize=0): super().__init__(maxsize) - self.current_generator: Union[Generator[Dict, None, None], None] = None + self.current_generator: Union[Generator[AcquisitionEvent, None, None], None] = None def clear(self): self.queue.clear() self.current_generator = None - def put(self, item: Union[Dict, Generator[Dict, None, None]], block=True, timeout=None): + def put(self, item: Union[AcquisitionEvent, List[AcquisitionEvent], + Generator[AcquisitionEvent, None, None], None], block=True, timeout=None): if isinstance(item, dict): super().put(item, block, timeout) elif isinstance(item, list): @@ -50,7 +49,7 @@ def put(self, item: 
Union[Dict, Generator[Dict, None, None]], block=True, timeou else: raise TypeError("Event must be a dictionary, list or generator") - def get(self, block=True, timeout=None) -> Dict: + def get(self, block=True, timeout=None) -> AcquisitionEvent: while True: if self.current_generator is None: item = super().get(block, timeout) @@ -247,7 +246,7 @@ def acquire(self, event_or_events: dict or list or Generator) -> AcquisitionFutu """ try: - if self._acq.are_events_finished(): + if self._are_events_finished(): raise AcqAlreadyCompleteException( 'Cannot submit more events because this acquisition is already finished') @@ -260,7 +259,7 @@ def acquire(self, event_or_events: dict or list or Generator) -> AcquisitionFutu acq_future = AcquisitionFuture(self) def notifying_generator(original_generator): - # store in a weakref so that if user code doesn't hange on to AcqFuture + # store in a weakref so that if user code doesn't hang on to AcqFuture # it doesn't needlessly track events acq_future_weakref = weakref.ref(acq_future) for event in original_generator: @@ -269,6 +268,7 @@ def notifying_generator(original_generator): acq_future._monitor_axes(event['axes']) _validate_acq_events(event) yield event + event_or_events = notifying_generator(event_or_events) else: _validate_acq_events(event_or_events) @@ -303,8 +303,14 @@ def abort(self, exception=None): self._event_queue.clear() # Don't send any more events. 
The event sending thread should know shut itself down by # checking the status of the acquisition - self._acq.abort() + self.abort() + @abstractmethod + def _are_events_finished(self): + """ + Check if all events have been processed and executed + """ + pass def _add_storage_monitor_fn(self, image_saved_fn=None): """ @@ -422,18 +428,18 @@ def _validate_acq_dict(event: dict): def multi_d_acquisition_events( - num_time_points: int=None, - time_interval_s: Union[float, List[float]]=0, - z_start: float=None, - z_end: float=None, - z_step: float=None, - channel_group: str=None, - channels: list=None, - channel_exposures_ms: list=None, - xy_positions: Iterable=None, - xyz_positions: Iterable=None, - position_labels: List[str]=None, - order: str="tpcz", + num_time_points: int = None, + time_interval_s: Union[float, List[float]] = 0, + z_start: float = None, + z_end: float = None, + z_step: float = None, + channel_group: str = None, + channels: list = None, + channel_exposures_ms: list = None, + xy_positions: Iterable = None, + xyz_positions: Iterable = None, + position_labels: List[str] = None, + order: str = "tpcz", ): """Convenience function for generating the events of a typical multi-dimensional acquisition (i.e. an acquisition with some combination of multiple timepoints, channels, z-slices, or xy positions) @@ -443,8 +449,8 @@ def multi_d_acquisition_events( num_time_points : int How many time points if it is a timelapse (Default value = None) time_interval_s : float or list of floats - the minimum interval between consecutive time points in seconds. If set to 0, the - acquisition will go as fast as possible. If a list is provided, its length should + the minimum interval between consecutive time points in seconds. If set to 0, the + acquisition will go as fast as possible. If a list is provided, its length should be equal to 'num_time_points'. Elements in the list are assumed to be the intervals between consecutive timepoints in the timelapse. 
First element in the list indicates delay before capturing the first image (Default value = 0) @@ -500,7 +506,7 @@ def multi_d_acquisition_events( raise ValueError("xy_positions and position_labels must be of equal length") if xyz_positions is not None and len(xyz_positions) != len(position_labels): raise ValueError("xyz_positions and position_labels must be of equal length") - + # If any of z_start, z_step, z_end are provided, then they should all be provided # Here we can't use `all` as some of the values of z_start, z_step, z_end # may be zero and all((0,)) = False @@ -576,7 +582,7 @@ def generate_events(event, order): elif order[0] == "c" and channel_group is not None and channels is not None: for i in range(len(channels)): new_event = copy.deepcopy(event) - new_event["config_group"] = [channel_group, channels[i]] + new_event["config_group"] = [channel_group, channels[i]] new_event["axes"]["channel"] = channels[i] if channel_exposures_ms is not None: new_event["exposure"] = channel_exposures_ms[i] @@ -608,6 +614,4 @@ def appender(next): events.append(next) appender(generate_events(base_event, order)) - return events - - + return events \ No newline at end of file diff --git a/pycromanager/acquisition/java_backend_acquisitions.py b/pycromanager/acquisition/java_backend_acquisitions.py index 7e4d4d9c..b64763d9 100644 --- a/pycromanager/acquisition/java_backend_acquisitions.py +++ b/pycromanager/acquisition/java_backend_acquisitions.py @@ -350,7 +350,7 @@ def __init__( def await_completion(self): try: - while not self._acq.are_events_finished() or ( + while not self._acq._are_events_finished() or ( self._acq.get_data_sink() is not None and not self._acq.get_data_sink().is_finished()): self._check_for_exceptions() self._acq.block_until_events_finished(0.01) @@ -395,6 +395,10 @@ def get_viewer(self): else: return self._napari_viewer + def abort(self, exception=None): + self._exception = exception + self._acq.abort() + ######## Private methods ########### def 
_start_receiving_notifications(self): """ @@ -432,6 +436,9 @@ def _check_for_exceptions(self): if self._exception is not None: raise self._exception + def _are_events_finished(self): + return self._acq.are_events_finished() + def _start_events(self, **kwargs): self.event_port = self._acq.get_event_port() diff --git a/pycromanager/acquisition/python_backend_acquisitions.py b/pycromanager/acquisition/python_backend_acquisitions.py index 070adc93..211a0175 100644 --- a/pycromanager/acquisition/python_backend_acquisitions.py +++ b/pycromanager/acquisition/python_backend_acquisitions.py @@ -1,25 +1,32 @@ import warnings from docstring_inheritance import NumpyDocstringInheritanceMeta -from pycromanager.acquisition.acq_eng_py.main.AcqEngPy_Acquisition import Acquisition as pymmcore_Acquisition from pycromanager.acquisition.acquisition_superclass import _validate_acq_events, Acquisition -from pycromanager.acquisition.acq_eng_py.main.acquisition_event import AcquisitionEvent -from pycromanager.acq_future import AcqNotification +from pycromanager.acquisition.new.acq_events import AcquisitionEvent +from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata +from pycromanager.acquisition.acq_eng_py.main.acq_notification import AcqNotification +from pycromanager.acquisition.acq_eng_py.internal.notification_handler import NotificationHandler +from pycromanager.acquisition.acq_eng_py.internal.engine import Engine import threading from inspect import signature import traceback +import queue from ndstorage.ndram_dataset import NDRAMDataset from ndstorage.ndtiff_dataset import NDTiffDataset +from pycromanager.acquisition.acq_eng_py.internal.hooks import EVENT_GENERATION_HOOK, \ + BEFORE_HARDWARE_HOOK, BEFORE_Z_DRIVE_HOOK, AFTER_HARDWARE_HOOK, AFTER_CAMERA_HOOK, AFTER_EXPOSURE_HOOK + + +IMAGE_QUEUE_SIZE = 30 + + class PythonBackendAcquisition(Acquisition, metaclass=NumpyDocstringInheritanceMeta): """ - Pycro-Manager acquisition that uses a Python runtime 
backend. Unlike the Java backend, - Python-backed acquisitions currently do not automatically write data to disk. Instead, by default, - they store data in RAM which can be queried with the Dataset class. If instead you want to - implement your own data storage, you can pass an image_process_fn which diverts the data to - a custom endpoint. + Pycro-Manager acquisition that uses a Python runtime backend. """ + def __init__( self, directory: str=None, @@ -42,6 +49,9 @@ def __init__( dict(signature(PythonBackendAcquisition.__init__).parameters.items())[arg_name].default) for arg_name in arg_names } super().__init__(**named_args) + + self._engine = Engine.get_instance() + self._dataset = NDRAMDataset() if not directory else NDTiffDataset(directory, name=name, writable=True) self._finished = False self._notifications_finished = False @@ -57,19 +67,46 @@ def submit_events(): while True: event_or_events = self._event_queue.get() if event_or_events is None: - self._acq.finish() - self._acq.block_until_events_finished() + self._finish() + self._events_finished.wait() break _validate_acq_events(event_or_events) if isinstance(event_or_events, dict): event_or_events = [event_or_events] # convert to objects - event_or_events = [AcquisitionEvent.from_json(event, self._acq) for event in event_or_events] - self._acq.submit_event_iterator(iter(event_or_events)) + event_or_events = [AcquisitionEvent.from_json(event, self) for event in event_or_events] + Engine.get_instance().submit_event_iterator(iter(event_or_events)) + self._event_thread = threading.Thread(target=submit_events) self._event_thread.start() - self._acq = pymmcore_Acquisition(self._dataset) + self._events_finished = threading.Event() + self.abort_requested_ = threading.Event() + self.start_time_ms_ = -1 + self.paused_ = False + + self.event_generation_hooks_ = [] + self.before_hardware_hooks_ = [] + self.before_z_hooks_ = [] + self.after_hardware_hooks_ = [] + self.after_camera_hooks_ = [] + self.after_exposure_hooks_ 
= [] + self.image_processors_ = [] + + self.first_dequeue_ = queue.Queue(maxsize=IMAGE_QUEUE_SIZE) + self.processor_output_queues_ = {} + self.debug_mode_ = False + self.abort_exception_ = None + self.image_metadata_processor_ = None + self.notification_handler_ = NotificationHandler() + self.started_ = False + self.core_ = Engine.get_core() + self.data_sink_ = self._dataset + + summary_metadata = AcqEngMetadata.make_summary_metadata(self.core_, self) + + if self.data_sink_: + self.data_sink_.initialize(summary_metadata) # receive notifications from the acquisition engine. Unlike the java_backend analog # of this, the python backend does not have a separate thread for notifications because @@ -83,7 +120,7 @@ def post_notification(notification): if self._image_notification_queue.qsize() > self._image_notification_queue.maxsize * 0.9: warnings.warn(f"Acquisition image notification queue size: {self._image_notification_queue.qsize()}") - self._acq.add_acq_notification_listener(NotificationListener(post_notification)) + self._add_acq_notification_listener(NotificationListener(post_notification)) self._notification_dispatch_thread = self._start_notification_dispatcher(notification_callback_fn) @@ -114,7 +151,10 @@ def post_notification(notification): assert isinstance(napari_viewer, napari.Viewer), 'napari_viewer must be an instance of napari.Viewer' self._napari_viewer = napari_viewer start_napari_signalling(self._napari_viewer, self.get_dataset()) - self._acq.start() + + self._start_saving_thread() + self._post_notification(AcqNotification.create_acq_started_notification()) + self.started_ = True ######## Public API ########### @@ -122,12 +162,13 @@ def post_notification(notification): def await_completion(self): """Wait for acquisition to finish and resources to be cleaned up""" try: - while not self._acq.are_events_finished() or ( - self._acq.get_data_sink() is not None and not self._acq.get_data_sink().is_finished()): + while not self._are_events_finished() or ( + 
self._dataset is not None and not self._dataset.is_finished()): self._check_for_exceptions() - self._acq.block_until_events_finished(0.05) - if self._acq.get_data_sink() is not None: - self._acq.get_data_sink().block_until_finished(0.05) + self._events_finished.wait(0.05) + if self._dataset is not None: + self._dataset.block_until_finished(0.05) + # time.sleep(0.05) # does this prevent things from getting stuck? self._check_for_exceptions() finally: self._event_thread.join() @@ -169,6 +210,126 @@ def _are_acquisition_notifications_finished(self): """ return self._notifications_finished + + def _post_notification(self, notification): + self.notification_handler_.post_notification(notification) + + def _add_acq_notification_listener(self, post_notification_fn): + self.notification_handler_.add_listener(post_notification_fn) + + def _save_image(self, image): + if image is None: + self.data_sink_.finish() + self._post_notification(AcqNotification.create_data_sink_finished_notification()) + else: + pixels, metadata = image.pix, image.tags + axes = AcqEngMetadata.get_axes(metadata) + self.data_sink_.put_image(axes, pixels, metadata) + self._post_notification(AcqNotification.create_image_saved_notification(axes)) + + def _start_saving_thread(self): + def saving_thread(acq): + try: + while True: + if acq.debug_mode_: + acq.core_.log_message(f"Image queue size: {len(acq.first_dequeue_)}") + if not acq.image_processors_: + if acq.debug_mode_: + acq.core_.log_message("waiting for image to save") + img = acq.first_dequeue_.get() + if acq.debug_mode_: + acq.core_.log_message("got image to save") + acq._save_image(img) + if img is None: + break + else: + img = acq.processor_output_queues_[acq.image_processors_[-1]].get() + if acq.data_sink_: + if acq.debug_mode_: + acq.core_.log_message("Saving image") + if img.tags is None and img.pix is None: + break + acq._save_image(img) + if acq.debug_mode_: + acq.core_.log_message("Finished saving image") + except Exception as ex: + 
traceback.print_exc() + acq.abort(ex) + finally: + acq._save_image(None) + + threading.Thread(target=saving_thread, args=(self,)).start() + + + def _add_to_output(self, ti): + try: + if ti is None: + self._events_finished.set() + self.first_dequeue_.put(ti) + except Exception as ex: + raise RuntimeError(ex) + + def _finish(self): + Engine.get_instance().finish_acquisition(self) + + def _abort(self, ex): + if ex: + self.abort_exception_ = ex + if self.abort_requested_.is_set(): + return + self.abort_requested_.set() + if self.is_paused(): + self.set_paused(False) + Engine.get_instance().finish_acquisition(self) + + def _check_for_exceptions(self): + if self.abort_exception_: + raise self.abort_exception_ + + def _add_image_processor(self, p): + if self.started_: + raise RuntimeError("Cannot add processor after acquisition started") + self.image_processors_.append(p) + self.processor_output_queues_[p] = queue.Queue(maxsize=self.IMAGE_QUEUE_SIZE) + if len(self.image_processors_) == 1: + p.set_acq_and_queues(self, self.first_dequeue_, self.processor_output_queues_[p]) + else: + p.set_acq_and_queues(self, self.processor_output_queues_[self.image_processors_[-2]], + self.processor_output_queues_[self.image_processors_[-1]]) + + def _add_hook(self, h, type_): + if self.started_: + raise RuntimeError("Cannot add hook after acquisition started") + if type_ == EVENT_GENERATION_HOOK: + self.event_generation_hooks_.append(h) + elif type_ == BEFORE_HARDWARE_HOOK: + self.before_hardware_hooks_.append(h) + elif type_ == BEFORE_Z_DRIVE_HOOK: + self.before_z_hooks_.append(h) + elif type_ == AFTER_HARDWARE_HOOK: + self.after_hardware_hooks_.append(h) + elif type_ == AFTER_CAMERA_HOOK: + self.after_camera_hooks_.append(h) + elif type_ == AFTER_EXPOSURE_HOOK: + self.after_exposure_hooks_.append(h) + + def _get_hooks(self, type): + if type == EVENT_GENERATION_HOOK: + return self.event_generation_hooks_ + elif type == BEFORE_HARDWARE_HOOK: + return self.before_hardware_hooks_ + elif 
type == BEFORE_Z_DRIVE_HOOK: + return self.before_z_hooks_ + elif type == AFTER_HARDWARE_HOOK: + return self.after_hardware_hooks_ + elif type == AFTER_CAMERA_HOOK: + return self.after_camera_hooks_ + elif type == AFTER_EXPOSURE_HOOK: + return self.after_exposure_hooks_ + + def _are_events_finished(self): + return self._events_finished.is_set() + class ImageProcessor: """ This is the equivalent of RemoteImageProcessor in the Java version. @@ -191,7 +352,7 @@ def _process(self): while True: # wait for an image to arrive tagged_image = self.input_queue.get() - if tagged_image.tags is None and tagged_image.pix is None: + if tagged_image is None: # this is a signal to stop self.output_queue.put(tagged_image) break diff --git a/pycromanager/headless.py b/pycromanager/headless.py index 84b3658a..b50e6bf7 100644 --- a/pycromanager/headless.py +++ b/pycromanager/headless.py @@ -51,40 +51,7 @@ def _create_pymmcore_instance(): # Create and return a new class that subclasses the original class and has the new attributes clz = type(CMMCore.__name__ + "SnakeCase", (CMMCore,), new_attributes) - instance = clz() - - def pop_next_tagged_image(self): - md = pymmcore.Metadata() - pix = self.pop_next_image_md(0, 0, md) - tags = {key: md.GetSingleTag(key).GetValue() for key in md.GetKeys()} - return TaggedImage(tags, pix) - - def get_tagged_image(core, cam_index, camera, height, width, binning=None, pixel_type=None, roi_x_start=None, - roi_y_start=None): - """ - Different signature than the Java version because of difference in metadata handling in the swig layers - """ - pix = core.get_image() - md = pymmcore.Metadata() - # most of the same tags from pop_next_tagged_image, which may not be the same as the MMCoreJ version of this function - tags = {'Camera': camera, 'Height': height, 'Width': width, 'PixelType': pixel_type, - 'CameraChannelIndex': cam_index} - # Could optionally add these for completeness but there might be a performance hit - if binning is not None: - tags['Binning'] 
= binning - if roi_x_start is not None: - tags['ROI-X-start'] = roi_x_start - if roi_y_start is not None: - tags['ROI-Y-start'] = roi_y_start - - return TaggedImage(tags, pix) - - instance.get_tagged_image = types.MethodType(get_tagged_image, instance) - instance.pop_next_tagged_image = types.MethodType(pop_next_tagged_image, instance) - - # attach TaggedImage class - instance.TaggedImage = TaggedImage return instance @@ -112,7 +79,7 @@ def stop_headless(debug=False): logger.debug('Stopping pymmcore instance') c.unloadAllDevices() if debug: - logger.debug('Unloaded all devices') + logger.debug('Unloaded all devices.py') Engine.get_instance().shutdown() if debug: logger.debug('Engine shut down') From 67c29675f1519baed84beb47dcccc154364892cc Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Thu, 20 Jun 2024 18:55:55 +0200 Subject: [PATCH 02/20] very much work in progress --- .../acquisition/acq_eng_py/internal/hooks.py | 17 ++ pycromanager/acquisition/new/__init__.py | 0 pycromanager/acquisition/new/acq_events.py | 248 ++++++++++++++++++ pycromanager/acquisition/new/devices.py | 48 ++++ pycromanager/acquisition/new/image_coords.py | 61 +++++ .../acquisition/new/sandbox_device.py | 32 +++ pycromanager/test/test_device.py | 7 + 7 files changed, 413 insertions(+) create mode 100644 pycromanager/acquisition/acq_eng_py/internal/hooks.py create mode 100644 pycromanager/acquisition/new/__init__.py create mode 100644 pycromanager/acquisition/new/acq_events.py create mode 100644 pycromanager/acquisition/new/devices.py create mode 100644 pycromanager/acquisition/new/image_coords.py create mode 100644 pycromanager/acquisition/new/sandbox_device.py create mode 100644 pycromanager/test/test_device.py diff --git a/pycromanager/acquisition/acq_eng_py/internal/hooks.py b/pycromanager/acquisition/acq_eng_py/internal/hooks.py new file mode 100644 index 00000000..e9ea5c54 --- /dev/null +++ 
b/pycromanager/acquisition/acq_eng_py/internal/hooks.py @@ -0,0 +1,17 @@ + +EVENT_GENERATION_HOOK = 0 +# This hook runs before changes to the hardware (corresponding to the instructions in the +# event) are made +BEFORE_HARDWARE_HOOK = 1 +# This hook runs after all changes to the hardware except dor setting th Z drive have been +# made. This is useful for things such as autofocus. +BEFORE_Z_DRIVE_HOOK = 2 +# This hook runs after changes to the hardware took place, but before camera exposure +# (either a snap or a sequence) is started +AFTER_HARDWARE_HOOK = 3 +# Hook runs after the camera sequence acquisition has started. This can be used for +# external triggering of the camera +AFTER_CAMERA_HOOK = 4 +# Hook runs after the camera exposure ended (when possible, before readout of the camera +# and availability of the images in memory). +AFTER_EXPOSURE_HOOK = 5 \ No newline at end of file diff --git a/pycromanager/acquisition/new/__init__.py b/pycromanager/acquisition/new/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pycromanager/acquisition/new/acq_events.py b/pycromanager/acquisition/new/acq_events.py new file mode 100644 index 00000000..f4e613f9 --- /dev/null +++ b/pycromanager/acquisition/new/acq_events.py @@ -0,0 +1,248 @@ +from typing import Union, List, Tuple, Callable, Dict +from pydantic import BaseModel +import numpy as np +from typing_extensions import Protocol, runtime_checkable +from queue import Queue +from threading import Event +from typing import Iterable +from pycromanager.acquisition.new.devices import Camera + +from pycromanager.acquisition.new.image_coords import ImageCoordinates + + +def atomic_instruction(cls): + cls.atomic_instruction = True + return cls + +@atomic_instruction +class DeviceInstruction(BaseModel): + """ + Represents an instruction to a device. i.e. + """ + device_action: Callable # bound method of a device + # TODO: enforce that arguments must be primitives or arrays? 
+ args: Tuple + + def execute(self): + """ + Execute the device instruction + """ + return self.device_action(*self.args) + +@atomic_instruction +class ReadoutImages(BaseModel): + """ + Readout one or more images (and associated metadata) from a camera + + Attributes: + num_images (int): The number of images to read out. + camera (Camera): The camera object to read images from. + image_coordinate_iterator (Iterable[ImageCoordinates]): An iterator or list of ImageCoordinates objects, which + specify the coordinates of the images that will be read out. + """ + num_images: int + camera: Camera + image_coordinate_iterator: Iterable[ImageCoordinates] + + def execute(self, output_queue: Queue, stop_event: Event): + """ + Readout images from the camera + """ + for image_number, image_coordinates in zip(range(self.num_images), self.image_coordinates): + while True: + if stop_event.is_set(): + self.camera.stop() + break + image, metadata = self.camera.pop_image(timeout=0.01) # only block for 10 ms so stop event can be checked + if image is not None: + output_queue.put((image_coordinates, image, metadata)) + break + if stop_event.is_set(): + break + self.camera.stop() + + +class CaptureImages(BaseModel): + """ + Special device instruction that captures images from a camera + """ + num_images: int + device: Camera + image_coordinates: ImageCoordinates # coordinates of the image(s) produced by this device instruction + + def execute(self): + """ + Capture images from the camera + """ + for _ in range(self.num_images): + self.device.arm() + self.device.start() + image, metadata = self.device.pop_image() + + + + + +class AcquisitionEvent(BaseModel): + # list of device instructions, to be executed in order + device_instructions: List[DeviceInstruction] = None + # # list of config groups to be applied + # config_group: Union[ConfigGroupSetting, List[ConfigGroupSetting]] + + + # TODO: how to handle state + # TODO: how to handle min_start_time + + + + def 
add_device_instructions(self, device_action: Callable, *args: Union[Tuple, List], + image_coordinates: ImageCoordinates = None + ) -> 'AcquisitionEvent': + """ + Add a device instruction to this event. A device instruction is a callable bound method of a device + and a list of arguments to pass to that method. + + :param device_action: the callable bound method of a device + :param args: the arguments to pass to the method + :param image_coordinates: If this device instruction produces an image, the coordinates of that image + """ + if self.device_instructions is None: + self.device_instructions = [] + self.device_instructions.append(DeviceInstruction(device_action=device_action, args=args, + image_coordinates=image_coordinates)) + return self + + # def _convert_to_old_style_json(self): + # data = {} + # + # # if e.is_acquisition_finished_event(): + # # data["special"] = "acquisition-end" + # # return data + # # elif e.is_acquisition_sequence_end_event(): + # # data["special"] = "sequence-end" + # # return data + # + # # TODO: restore this + # # if e.miniumumStartTime_ms_: + # # data["min_start_time"] = e.miniumumStartTime_ms_ / 1000 + # + # if isinstance(self.config_group, list): + # raise Exception("old style events only support one config group") + # elif self.config_group is not None: + # data["config_group"] = [self.config_group.name, self.config_group.preset] + # + # for device_instruction in self.device_instructions: + # if device_instruction.device == MicroManagerCamera and device_instruction.device.action == "set_exposure": + # data["exposure"] = device_instruction.device.args[0] + # + # if self.image_coordinates: + # data["axes"] = {axis.name: axis.value for axis in self.image_coordinates} + # + # + # for device in self.device_instructions: + # if device == MicroManagerStage and device.action == "set_position": + # data["z"] = device.args[0] + # + # for device in self.device_instructions: + # if device == MicroManagerXYStage and device.action == 
"set_position": + # data["x"] = device.args[0] + # data["y"] = device.args[1] + # + # # if e.camera_: + # # data["camera"] = e.camera_ + # + # # get camera from device instructions + # for device in self.device_instructions: + # if device == MicroManagerCamera: + # data["camera"] = device.name + # + # # TODO device names + # # props = [[t.dev, t.prop, t.val] for t in e.properties_] + # # if props: + # # data["properties"] = props + # + # return data + # + # @staticmethod + # def create_from_old_style_json(data, acq): + # if "special" in data: + # if data["special"] == "acquisition-end": + # return AcquisitionEvent.create_acquisition_finished_event(acq) + # elif data["special"] == "sequence-end": + # return AcquisitionEvent.create_acquisition_sequence_end_event(acq) + # + # event = AcquisitionEvent(acq) + # + # if "axes" in data: + # for axisLabel, value in data["axes"].items(): + # event.axisPositions_[axisLabel] = value + # + # if "min_start_time" in data: + # event.miniumumStartTime_ms_ = int(data["min_start_time"] * 1000) + # + # if "timeout_ms" in data: + # event.timeout_ms_ = float(data["timeout_ms"]) + # + # if "config_group" in data: + # event.configGroup_ = data["config_group"][0] + # event.configPreset_ = data["config_group"][1] + # + # if "exposure" in data: + # event.exposure_ = float(data["exposure"]) + # + # # if "timeout_ms" in data: + # # event.slmImage_ = float(data["timeout_ms"]) + # + # if "stage_positions" in data: + # for stagePos in data["stage_positions"]: + # event.set_stage_coordinate(stagePos[0], stagePos[1]) + # + # if "z" in data: + # event.zPosition_ = float(data["z"]) + # + # if "stage" in data: + # deviceName = data["stage"]["device_name"] + # position = data["stage"]["position"] + # event.axisPositions_[deviceName] = float(position) + # if "axis_name" in data["stage"]: + # axisName = data["stage"]["axis_name"] + # event.stageDeviceNamesToAxisNames_[deviceName] = axisName + # + # # # Assuming XYTiledAcquisition is a class and 
AcqEngMetadata is a class or module with constants + # # if isinstance(event.acquisition_, XYTiledAcquisition): + # # posIndex = event.acquisition_.getPixelStageTranslator().getPositionIndices( + # # [int(event.axisPositions_[AcqEngMetadata.AXES_GRID_ROW])], + # # [int(event.axisPositions_[AcqEngMetadata.AXES_GRID_COL])])[0] + # # xyPos = event.acquisition_.getPixelStageTranslator().getXYPosition(posIndex).getCenter() + # # event.xPosition_ = xyPos.x + # # event.yPosition_ = xyPos.y + # + # if "x" in data: + # event.xPosition_ = float(data["x"]) + # + # if "y" in data: + # event.yPosition_ = float(data["y"]) + # + # if "slm_pattern" in data: + # event.slmImage_ = data["slm_pattern"] + # + # if "camera" in data: + # event.camera_ = data["camera"] + # + # if "tags" in data: + # tags = {key: value for key, value in data["tags"].items()} + # event.setTags(tags) + # + # # if "properties" in data: + # # for trip in data["properties"]: + # # t = ThreeTuple(trip[0], trip[1], trip[2]) + # # event.properties_.add(t) + # + # return event + + +class _AcquisitionFinishedEvent(BaseModel): + pass + +class _AcquisitionSequenceEndEvent(BaseModel): + pass \ No newline at end of file diff --git a/pycromanager/acquisition/new/devices.py b/pycromanager/acquisition/new/devices.py new file mode 100644 index 00000000..19e22c12 --- /dev/null +++ b/pycromanager/acquisition/new/devices.py @@ -0,0 +1,48 @@ +import numpy as np +from typing_extensions import Protocol, runtime_checkable + +@runtime_checkable +class SingleAxisMovable(Protocol): + def move(self, position: float) -> None: + ... + +@runtime_checkable +class DoubleAxisMovable(Protocol): + def move(self, x: float, y: float) -> None: + ... + +@runtime_checkable +class Camera(Protocol): + """ + Generic class for a camera and the buffer where it stores data + """ + + def set_exposure(self, exposure: float) -> None: + ... + + def get_exposure(self) -> float: + ... 
+ + def arm(self, frame_count=None) -> None: + """ + Arms the device before an start command. This optional command validates all the current features for + consistency and prepares the device for a fast start of the Acquisition. If not used explicitly, + this command will be automatically executed at the first AcquisitionStart but will not be repeated + for the subsequent ones unless a feature is changed in the device. + """ + ... + + def start(self) -> None: + ... + + def stop(self) -> None: + ... + + def pop_image(self, timeout=None) -> (np.ndarray, dict): + """ + Get the next image and metadata from the camera buffer. If timeout is None, this function will block until + an image is available. If timeout is a number, this function will block for that many seconds before returning + (None, None) if no image is available + """ + ... + diff --git a/pycromanager/acquisition/new/image_coords.py b/pycromanager/acquisition/new/image_coords.py new file mode 100644 index 00000000..282c7058 --- /dev/null +++ b/pycromanager/acquisition/new/image_coords.py @@ -0,0 +1,61 @@ +from typing import Union, List, Tuple, Callable, Dict +from typing import Dict, Union, Optional, Iterator, List +from pydantic import BaseModel + +class ImageCoordinates(BaseModel): + """ + Represents the coordinates of an image. This is a convenience wrapper around a dictionary of axis name to axis value + where the axis value can be either an integer or a string. 
+ """ + coordinate_dict: Dict[str, Union[int, str, Tuple[int, ...], Tuple[str, ...]]] + + def __init__(self, time: int = None, channel: str = None, z: int = None, **kwargs): + # Initialize the BaseModel (this runs Pydantic validation and parsing) + # if time/channel/z are not None, add them to the kwargs + if time is not None: + kwargs['time'] = time + if channel is not None: + kwargs['channel'] = channel + if z is not None: + kwargs['z'] = z + super().__init__(**kwargs) + + def __getitem__(self, key: str) -> Union[int, str]: + return self.coordinate_dict[key] + + def __setitem__(self, key: str, value: Union[int, str]) -> None: + self.coordinate_dict[key] = value + + def __delitem__(self, key: str) -> None: + del self.coordinate_dict[key] + + def __contains__(self, key: str) -> bool: + return key in self.coordinate_dict + + def __getattr__(self, item: str) -> Union[int, str]: + if item in self.coordinate_dict: + return self.coordinate_dict[item] + else: + raise AttributeError(f"Attribute {item} not found") + + def __setattr__(self, key: str, value: Union[int, str]) -> None: + if key == 'coordinate_dict': + super().__setattr__(key, value) + else: + self.coordinate_dict[key] = value + + def __delattr__(self, item: str) -> None: + if item in self.coordinate_dict: + del self.coordinate_dict[item] + else: + super().__delattr__(item) + +# TODO make a nicer way to implement this... 
+# class ImageCoordinateIterator(BaseModel): +# coordinate_dict: Dict[Tuple[str, Union[int, str, Tuple[int, ...], Tuple[str, ...]]] +# +# +# def __iter__(self) -> Iterator['ImageCoordinates']: +# +# def __next__(self) -> 'ImageCoordinates': + diff --git a/pycromanager/acquisition/new/sandbox_device.py b/pycromanager/acquisition/new/sandbox_device.py new file mode 100644 index 00000000..0ea2214c --- /dev/null +++ b/pycromanager/acquisition/new/sandbox_device.py @@ -0,0 +1,32 @@ +from pycromanager import start_headless +from pycromanager.acquisition.new.acq_events import AcquisitionEvent, ReadoutImages +from pycromanager.acquisition.new.image_coords import ImageCoordinates +from pycromanager.acquisition.acq_eng_py.mm_device_implementations import MicroManagerCamera +import os +from pycromanager import Acquisition + + +mm_install_dir = '/Users/henrypinkard/Micro-Manager' +config_file = os.path.join(mm_install_dir, 'MMConfig_demo.cfg') +start_headless(mm_install_dir, config_file, + buffer_size_mb=1024, max_memory_mb=1024, # set these low for github actions + python_backend=True, + debug=False) + +camera = MicroManagerCamera() + +events = [] +coord_list = [ImageCoordinates(time=t) for t in range(10)] +for coord in coord_list: + events.append(ReadoutImages(num_images=1, camera=camera, image_coordinate_iterator=coord_list)) + +with Acquisition(show_display=False, debug=True) as acq: + acq.acquire(events) + + + +# +# with Acquisition(show_display=False, debug=True) as acq: +# # copy list of events to avoid popping from original +# acq.acquire(multi_d_acquisition_events(num_time_points=10)) + diff --git a/pycromanager/test/test_device.py b/pycromanager/test/test_device.py new file mode 100644 index 00000000..bd444443 --- /dev/null +++ b/pycromanager/test/test_device.py @@ -0,0 +1,7 @@ +# +# def test_micro_manager_camera_snap(): +# camera = MicroManagerCamera() +# +# camera.arm(1) +# camera.start() +# image, metadata = camera.pop_image() \ No newline at end of file From 
ddbfe86af57e1c0e9dbaf47ce913700b06fd721c Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Tue, 25 Jun 2024 09:12:02 +0200 Subject: [PATCH 03/20] progress --- .../acquisition/acq_eng_py/internal/engine.py | 13 +- .../acquisition/acquisition_superclass.py | 3 +- pycromanager/acquisition/new/acq_events.py | 351 ++++++++---------- pycromanager/acquisition/new/executor.py | 194 ++++++++++ pycromanager/acquisition/new/image_coords.py | 3 +- .../mm_device_implementations.py | 22 +- .../acquisition/new/sandbox_device.py | 47 ++- .../python_backend_acquisitions.py | 4 +- pycromanager/headless.py | 8 +- 9 files changed, 416 insertions(+), 229 deletions(-) create mode 100644 pycromanager/acquisition/new/executor.py rename pycromanager/acquisition/{acq_eng_py => new}/mm_device_implementations.py (73%) diff --git a/pycromanager/acquisition/acq_eng_py/internal/engine.py b/pycromanager/acquisition/acq_eng_py/internal/engine.py index e535a2a3..01274b79 100644 --- a/pycromanager/acquisition/acq_eng_py/internal/engine.py +++ b/pycromanager/acquisition/acq_eng_py/internal/engine.py @@ -3,11 +3,14 @@ import time import datetime -from pycromanager.acquisition.new.acq_events import AcquisitionEvent +# from pycromanager.acquisition.new.acq_events import AcquisitionEvent +# TODO +AcquisitionEvent = None + from pycromanager.acquisition.acq_eng_py.internal.hardware_sequences import HardwareSequences import pymmcore from pycromanager.acquisition.acq_eng_py.main.acq_notification import AcqNotification -from pycromanager.acquisition.python_backend_acquisitions import PythonBackendAcquisition +# from pycromanager.acquisition.python_backend_acquisitions import PythonBackendAcquisition HARDWARE_ERROR_RETRIES = 6 DELAY_BETWEEN_RETRIES_MS = 5 @@ -103,7 +106,7 @@ def check_for_default_devices(self, event: AcquisitionEvent): # # return self.acq_executor.submit(process_acquisition_event_inner) - def execute_acquisition_event(self, acquisition: 
PythonBackendAcquisition,event: AcquisitionEvent): + def execute_acquisition_event(self, acquisition,event: AcquisitionEvent): # check if we should pause until the minimum start time of the event has occured # while event.get_minimum_start_time_absolute() is not None and \ # time.time() * 1000 < event.get_minimum_start_time_absolute(): @@ -123,7 +126,7 @@ def execute_acquisition_event(self, acquisition: PythonBackendAcquisition,event: - def acquire_images(self, acquisition : PythonBackendAcquisition, + def acquire_images(self, acquisition , event: AcquisitionEvent, hardware_sequences_in_progress: HardwareSequences) -> None: """ Acquire 1 or more images in a sequence, add some metadata, then @@ -146,7 +149,7 @@ def acquire_images(self, acquisition : PythonBackendAcquisition, - acquisition._add_to_output(ti) + # acquisition._add_to_output(ti) # TODO stop sequences # TODO: exceptiopn handling diff --git a/pycromanager/acquisition/acquisition_superclass.py b/pycromanager/acquisition/acquisition_superclass.py index 047972b1..de8fe465 100644 --- a/pycromanager/acquisition/acquisition_superclass.py +++ b/pycromanager/acquisition/acquisition_superclass.py @@ -18,8 +18,9 @@ from queue import Queue from typing import Generator, Dict, Union -from pycromanager.acquisition.new.acq_events import AcquisitionEvent +# from pycromanager.acquisition.new.acq_events import AcquisitionEvent +AcquisitionEvent = None class EventQueue(Queue): """ diff --git a/pycromanager/acquisition/new/acq_events.py b/pycromanager/acquisition/new/acq_events.py index f4e613f9..56af5e4c 100644 --- a/pycromanager/acquisition/new/acq_events.py +++ b/pycromanager/acquisition/new/acq_events.py @@ -3,34 +3,150 @@ import numpy as np from typing_extensions import Protocol, runtime_checkable from queue import Queue -from threading import Event from typing import Iterable +import itertools +from abc import ABC, abstractmethod +import threading + from pycromanager.acquisition.new.devices import Camera + from 
pycromanager.acquisition.new.image_coords import ImageCoordinates -def atomic_instruction(cls): - cls.atomic_instruction = True - return cls -@atomic_instruction -class DeviceInstruction(BaseModel): +from pydantic import BaseModel +import uuid + + +# def atomic_instruction(cls): +# cls.atomic_instruction = True +# return cls +# +# @atomic_instruction +# class DeviceInstruction(BaseModel): +# """ +# Represents an instruction to a device. i.e. +# """ +# device_action: Callable # bound method of a device +# # TODO: enforce that arguments must be primitives or arrays? +# args: Tuple +# +# def execute(self): +# """ +# Execute the device instruction +# """ +# return self.device_action(*self.args) + +# @atomic_instruction + + +class AcquisitionFuture(BaseModel): + event: 'AcquisitionEvent' + _event_complete_condition: threading.Condition = threading.Condition() + _event_complete: bool = False + + def notify_done(self, exception: Exception): + """ + Notify the future that the event has completed + """ + with self._event_complete_condition: + self._event_complete = True + self._event_complete_condition.notify_all() + + def notify_data_acquired(self, image_coordinates: ImageCoordinates): + """ + Notify the future that data has been acquired by a data producing event. This does not mean + the event is done executing + """ + # TODO: could have the notifier grab the data from RAM if available, otherwise read it from disk + pass + + def await_execution(self): + """ + Block until the event is complete + """ + with self._event_complete_condition: + while not self._event_complete: + self._event_complete_condition.wait() + + def await_data_acquired(self): + """ + Block until data is acquired by the event, and optionally return + If the data was already acquired, read it from the dataset + """ + pass + + + +class DataOutputQueue: """ - Represents an instruction to a device. i.e. + Output queue for data (i.e. 
images) captured by an AcquisitionEvent """ - device_action: Callable # bound method of a device - # TODO: enforce that arguments must be primitives or arrays? - args: Tuple + _queue: Queue = Queue() + + def put(self, future: 'AcquisitionFuture', coordinates: ImageCoordinates, image: np.ndarray, metadata: Dict): + """ + Put an image and associated metadata into the queue + """ + self._queue.put((coordinates, image, metadata)) + future.notify_data_acquired(coordinates) + + def get(self): + """ + Get an image and associated metadata from the queue + """ + return self._queue.get() + +class AcquisitionEvent(BaseModel, ABC): + num_retries_on_exception: int = 0 + _exception: Exception = None + _future: AcquisitionFuture = None + + # TODO: want to make this specifc to certain attributes + class Config: + arbitrary_types_allowed = True + + @abstractmethod def execute(self): """ - Execute the device instruction + Execute the event. This event is called by the executor, and should be overriden by subclasses to implement + the event's functionality + """ + pass + + def _post_execution(self): + """ + Method that is called after the event is executed to update acquisition futures about the event's status. + This is called automatically by the Executor and should not be overriden by subclasses. 
+ + Args: + future (AcquisitionFuture): The future associated with this event + """ + if self._future is None: + raise ValueError("Event has not been executed yet") + # notify the future that the event has completed + self._future.notify_done(self._exception) + + + +class DataProducingAcquisitionEvent(AcquisitionEvent): + """ + Special type of acquisition event that produces data + """ + data_output_queue: DataOutputQueue = None # executor will provide this at runtime + image_coordinate_iterator: Iterable[ImageCoordinates] + + def put_data(self, image_coordinates: ImageCoordinates, image: np.ndarray, metadata: Dict): + """ + Put data into the output queue """ - return self.device_action(*self.args) + self.data_output_queue.put(self._future, image_coordinates, image, metadata) -@atomic_instruction -class ReadoutImages(BaseModel): + + +class ReadoutImages(DataProducingAcquisitionEvent): """ Readout one or more images (and associated metadata) from a camera @@ -38,211 +154,40 @@ class ReadoutImages(BaseModel): num_images (int): The number of images to read out. camera (Camera): The camera object to read images from. image_coordinate_iterator (Iterable[ImageCoordinates]): An iterator or list of ImageCoordinates objects, which - specify the coordinates of the images that will be read out. + specify the coordinates of the images that will be read out, should be able to provide at least num_images + elements. 
""" num_images: int camera: Camera - image_coordinate_iterator: Iterable[ImageCoordinates] - def execute(self, output_queue: Queue, stop_event: Event): - """ - Readout images from the camera - """ - for image_number, image_coordinates in zip(range(self.num_images), self.image_coordinates): + def execute(self): + image_counter = itertools.count() if self.num_images is None else range(self.num_images) + for image_number, image_coordinates in zip(image_counter, self.image_coordinate_iterator): while True: - if stop_event.is_set(): - self.camera.stop() - break + # TODO: read from state to check for cancel condition + # this can be made more efficient in the future with a new image buffer that provides callbacks + # on a new image recieved so that polling can be avoided image, metadata = self.camera.pop_image(timeout=0.01) # only block for 10 ms so stop event can be checked if image is not None: - output_queue.put((image_coordinates, image, metadata)) + self.put_data(image_coordinates, image, metadata) break - if stop_event.is_set(): - break - self.camera.stop() -class CaptureImages(BaseModel): +class StartCapture(AcquisitionEvent): """ Special device instruction that captures images from a camera """ num_images: int - device: Camera - image_coordinates: ImageCoordinates # coordinates of the image(s) produced by this device instruction + camera: Camera def execute(self): """ Capture images from the camera """ - for _ in range(self.num_images): - self.device.arm() - self.device.start() - image, metadata = self.device.pop_image() - - - - - -class AcquisitionEvent(BaseModel): - # list of device instructions, to be executed in order - device_instructions: List[DeviceInstruction] = None - # # list of config groups to be applied - # config_group: Union[ConfigGroupSetting, List[ConfigGroupSetting]] - - - # TODO: how to handle state - # TODO: how to handle min_start_time - - - - def add_device_instructions(self, device_action: Callable, *args: Union[Tuple, List], - 
image_coordinates: ImageCoordinates = None - ) -> 'AcquisitionEvent': - """ - Add a device instruction to this event. A device instruction is a callable bound method of a device - and a list of arguments to pass to that method. - - :param device_action: the callable bound method of a device - :param args: the arguments to pass to the method - :param image_coordinates: If this device instruction produces an image, the coordinates of that image - """ - if self.device_instructions is None: - self.device_instructions = [] - self.device_instructions.append(DeviceInstruction(device_action=device_action, args=args, - image_coordinates=image_coordinates)) - return self - - # def _convert_to_old_style_json(self): - # data = {} - # - # # if e.is_acquisition_finished_event(): - # # data["special"] = "acquisition-end" - # # return data - # # elif e.is_acquisition_sequence_end_event(): - # # data["special"] = "sequence-end" - # # return data - # - # # TODO: restore this - # # if e.miniumumStartTime_ms_: - # # data["min_start_time"] = e.miniumumStartTime_ms_ / 1000 - # - # if isinstance(self.config_group, list): - # raise Exception("old style events only support one config group") - # elif self.config_group is not None: - # data["config_group"] = [self.config_group.name, self.config_group.preset] - # - # for device_instruction in self.device_instructions: - # if device_instruction.device == MicroManagerCamera and device_instruction.device.action == "set_exposure": - # data["exposure"] = device_instruction.device.args[0] - # - # if self.image_coordinates: - # data["axes"] = {axis.name: axis.value for axis in self.image_coordinates} - # - # - # for device in self.device_instructions: - # if device == MicroManagerStage and device.action == "set_position": - # data["z"] = device.args[0] - # - # for device in self.device_instructions: - # if device == MicroManagerXYStage and device.action == "set_position": - # data["x"] = device.args[0] - # data["y"] = device.args[1] - # - # # if 
e.camera_: - # # data["camera"] = e.camera_ - # - # # get camera from device instructions - # for device in self.device_instructions: - # if device == MicroManagerCamera: - # data["camera"] = device.name - # - # # TODO device names - # # props = [[t.dev, t.prop, t.val] for t in e.properties_] - # # if props: - # # data["properties"] = props - # - # return data - # - # @staticmethod - # def create_from_old_style_json(data, acq): - # if "special" in data: - # if data["special"] == "acquisition-end": - # return AcquisitionEvent.create_acquisition_finished_event(acq) - # elif data["special"] == "sequence-end": - # return AcquisitionEvent.create_acquisition_sequence_end_event(acq) - # - # event = AcquisitionEvent(acq) - # - # if "axes" in data: - # for axisLabel, value in data["axes"].items(): - # event.axisPositions_[axisLabel] = value - # - # if "min_start_time" in data: - # event.miniumumStartTime_ms_ = int(data["min_start_time"] * 1000) - # - # if "timeout_ms" in data: - # event.timeout_ms_ = float(data["timeout_ms"]) - # - # if "config_group" in data: - # event.configGroup_ = data["config_group"][0] - # event.configPreset_ = data["config_group"][1] - # - # if "exposure" in data: - # event.exposure_ = float(data["exposure"]) - # - # # if "timeout_ms" in data: - # # event.slmImage_ = float(data["timeout_ms"]) - # - # if "stage_positions" in data: - # for stagePos in data["stage_positions"]: - # event.set_stage_coordinate(stagePos[0], stagePos[1]) - # - # if "z" in data: - # event.zPosition_ = float(data["z"]) - # - # if "stage" in data: - # deviceName = data["stage"]["device_name"] - # position = data["stage"]["position"] - # event.axisPositions_[deviceName] = float(position) - # if "axis_name" in data["stage"]: - # axisName = data["stage"]["axis_name"] - # event.stageDeviceNamesToAxisNames_[deviceName] = axisName - # - # # # Assuming XYTiledAcquisition is a class and AcqEngMetadata is a class or module with constants - # # if isinstance(event.acquisition_, 
XYTiledAcquisition): - # # posIndex = event.acquisition_.getPixelStageTranslator().getPositionIndices( - # # [int(event.axisPositions_[AcqEngMetadata.AXES_GRID_ROW])], - # # [int(event.axisPositions_[AcqEngMetadata.AXES_GRID_COL])])[0] - # # xyPos = event.acquisition_.getPixelStageTranslator().getXYPosition(posIndex).getCenter() - # # event.xPosition_ = xyPos.x - # # event.yPosition_ = xyPos.y - # - # if "x" in data: - # event.xPosition_ = float(data["x"]) - # - # if "y" in data: - # event.yPosition_ = float(data["y"]) - # - # if "slm_pattern" in data: - # event.slmImage_ = data["slm_pattern"] - # - # if "camera" in data: - # event.camera_ = data["camera"] - # - # if "tags" in data: - # tags = {key: value for key, value in data["tags"].items()} - # event.setTags(tags) - # - # # if "properties" in data: - # # for trip in data["properties"]: - # # t = ThreeTuple(trip[0], trip[1], trip[2]) - # # event.properties_.add(t) - # - # return event - - -class _AcquisitionFinishedEvent(BaseModel): - pass - -class _AcquisitionSequenceEndEvent(BaseModel): - pass \ No newline at end of file + try: + self.camera.arm(self.num_images) + self.camera.start() + except Exception as e: + self.camera.stop() + raise e + diff --git a/pycromanager/acquisition/new/executor.py b/pycromanager/acquisition/new/executor.py new file mode 100644 index 00000000..a8f7aaee --- /dev/null +++ b/pycromanager/acquisition/new/executor.py @@ -0,0 +1,194 @@ +""" +Class that executes acquistion events across a pool of threads +""" + +import threading +from collections import deque +from typing import Deque +import warnings +import traceback +from pydantic import BaseModel +import time +import uuid + +from pycromanager.acquisition.new.acq_events import AcquisitionFuture +from pycromanager.acquisition.new.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent + + +class _ExecutionThreadManager(BaseModel): + """ + Class which manages a single thread that executes events from a queue, one at a time. 
Events can be added + to either end of the queue, in order to prioritize them. The thread will stop when the shutdown method is called, + or in the event of an unhandled exception during event execution. + + This class handles thread safety so that it is possible to check if the thread has any currently executing events + or events in its queue with the is_free method. + + """ + _deque: Deque[AcquisitionEvent] + + def __init__(self): + super().__init__() + self._thread = threading.Thread(target=self._run_thread) + self._deque = deque() + self._shutdown_event = threading.Event() + self._terminate_event = threading.Event() + self._exception = None + self._event_executing = False + self._addition_condition = threading.Condition() + self._thread.start() + + def join(self): + self._thread.join() + + def _run_thread(self): + event = None + while True: + if self._terminate_event.is_set(): + return + if self._shutdown_event.is_set() and self.is_free(): + return + # Event retrieval loop + while event is None: + with self._addition_condition: + if not self._deque: + # wait until something is in the queue + self._addition_condition.wait() + if self._terminate_event.is_set(): + return + if self._shutdown_event.is_set() and not self._deque: + # awoken by a shutdown event and the queue is empty + return + event = self._deque.popleft() + if not hasattr(event, 'num_retries_on_exception'): + warnings.warn("Event does not have num_retries_on_exception attribute, setting to 0") + event.num_retries_on_exception = 0 + num_retries = event.num_retries_on_exception + self._event_executing = True + + # Event execution loop + while True: + try: + event.execute() + event._post_execution() # notify futures + with self._addition_condition: + self._event_executing = False + break + except Exception as e: + if num_retries > 0: + if self._terminate_event.is_set(): + return + num_retries -= 1 + warnings.warn(f"Exception during event execution, retrying {num_retries} more times") + 
traceback.print_exc() + else: + event._exception = e + event._post_execution() # notify futures + with self._addition_condition: + self._event_executing = False + raise e # re-raise the exception to stop the thread + event = None + + def is_free(self): + """ + return true if an event is not currently being executed and the queue is empty + """ + with self._addition_condition: + return not self._event_executing and not self._deque and not \ + self._terminate_event.is_set() and not self._shutdown_event.is_set() + + def submit_event(self, event, prioritize=False): + """ + Submit an event for execution on this thread. If prioritize is True, the event will be executed before any other + events in the queue. + + Returns: + uuid.UUID: A unique identifier for the event, which can be used to check if the event has been executed + """ + if event._uuid is not None: + warnings.warn("Event has already been executed. Re-executing may lead to unexpected behavior") + event._uuid = uuid.uuid1() + with self._addition_condition: + if self._shutdown_event.is_set() or self._terminate_event.is_set(): + raise RuntimeError("Cannot submit event to a thread that has been shutdown") + if prioritize: + self._deque.appendleft(event) + else: + self._deque.append(event) + self._addition_condition.notify_all() + return event._uuid + + + def terminate(self): + """ + Stop the thread immediately, without waiting for the current event to finish + """ + with self._addition_condition: + self._terminate_event.set() + self._shutdown_event.set() + self._addition_condition.notify_all() + self._thread.join() + def shutdown(self): + """ + Stop the thread and wait for it to finish + """ + with self._addition_condition: + self._shutdown_event.set() + self._addition_condition.notify_all() + self._thread.join() + + +class AcquisitionEventExecutor: + def __init__(self, num_threads=1): + self._threads = [] + for _ in range(num_threads): + self._start_new_thread() + + def _start_new_thread(self): + 
self._threads.append(_ExecutionThreadManager()) + + def submit_event(self, event, prioritize=False, use_free_thread=False, data_output_queue=None): + """ + Submit an event for execution on one of the active threads. By default, all events will be executed + on a single thread in the order they were submitted. This is the simplest way to prevent concurrency issues + with hardware devices. With thread-safe code, events can be parallelized by submitting them to different threads + using the use_free_thread argument. By default, events will be executed in the order they were submitted, but + if prioritize is set to True, the event will be executed before any other events in the queue on its thread. + + Parameters: + event (AcquisitionEvent): The event to execute + prioritize (bool): If True, the event will be executed before any other events queued on its execution thread + use_free_thread (bool): If True, the event will be executed on a thread that is not currently executing + and has nothing in its queue, creating a new thread if necessary. This is needed, for example, when using + an event to cancel or stop another event that is awaiting a stop signal to be rewritten to the state. If + this is set to False (the default), the event will be executed on the primary thread. 
+ data_output_queue (DataOutputQueue): The queue to put data into if the event produces data + """ + # check that DataProducingAcquisitionEvents have a data output queue + if isinstance(event, DataProducingAcquisitionEvent) and data_output_queue is None: + raise ValueError("DataProducingAcquisitionEvent must have a data_output_queue argument") + + future = AcquisitionFuture(event) + event._future = future + if use_free_thread: + for thread in self._threads: + if thread.is_free(): + thread.submit_event(event) + break + self._start_new_thread() + self._threads[-1].submit_event(event) + else: + self._threads[0].submit_event(event, prioritize=prioritize) + + return future + + + + def shutdown(self): + """ + Stop all threads and wait for them to finish + """ + for thread in self._threads: + thread.shutdown() + for thread in self._threads: + thread.join() \ No newline at end of file diff --git a/pycromanager/acquisition/new/image_coords.py b/pycromanager/acquisition/new/image_coords.py index 282c7058..3128d149 100644 --- a/pycromanager/acquisition/new/image_coords.py +++ b/pycromanager/acquisition/new/image_coords.py @@ -1,13 +1,14 @@ from typing import Union, List, Tuple, Callable, Dict from typing import Dict, Union, Optional, Iterator, List from pydantic import BaseModel +from pydantic.fields import Field class ImageCoordinates(BaseModel): """ Represents the coordinates of an image. This is a convenience wrapper around a dictionary of axis name to axis value where the axis value can be either an integer or a string. 
""" - coordinate_dict: Dict[str, Union[int, str, Tuple[int, ...], Tuple[str, ...]]] + coordinate_dict: Dict[str, Union[int, str, Tuple[int, ...], Tuple[str, ...]]] = Field(default_factory=dict) def __init__(self, time: int = None, channel: str = None, z: int = None, **kwargs): # Initialize the BaseModel (this runs Pydantic validation and parsing) diff --git a/pycromanager/acquisition/acq_eng_py/mm_device_implementations.py b/pycromanager/acquisition/new/mm_device_implementations.py similarity index 73% rename from pycromanager/acquisition/acq_eng_py/mm_device_implementations.py rename to pycromanager/acquisition/new/mm_device_implementations.py index 9cb36eb6..1646b180 100644 --- a/pycromanager/acquisition/acq_eng_py/mm_device_implementations.py +++ b/pycromanager/acquisition/new/mm_device_implementations.py @@ -2,11 +2,12 @@ Implementation of Micro-Manager devices.py in terms of the AcqEng bottom API """ -from pycromanager.acquisition.acq_eng_py.device_api import SingleAxisMovable, DoubleAxisMovable, Camera +from pycromanager.acquisition.new.devices import SingleAxisMovable, DoubleAxisMovable, Camera from pycromanager.core import Core import numpy as np import pymmcore import time +from concurrent.futures import ThreadPoolExecutor @@ -31,6 +32,11 @@ def __init__(self, device_name=None): raise ValueError(f"Camera {device_name} not found") self.device_name = device_name + # Make a thread to execute calls to snap asynchronously + # This may be removable in the the future with the new camera API if something similar is implemented at the core + self._snap_executor = ThreadPoolExecutor(max_workers=1) + self._last_snap = None + def set_exposure(self, exposure: float) -> None: self._core.set_exposure(self.device_name, exposure) @@ -46,13 +52,13 @@ def arm(self, frame_count=None) -> None: # No need to prepare for continuous sequence acquisition pass else: - self._core.prepare_acquisition() + self._core.prepare_sequence_acquisition(self.device_name) self._frame_count = 1 
def start(self) -> None: if self._frame_count == 1: - # TODO: put this on a different thread so it can return immediately - self._core.snap_image() + # Execute this on a separate thread because it blocks + self._last_snap = self._snap_executor.submit(lambda : self._core.snap_image()) elif self._frame_count is None: # set core camera to this camera because there's no version of this call where you specify the camera self._core.set_camera_device(self.device_name) @@ -61,7 +67,8 @@ def start(self) -> None: self._core.start_sequence_acquisition(self._frame_count, 0, True) def stop(self) -> None: - self._core.stop_acquisition() + # This will stop sequences. There is not way to stop snap_image + self._core.stop_sequence_acquisition(self.device_name) def pop_image(self, timeout=None) -> (np.ndarray, dict): if self._frame_count != 1: @@ -73,6 +80,8 @@ def pop_image(self, timeout=None) -> (np.ndarray, dict): break # sleep for the shortest possible time, only to allow the thread to be interrupted and prevent # GIL weirdness. But perhaps this is not necessary + # Reading out images should be the highest priority and thus should not be sleeping + # This could all be made more efficient in the future with callbacks coming from the C level time.sleep(0.000001) if timeout is not None and time.time() - start_time > timeout: return None, None @@ -80,6 +89,9 @@ def pop_image(self, timeout=None) -> (np.ndarray, dict): metadata = {key: md.GetSingleTag(key).GetValue() for key in md.GetKeys()} return pix, metadata else: + # wait for the snap to finish + self._last_snap.result() + # Is there no metadata when calling snapimage? 
metadata = {} return self._core.get_image(), metadata \ No newline at end of file diff --git a/pycromanager/acquisition/new/sandbox_device.py b/pycromanager/acquisition/new/sandbox_device.py index 0ea2214c..6c43643d 100644 --- a/pycromanager/acquisition/new/sandbox_device.py +++ b/pycromanager/acquisition/new/sandbox_device.py @@ -1,10 +1,9 @@ +import time + from pycromanager import start_headless -from pycromanager.acquisition.new.acq_events import AcquisitionEvent, ReadoutImages from pycromanager.acquisition.new.image_coords import ImageCoordinates -from pycromanager.acquisition.acq_eng_py.mm_device_implementations import MicroManagerCamera +from pycromanager.acquisition.new.mm_device_implementations import MicroManagerCamera import os -from pycromanager import Acquisition - mm_install_dir = '/Users/henrypinkard/Micro-Manager' config_file = os.path.join(mm_install_dir, 'MMConfig_demo.cfg') @@ -13,15 +12,43 @@ python_backend=True, debug=False) + camera = MicroManagerCamera() -events = [] -coord_list = [ImageCoordinates(time=t) for t in range(10)] -for coord in coord_list: - events.append(ReadoutImages(num_images=1, camera=camera, image_coordinate_iterator=coord_list)) -with Acquisition(show_display=False, debug=True) as acq: - acq.acquire(events) +from pycromanager.acquisition.new.executor import AcquisitionEventExecutor +executor = AcquisitionEventExecutor() + + +from pycromanager.acquisition.new.acq_events import StartCapture, ReadoutImages, DataOutputQueue + +num_images = 100 +data_output_queue = DataOutputQueue() + +start_capture_event = StartCapture(num_images=num_images, camera=camera) +readout_images_event = ReadoutImages(num_images=num_images, camera=camera, + image_coordinate_iterator=[ImageCoordinates(time=t) for t in range(num_images)], + output_queue=data_output_queue) + +executor.submit_event(start_capture_event) +executor.submit_event(readout_images_event, use_free_thread=True) + +image_count = 0 +while True: + coordinates, image, metadata = 
data_output_queue.get() + image_count += 1 + print(f"Got image {image_count} ", f'pixel mean {image.mean()}' ) + + + +# +# events = [] +# coord_list = [ImageCoordinates(time=t) for t in range(10)] +# for coord in coord_list: +# events.append(ReadoutImages(num_images=1, camera=camera, image_coordinate_iterator=coord_list)) +# +# with Acquisition(show_display=False, debug=True) as acq: +# acq.acquire(events) diff --git a/pycromanager/acquisition/python_backend_acquisitions.py b/pycromanager/acquisition/python_backend_acquisitions.py index 211a0175..37aa63ea 100644 --- a/pycromanager/acquisition/python_backend_acquisitions.py +++ b/pycromanager/acquisition/python_backend_acquisitions.py @@ -1,7 +1,9 @@ import warnings from docstring_inheritance import NumpyDocstringInheritanceMeta from pycromanager.acquisition.acquisition_superclass import _validate_acq_events, Acquisition -from pycromanager.acquisition.new.acq_events import AcquisitionEvent +# from pycromanager.acquisition.new.acq_events import AcquisitionEvent +#TODO: +AcquisitionEvent = None from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata from pycromanager.acquisition.acq_eng_py.main.acq_notification import AcqNotification from pycromanager.acquisition.acq_eng_py.internal.notification_handler import NotificationHandler diff --git a/pycromanager/headless.py b/pycromanager/headless.py index b50e6bf7..e7e0c4be 100644 --- a/pycromanager/headless.py +++ b/pycromanager/headless.py @@ -6,7 +6,7 @@ import types import os -from pycromanager.acquisition.acq_eng_py.internal.engine import Engine +# from pycromanager.acquisition.acq_eng_py.internal.engine import Engine from pymmcore import CMMCore import pymmcore from pyjavaz import DEFAULT_BRIDGE_PORT, server_terminated @@ -80,7 +80,8 @@ def stop_headless(debug=False): c.unloadAllDevices() if debug: logger.debug('Unloaded all devices.py') - Engine.get_instance().shutdown() + # TODO: shutdown new engine + # Engine.get_instance().shutdown() 
if debug: logger.debug('Engine shut down') _PYMMCORES.clear() @@ -136,7 +137,8 @@ def start_headless( mmc.load_system_configuration(config_file) mmc.set_circular_buffer_memory_footprint(buffer_size_mb) _PYMMCORES.append(mmc) # Store so it doesn't get garbage collected - Engine(mmc) + # TODO: startup new engine + # Engine(mmc) else: classpath = mm_app_path + '/plugins/Micro-Manager/*' if java_loc is None: From bce90fb53818e38123da08756bd41baebe75074c Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Tue, 25 Jun 2024 09:12:14 +0200 Subject: [PATCH 04/20] progress --- .../new/test/t3st_event_execution.py | 64 +++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 pycromanager/acquisition/new/test/t3st_event_execution.py diff --git a/pycromanager/acquisition/new/test/t3st_event_execution.py b/pycromanager/acquisition/new/test/t3st_event_execution.py new file mode 100644 index 00000000..ddbbe0d3 --- /dev/null +++ b/pycromanager/acquisition/new/test/t3st_event_execution.py @@ -0,0 +1,64 @@ +import time + +from pycromanager import start_headless +from pycromanager.acquisition.new.image_coords import ImageCoordinates +from pycromanager.acquisition.new.mm_device_implementations import MicroManagerCamera +import os +from pycromanager.acquisition.new.executor import AcquisitionEventExecutor +from pycromanager.acquisition.new.acq_events import StartCapture, ReadoutImages, DataOutputQueue + + +mm_install_dir = '/Users/henrypinkard/Micro-Manager' +config_file = os.path.join(mm_install_dir, 'MMConfig_demo.cfg') +start_headless(mm_install_dir, config_file, + buffer_size_mb=1024, max_memory_mb=1024, # set these low for github actions + python_backend=True, + debug=False) + + + +executor = AcquisitionEventExecutor() + + + +camera = MicroManagerCamera() + + +num_images = 100 +data_output_queue = DataOutputQueue() + +start_capture_event = StartCapture(num_images=num_images, camera=camera) +readout_images_event = 
ReadoutImages(num_images=num_images, camera=camera, + image_coordinate_iterator=[ImageCoordinates(time=t) for t in range(num_images)], + output_queue=data_output_queue) + +executor.submit_event(start_capture_event) +executor.submit_event(readout_images_event) + +image_count = 0 +while True: + coordinates, image, metadata = data_output_queue.get() + image_count += 1 + print(f"Got image {image_count} ", f'pixel mean {image.mean()}' ) + if image_count == num_images: + break + +executor.shutdown() + + +# +# events = [] +# coord_list = [ImageCoordinates(time=t) for t in range(10)] +# for coord in coord_list: +# events.append(ReadoutImages(num_images=1, camera=camera, image_coordinate_iterator=coord_list)) +# +# with Acquisition(show_display=False, debug=True) as acq: +# acq.acquire(events) + + + +# +# with Acquisition(show_display=False, debug=True) as acq: +# # copy list of events to avoid popping from original +# acq.acquire(multi_d_acquisition_events(num_time_points=10)) + From 43c1b12d4614254b98c412bafbfafc0ae29e8b39 Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Tue, 25 Jun 2024 22:26:38 +0200 Subject: [PATCH 05/20] progress: implemented asynchrnous await system --- pycromanager/acquisition/new/acq_events.py | 326 +++++++++++------- pycromanager/acquisition/new/data_coords.py | 146 ++++++++ pycromanager/acquisition/new/data_handler.py | 173 ++++++++++ pycromanager/acquisition/new/data_storage.py | 164 +++++++++ pycromanager/acquisition/new/devices.py | 3 + pycromanager/acquisition/new/executor.py | 10 +- pycromanager/acquisition/new/image_coords.py | 62 ---- .../new/implementations/__init__.py | 0 .../data_storage_implementations.py | 10 + .../implementations/event_implementations.py | 56 +++ .../mm_device_implementations.py | 0 pycromanager/acquisition/new/test/__init__.py | 0 .../new/{ => test}/sandbox_device.py | 12 +- .../new/test/t3st_event_execution.py | 12 +- 14 files changed, 772 insertions(+), 202 
deletions(-) create mode 100644 pycromanager/acquisition/new/data_coords.py create mode 100644 pycromanager/acquisition/new/data_handler.py create mode 100644 pycromanager/acquisition/new/data_storage.py delete mode 100644 pycromanager/acquisition/new/image_coords.py create mode 100644 pycromanager/acquisition/new/implementations/__init__.py create mode 100644 pycromanager/acquisition/new/implementations/data_storage_implementations.py create mode 100644 pycromanager/acquisition/new/implementations/event_implementations.py rename pycromanager/acquisition/new/{ => implementations}/mm_device_implementations.py (100%) create mode 100644 pycromanager/acquisition/new/test/__init__.py rename pycromanager/acquisition/new/{ => test}/sandbox_device.py (84%) diff --git a/pycromanager/acquisition/new/acq_events.py b/pycromanager/acquisition/new/acq_events.py index 56af5e4c..2ae7141d 100644 --- a/pycromanager/acquisition/new/acq_events.py +++ b/pycromanager/acquisition/new/acq_events.py @@ -1,52 +1,42 @@ -from typing import Union, List, Tuple, Callable, Dict -from pydantic import BaseModel +from typing import Union, List, Tuple, Callable, Dict, Set, Optional, Any, Sequence import numpy as np -from typing_extensions import Protocol, runtime_checkable from queue import Queue from typing import Iterable -import itertools from abc import ABC, abstractmethod import threading +import weakref +import warnings -from pycromanager.acquisition.new.devices import Camera - - -from pycromanager.acquisition.new.image_coords import ImageCoordinates - - +from pycromanager.acquisition.new.data_coords import DataCoordinates, DataCoordinatesIterator +from pycromanager.acquisition.new.data_storage import DataStorageAPI +from pycromanager.acquisition.new.data_handler import DataHandler from pydantic import BaseModel -import uuid +from pydantic import field_validator # def atomic_instruction(cls): # cls.atomic_instruction = True # return cls -# -# @atomic_instruction -# class 
DeviceInstruction(BaseModel): -# """ -# Represents an instruction to a device. i.e. -# """ -# device_action: Callable # bound method of a device -# # TODO: enforce that arguments must be primitives or arrays? -# args: Tuple -# -# def execute(self): -# """ -# Execute the device instruction -# """ -# return self.device_action(*self.args) - -# @atomic_instruction - - -class AcquisitionFuture(BaseModel): - event: 'AcquisitionEvent' - _event_complete_condition: threading.Condition = threading.Condition() - _event_complete: bool = False - - def notify_done(self, exception: Exception): + + +class AcquisitionFuture: + + def __init__(self, event: Union['AcquisitionEvent', 'DataProducingAcquisitionEvent'], data_handler: DataHandler): + self._event = event + event._set_future(self) # so that the event can notify the future when it is done and when data is acquired + self._data_handler = data_handler + self._event_complete_condition = threading.Condition() + self._data_notification_condition = threading.Condition() + self._event_complete = False + self._acquired_data_coordinates: Set[DataCoordinates] = set() + self._processed_data_coordinates: Set[DataCoordinates] = set() + self._saved_data_coordinates: Set[DataCoordinates] = set() + self._awaited_acquired_data: Dict[DataCoordinates, Tuple[Any, Any]] = {} + self._awaited_processed_data: Dict[DataCoordinates, Tuple[Any, Any]] = {} + self._awaited_saved_data: Dict[DataCoordinates, Tuple[Any, Any]] = {} + + def _notify_execution_complete(self, exception: Exception): """ Notify the future that the event has completed """ @@ -54,14 +44,6 @@ def notify_done(self, exception: Exception): self._event_complete = True self._event_complete_condition.notify_all() - def notify_data_acquired(self, image_coordinates: ImageCoordinates): - """ - Notify the future that data has been acquired by a data producing event. 
This does not mean - the event is done executing - """ - # TODO: could have the notifier grab the data from RAM if available, otherwise read it from disk - pass - def await_execution(self): """ Block until the event is complete @@ -70,41 +52,169 @@ def await_execution(self): while not self._event_complete: self._event_complete_condition.wait() - def await_data_acquired(self): + def _notify_data(self, image_coordinates: DataCoordinates, data, metadata, processed=False, saved=False): """ - Block until data is acquired by the event, and optionally return - If the data was already acquired, read it from the dataset - """ - pass - - - -class DataOutputQueue: - """ - Output queue for data (i.e. images) captured by an AcquisitionEvent - """ - _queue: Queue = Queue() + Notify the future that data has been acquired by a data producing event. This does not mean + the event is done executing. It also does not mean the data has been stored yet. It is simply + in an output queue waiting to be gotten by the image storage/image/processing thread - def put(self, future: 'AcquisitionFuture', coordinates: ImageCoordinates, image: np.ndarray, metadata: Dict): + Args: + image_coordinates: The coordinates of the acquired data + data: The data itself + metadata: Metadata associated with the data + processed: Whether the data has been processed + saved: Whether the data has been saved """ - Put an image and associated metadata into the queue + with self._data_notification_condition: + # pass the data to the function that is waiting on it + if not processed and not saved: + self._acquired_data_coordinates.add(image_coordinates) + if image_coordinates in self._awaited_acquired_data: + self._awaited_acquired_data[ + image_coordinates] = (data if self._awaited_acquired_data[image_coordinates][0] else None, + metadata if self._awaited_acquired_data[image_coordinates][1] else None) + elif processed and not saved: + self._processed_data_coordinates.add(image_coordinates) + if image_coordinates 
in self._awaited_processed_data: + self._awaited_processed_data[ + image_coordinates] = (data if self._awaited_processed_data[image_coordinates][0] else None, + metadata if self._awaited_processed_data[image_coordinates][1] else None) + elif processed and saved: + self._saved_data_coordinates.add(image_coordinates) + if image_coordinates in self._awaited_saved_data: + self._awaited_saved_data[ + image_coordinates] = (data if self._awaited_saved_data[image_coordinates][0] else None, + metadata if self._awaited_saved_data[image_coordinates][1] else None) + else: + raise ValueError("Invalid arguments") + self._data_notification_condition.notify_all() + + def _check_if_coordinates_possible(self, coordinates): """ - self._queue.put((coordinates, image, metadata)) - future.notify_data_acquired(coordinates) - - def get(self): + Check if the given coordinates are possible for this event. raise a ValueError if not """ - Get an image and associated metadata from the queue + possible = self._event.image_coordinate_iterator.might_produce_coordinates(coordinates) + if possible is False: + raise ValueError("This event is not expected to produce the given coordinates") + elif possible is None: + # TODO: suggest a better way to do this (ie a smart generator that knows if produced coordinates are valid) + warnings.warn("This event may not produce the given coordinates") + + + # TODO: write tests for this with returning data, metadata, and both, and neither + # Also try adding in a big delay in the queue or image saving and make sure it still works + def await_data(self, coordinates: Optional[Union[DataCoordinates, Dict[str, Union[int, str]], + DataCoordinatesIterator, Sequence[DataCoordinates], + Sequence[Dict[str, Union[int, str]]]]], + return_data: bool = False, return_metadata: bool = False, + data_processed: bool = False, data_stored: bool = False): + """ + Block until the event's data is acquired/processed/saved, and optionally return the data/metadata. 
+ when waiting for the data to be acquired (i.e. before it is processed), since there is no way to guarantee that + this function is called before the data is acquired, the data may have already been saved and not readily + available in RAM. In this case, the data will be read from disk. + + Args: + coordinates: A single DataCoordinates object/dictionary, or Sequence (i.e. list or tuple) of DataCoordinates + objects/dictionaries. If None, this function will block until the next data is acquired/processed/saved + return_data: whether to return the data + return_metadata: whether to return the metadata + data_processed: whether to wait until data has been processed. If not data processor is in use, + then this parameter has no effect + data_stored: whether to wait for data that has been saved """ - return self._queue.get() + # Check if this event produces data + if not isinstance(self._event, DataProducingAcquisitionEvent): + raise ValueError("This event does not produce data") + + coordinates_iterator = DataCoordinatesIterator.create(coordinates) + # check that an infinite number of images is not requested + if not coordinates_iterator.is_finite(): + raise ValueError("Cannot wait for an infinite number of images") + + # Iterate through all of the requested images, if they haven't yet been acquired/processed/saved, register them + # in awaited_data so that they'll be hung onto if they arrive while this method is running. This may avoid + # having to retrieve them from disk later + result = {} + to_read = set() + with self._data_notification_condition: + # lock to avoid inconsistencies with the data that is being awaited + for data_coordinates in coordinates_iterator: + if not data_processed and not data_stored: + # make sure this is a valid thing to wait for. 
This can only be done before processing and + # storage, because processors and data storage classes may optionally modify the data + self._check_if_coordinates_possible(coordinates) + if data_coordinates not in self._acquired_data_coordinates: + # register that we're awaiting this data, so that if it arrives on the other thread while other + # images are being read from disk, it will be hung onto in memory, thereby avoid unnecessary reads + self._awaited_acquired_data[coordinates] = (return_data, return_metadata) + else: + to_read.add(data_coordinates) + elif data_processed and not data_stored: + if data_coordinates not in self._processed_data_coordinates: + self._awaited_processed_data[coordinates] = (return_data, return_metadata) + else: + to_read.add(data_coordinates) + else: # data stored + if data_coordinates not in self._saved_data_coordinates: + self._awaited_saved_data[coordinates] = (return_data, return_metadata) + else: + to_read.add(data_coordinates) + + # retrieve any data that has already passed through the pipeline from the data storage, via the data handler + for data_coordinates in to_read: + data, metadata = self._data_handler.get(data_coordinates, return_data, return_metadata) + # save memory for a potential big retrieval + result[data_coordinates] = (data if return_data else None, metadata if return_metadata else None) + + # now that we've gotten all the data from storage that was missed before this method was called, + # proceed to getting all the data was awaited on another thread + with self._data_notification_condition: + # order doesn't matter here because we're just grabbing it all from RAM + if not data_processed and not data_stored: + for data_coordinates in self._awaited_acquired_data.keys(): + data = return_data + while data is True or data is False: # once the data is no longer a boolean, it's the actual data + self._data_notification_condition.wait() + data, metadata = self._awaited_acquired_data[data_coordinates] + # remove from 
temporary storage and put into result + result[data_coordinates] = self._awaited_acquired_data.pop(data_coordinates) + + # Same thing for other steps in the pipeline + for data_coordinates in self._awaited_processed_data.keys(): + data = return_data + while data is True or data is False: + self._data_notification_condition.wait() + data, metadata = self._awaited_processed_data[data_coordinates] + result[data_coordinates] = self._awaited_processed_data.pop(data_coordinates) + + for data_coordinates in self._awaited_saved_data.keys(): + data = return_data + while data is True or data is False: + self._data_notification_condition.wait() + data, metadata = self._awaited_saved_data[data_coordinates] + result[data_coordinates] = self._awaited_saved_data.pop(data_coordinates) + + # Now package the result up + all_data, all_metadata = zip(*result.values()) + # if the original coordinates were not a sequence, don't return a sequence + if not isinstance(coordinates, dict) or not isinstance(coordinates, DataCoordinates): + all_data = all_data[0] + all_metadata = all_metadata[0] + if return_data and return_metadata: + return all_data, all_metadata + elif return_data: + return all_data + elif return_metadata: + return all_metadata class AcquisitionEvent(BaseModel, ABC): num_retries_on_exception: int = 0 _exception: Exception = None - _future: AcquisitionFuture = None + _future_weakref: Optional[weakref.ReferenceType[AcquisitionFuture]] = None - # TODO: want to make this specifc to certain attributes + # TODO: want to make this specific to certain attributes class Config: arbitrary_types_allowed = True @@ -116,6 +226,14 @@ def execute(self): """ pass + def _set_future(self, future: AcquisitionFuture): + """ + Called by the executor to set the future associated with this event + """ + # Store this as a weakref so that if user code does not hold a reference to the future, + # it can be garbage collected. 
The event should not give access to the future to user code + self._future_weakref = weakref.ref(future) + def _post_execution(self): """ Method that is called after the event is executed to update acquisition futures about the event's status. @@ -124,70 +242,36 @@ def _post_execution(self): Args: future (AcquisitionFuture): The future associated with this event """ - if self._future is None: + if self._future_weakref is None: raise ValueError("Event has not been executed yet") - # notify the future that the event has completed - self._future.notify_done(self._exception) + future = self._future_weakref() + if future is not None: + future._notify_execution_complete(self._exception) class DataProducingAcquisitionEvent(AcquisitionEvent): """ - Special type of acquisition event that produces data + Special type of acquisition event that produces data. It must be passed an image_coordinate_iterator + object that generates the coordinates of each piece of data (i.e. image) that will be produced by the event. + For example, {time: 0}, {time: 1}, {time: 2} for a time series acquisition. """ - data_output_queue: DataOutputQueue = None # executor will provide this at runtime - image_coordinate_iterator: Iterable[ImageCoordinates] - - def put_data(self, image_coordinates: ImageCoordinates, image: np.ndarray, metadata: Dict): + _data_handler: DataHandler = None # executor will provide this at runtime + # This is eventually an ImageCoordinatesIterator. 
If an Iterable[ImageCoordinates] or + # Iterable[Dict[str, Union[int, str]]] is provided, it will be auto-converted to an ImageCoordinatesIterator + image_coordinate_iterator: Union[DataCoordinatesIterator, + Iterable[DataCoordinates], + Iterable[Dict[str, Union[int, str]]]] + + @field_validator('image_coordinate_iterator', mode='before') + def _convert_to_image_coordinates_iterator(cls, v): + return DataCoordinatesIterator.create(v) + + def put_data(self, data_coordinates: DataCoordinates, image: np.ndarray, metadata: Dict): """ Put data into the output queue """ - self.data_output_queue.put(self._future, image_coordinates, image, metadata) - + self._data_handler.put(data_coordinates, image, metadata, self._future_weakref()) -class ReadoutImages(DataProducingAcquisitionEvent): - """ - Readout one or more images (and associated metadata) from a camera - - Attributes: - num_images (int): The number of images to read out. - camera (Camera): The camera object to read images from. - image_coordinate_iterator (Iterable[ImageCoordinates]): An iterator or list of ImageCoordinates objects, which - specify the coordinates of the images that will be read out, should be able to provide at least num_images - elements. 
- """ - num_images: int - camera: Camera - - def execute(self): - image_counter = itertools.count() if self.num_images is None else range(self.num_images) - for image_number, image_coordinates in zip(image_counter, self.image_coordinate_iterator): - while True: - # TODO: read from state to check for cancel condition - # this can be made more efficient in the future with a new image buffer that provides callbacks - # on a new image recieved so that polling can be avoided - image, metadata = self.camera.pop_image(timeout=0.01) # only block for 10 ms so stop event can be checked - if image is not None: - self.put_data(image_coordinates, image, metadata) - break - - -class StartCapture(AcquisitionEvent): - """ - Special device instruction that captures images from a camera - """ - num_images: int - camera: Camera - - def execute(self): - """ - Capture images from the camera - """ - try: - self.camera.arm(self.num_images) - self.camera.start() - except Exception as e: - self.camera.stop() - raise e diff --git a/pycromanager/acquisition/new/data_coords.py b/pycromanager/acquisition/new/data_coords.py new file mode 100644 index 00000000..46779203 --- /dev/null +++ b/pycromanager/acquisition/new/data_coords.py @@ -0,0 +1,146 @@ +from typing import Union, List, Tuple, Callable, Dict +from typing import Dict, Union, Optional, Iterator, List, Tuple, Iterable, Sequence +from pydantic import BaseModel +from pydantic.fields import Field + +class DataCoordinates(BaseModel): + """ + Represents the coordinates of a piece of data (conventionally, a single 2D image). This is a convenience wrapper + around a dictionary of axis name to axis value where the axis value can be either an integer or a string. 
+ """ + coordinate_dict: Dict[str, Union[int, str]] = Field(default_factory=dict) + + def __init__(self, coordinate_dict: Dict[str, Union[int, str]] = None, + time: int = None, channel: str = None, z: int = None, **kwargs): + if coordinate_dict is not None: + self.coordinate_dict = coordinate_dict + if time is not None or channel is not None or z is not None: + raise ValueError("If coordinate_dict is provided, time, channel, and z must not be provided.") + # if time/channel/z are not None, add them to the kwargs + if time is not None: + kwargs['time'] = time + if channel is not None: + kwargs['channel'] = channel + if z is not None: + kwargs['z'] = z + super().__init__(**kwargs) + + def __getitem__(self, key: str) -> Union[int, str]: + return self.coordinate_dict[key] + + def __setitem__(self, key: str, value: Union[int, str]) -> None: + self.coordinate_dict[key] = value + + def __delitem__(self, key: str) -> None: + del self.coordinate_dict[key] + + def __contains__(self, key: str) -> bool: + return key in self.coordinate_dict + + def __getattr__(self, item: str) -> Union[int, str]: + if item in self.coordinate_dict: + return self.coordinate_dict[item] + else: + raise AttributeError(f"Attribute {item} not found") + + def __setattr__(self, key: str, value: Union[int, str]) -> None: + if key == 'coordinate_dict': + super().__setattr__(key, value) + else: + self.coordinate_dict[key] = value + + def __delattr__(self, item: str) -> None: + if item in self.coordinate_dict: + del self.coordinate_dict[item] + else: + super().__delattr__(item) + + def __eq__(self, other): + if isinstance(other, DataCoordinates): + return self.coordinate_dict == other.coordinate_dict + elif isinstance(other, dict): + return self.coordinate_dict == other + return NotImplemented + + def __hash__(self): + return hash(frozenset(self.coordinate_dict.items())) + + +class DataCoordinatesIterator: + @classmethod + def create(cls, image_coordinate_iterable: Union[Iterable[DataCoordinates], 
Iterable[Dict[str, Union[int, str]]], + DataCoordinates, Dict[str, Union[int, str], + 'DataCoordinatesIterator']]): + """ + Autoconvert ImageCoordinates, dictionaries, or Iterables thereof to ImageCoordinatesIterator + + :param image_coordinate_iterable: an ImageCoordinates object, a dictionary, + an iterable of ImageCoordinates or dictionaries, or an ImageCoordinatesIterator. Valid options include + a list of ImageCoordinates, a list of dictionaries, a generator of ImageCoordinates, + a generator of dictionaries, etc. + """ + if isinstance(image_coordinate_iterable, cls): + return image_coordinate_iterable + + if isinstance(image_coordinate_iterable, DataCoordinates): + return cls([image_coordinate_iterable]) + if isinstance(image_coordinate_iterable, dict): + return cls([DataCoordinates(**image_coordinate_iterable)]) + + instance = super().__new__(cls) + instance._initialize(image_coordinate_iterable) + return instance + + def might_produce_coordinates(self, coordinates: DataCoordinates) -> Optional[bool]: + """ + Check if this iterator might produce the given coordinates. If this iterator is backed by a finite list of + ImageCoordinates, this can be checked definitively. If it is backed by something infinite (like a generator), + it will only be possible if more information about the generator is known (e.g. it produces {time: 0}, {time:1}, + and continues incrementing). + + If not possible to determine definitely, return None + """ + if isinstance(self._backing_iterable, Sequence): + return any(self._compare_coordinates(coord, coordinates) for coord in self._backing_iterable) + + # TODO: cases where you pass in an object that increments with a known pattern + + # For non-sequences (like generators), we can't determine definitely without further information + return None + + def is_finite(self) -> bool: + """ + Check if this iterator is finite (i.e. 
will eventually run out of elements) + """ + return isinstance(self._backing_iterable, Sequence) + + @staticmethod + def _compare_coordinates(coord, target): + if isinstance(coord, dict): + coord = DataCoordinates(**coord) + return all(getattr(coord, key) == value for key, value in target.__dict__.items()) + + def __new__(cls, *args, **kwargs): + raise TypeError( + "ImageCoordinatesIterator cannot be instantiated directly. Use ImageCoordinatesIterator.create() instead.") + + def _initialize(self, data): + self._backing_iterable = data + self._iterator = iter(data) + + def __iter__(self): + return self + + def __next__(self): + try: + next_item = next(self._iterator) + if isinstance(next_item, dict): + return DataCoordinates(**next_item) + elif isinstance(next_item, DataCoordinates): + return next_item + else: + raise TypeError(f"Unexpected item type: {type(next_item)}. Expected ImageCoordinates or dict.") + except StopIteration: + raise + except Exception as e: + raise TypeError(f"Error processing next item: {str(e)}") diff --git a/pycromanager/acquisition/new/data_handler.py b/pycromanager/acquisition/new/data_handler.py new file mode 100644 index 00000000..53fc88dd --- /dev/null +++ b/pycromanager/acquisition/new/data_handler.py @@ -0,0 +1,173 @@ +import threading +import queue +from typing import Any, Dict, Tuple, Callable, Union, Iterable, Sequence, Optional +import numpy as np + +from pycromanager.acquisition.new.data_coords import DataCoordinates +from pycromanager.acquisition.new.data_storage import DataStorageAPI +from pycromanager.acquisition.new.acq_events import AcquisitionFuture +from pydantic.json import JsonValue + +class _PeekableQueue(queue.Queue): + def peek(self): + with self.not_empty: + while not self._qsize(): + self.not_empty.wait() + with self.mutex: + return self.queue[0] + +class DataHandler: + """ + Object that handles acquired data while it is waiting to be saved. 
This object is thread safe and handles + the handoff of images while they are waiting to be saved, providing temporary access to it along the way. + + This class manages one or two queues/threads, depending on whether a processing function is provided. If a + processing function is provided, the data will be processed in a separate thread before being passed to the data + storage object. If no processing function is provided, the data will be passed directly to the data storage object. + """ + + # This class must create at least one additional thread (the saving thread) + # and may create another for processing data + + + def __init__(self, storage: DataStorageAPI, + process_function: Callable[[DataCoordinates, np.ndarray, JsonValue], + Optional[Union[DataCoordinates, np.ndarray, JsonValue, + Sequence[DataCoordinates, np.ndarray, JsonValue]]]] = None): + self._storage = storage + self._process_function = process_function + self._intake_queue = _PeekableQueue() + # locks for synchronizing access the queues/dicts + self._intake_lock = threading.Lock() + self._data_metadata_future: Dict[Any, Tuple[np.ndarray, Dict, Optional[AcquisitionFuture]]] = {} + self._intake_thread = threading.Thread(target=self._run_intake_thread) + self._intake_thread.start() + if process_function: + self._processed_lock = threading.Lock() + self._processed_queue = _PeekableQueue() + self._storage_thread = threading.Thread(target=self._run_storage_thread) + self._storage_thread.start() + + @staticmethod + def _unpack_processed_image(processed): + """ Convert coordinates dict to DataCoordinates object if necessary """ + coordinates, data, metadata = processed + if isinstance(coordinates, dict): + coordinates = DataCoordinates(coordinates) + return coordinates, data, metadata + + def _run_intake_thread(self): + while True: + if self._process_function: + with self._intake_lock: + coordinates = self._intake_queue.peek() + # shutdown condition + if coordinates is None: + self._intake_queue.get() + 
# TODO: it would be nice to give a signal to the image processor to shut down + # probably could do this by adding a new protocol that can be checked + # to allow backwards compatibility + self._processed_queue.put(None) # propagate the shutdown signal + break + + data, metadata, _ = self._data_metadata_future[coordinates] + processed = self._process_function(coordinates, data, metadata) + # deal with the fact that the processor may return no items, a single item, or a list of items + with self._intake_lock: + with self._processed_lock: + # Move from intake queue to processed or saving queue as appropriate + coordinates = self._intake_queue.get() + # discard old data/metadata because it has now been processed + _, _, future = self._data_metadata_future.pop(coordinates) + if processed is None: + pass # the data was discarded or diverted + elif isinstance(processed, tuple) and not isinstance(processed[0], tuple): # single item + coordinates, data, metadata = self._unpack_processed_image(processed) + self._processed_queue.put(coordinates) + self._data_metadata_future[coordinates] = (data, metadata, future) + future._notify_data(coordinates, data, metadata, processed=True, saved=False) + else: # multiple items + for item in processed: + coordinates, data, metadata = self._unpack_processed_image(item) + self._processed_queue.put(coordinates) + self._data_metadata_future[coordinates] = (data, metadata, future) + future._notify_data(coordinates, data, metadata, processed=True, saved=False) + else: + # transfer to storage thread + shutdown = self._transfer_to_storage() + if shutdown: + break + + def _run_storage_thread(self): + """ if an image processor is active, this additional thread will take its processed images and save them """ + while True: + shutdown = self._transfer_to_storage() + if shutdown: + break + def _transfer_to_storage(self): + """ + Take items from the source queue and put them into the storage queue. 
If there is a processing function, + the source queue is the output queue of the processing function. If there is no processing function, the source + queue is the intake queue. + """ + # lock = self._processed_lock if self._process_function else self._intake_lock + coordinates = self._processed_queue.peek() if self._process_function else self._intake_queue.peek() + if coordinates is None: + self._processed_queue.get() if self._process_function else self._intake_queue.get() # remove it + self._storage.finish() + return True + else: + data, metadata, future = self._data_metadata_future[coordinates] + self._storage.put(coordinates, data, metadata) # once this returns the storage is responsible for the data + lock = self._processed_lock if self._process_function else self._intake_lock + with lock: + if future: + future._notify_data(coordinates, data, metadata, processed=True, saved=True) + coordinates = self._processed_queue.get() if self._process_function else self._intake_queue.get() + self._data_metadata_future.pop(coordinates) + return False + + def join(self): + """ + Wait for the threads to finish + """ + self._intake_thread.join() + if self._storage_thread: + self._storage_thread.join() + + def get(self, coordinates: DataCoordinates, return_data=True, return_metadata=True + ) -> Optional[Tuple[np.ndarray, JsonValue]]: + """ + Get an image and associated metadata. If they are present, either in the intake queue or the storage queue + (if it exists), return them. If not present, get them from the storage object. 
If not present there, return None + """ + data_metadata_future = self._data_metadata_future.get(coordinates, None) + data, metadata = None, None + if data_metadata_future: + data, metadata, future = data_metadata_future + else: + # its not currently managed by the data handler, so check the storage object + # don't do both if you dont have to because this may be from disk + if return_data: + data = self._storage.get_data(coordinates) + if data is None: + raise KeyError(f"Image with coordinates {coordinates} not found") + if return_metadata: + metadata = self._storage.get_metadata(coordinates) + if metadata is None: + raise KeyError(f"Metadata with coordinates {coordinates} not found") + return data, metadata + + + def put(self, coordinates: Any, image: np.ndarray, metadata: Dict, acquisition_future: Optional[AcquisitionFuture]): + """ + Hand off this image to the data handler. It will handle handoff to the storage object and image processing + if requested, as well as providing temporary access to the image and metadata as it passes throught this + pipeline. If an acquisition future is provided, it will be notified when the image arrives, is processed, and + is stored. 
+ """ + with self._intake_lock: + self._intake_queue.put(coordinates) + self._data_metadata_future[coordinates] = (image, metadata, acquisition_future) + if acquisition_future: + acquisition_future._notify_data(coordinates, image, metadata, processed=False, saved=False) diff --git a/pycromanager/acquisition/new/data_storage.py b/pycromanager/acquisition/new/data_storage.py new file mode 100644 index 00000000..be0c1828 --- /dev/null +++ b/pycromanager/acquisition/new/data_storage.py @@ -0,0 +1,164 @@ +""" +Protocol for storage class that acquisitions ultimate write to where the acquisition data ultimately gets stored +""" + +from typing import Protocol, runtime_checkable, Union, List, Tuple, Dict, Any +from pycromanager.acquisition.new.data_coords import DataCoordinates +import numpy as np +from pydantic.json import JsonValue + + +@runtime_checkable +class DataStorageAPI(Protocol): + + # TODO: about these type hints: better to use the dicts only or the DataCoordinates. + # TODO: Also do this with kwargs + def __contains__(self, data_coordinates: Union[DataCoordinates, Dict[str, Union[int, str]]]) -> bool: + """Check if item is in the container.""" + ... + + def get_data(self, data_coordinates: Union[DataCoordinates, Dict[str, Union[int, str]]]) -> np.ndarray: + """ + Read a single data corresponding to the given coordinates + """ + ... + + def get_metadata(self, data_coordinates: Union[DataCoordinates, Dict[str, Union[int, str]]]) -> JsonValue: + """ + Read metadata corresponding to the given coordinates + """ + ... + + # TODO: one alternative to saying you have to make the data immediately available is to have a callback + # that is called when the data is available. This would allow for disk-backed storage to write the data + # to disk before calling the callback. + def put(self, data_coordinates: Union[DataCoordinates, Dict[str, Union[int, str]]], data: np.ndarray, + metadata: JsonValue): + """ + Add data and corresponding metadata to the dataset. 
Once this method has been called, the data and metadata + should be immediately available to be read by get_data and get_metadata. For disk-backed storage, this may + require temporarily caching the data in memory until it can be written to disk. + + Parameters + ---------- + data_coordinates : DataCoordinates or dict + Coordinates of the data + data : np.ndarray + Data to be stored + metadata : dict + Metadata associated with the data + """ + ... + + def finish(self): + """ + No more data will be added to the dataset. This method should be called after the last call to put() + and makes the dataset read-only. + """ + ... + + def close(self): + """ + Close the dataset, releasing any resources it holds. No more images will be added or requested + """ + ... + + # @abstractmethod + # def initialize(self, summary_metadata: dict): + # """ + # Initialize the dataset with summary metadata + # """ + # pass + +# @abstractmethod +# def get_image_coordinates_list(self) -> List[Dict[str, Union[int, str]]]: +# """ +# Return a list of the coordinates (e.g. {'channel': 'DAPI', 'z': 0, 'time': 0}) of every image in the dataset +# +# Returns +# ------- +# list +# List of image coordinates +# """ +# pass +# +# @abstractmethod +# def await_new_image(self, timeout=None): +# """ +# Wait for a new image to arrive in the dataset +# +# Parameters +# ---------- +# timeout : float, optional +# Maximum time to wait in seconds (Default value = None) +# +# Returns +# ------- +# bool +# True if a new image has arrived, False if the timeout was reached +# """ +# pass +# +# @abstractmethod +# def is_finished(self) -> bool: +# """ +# Check if the dataset is finished and no more images will be added +# """ +# pass +# +# +# +# @abstractmethod +# def as_array(self, axes: List[str] = None, stitched: bool = False, +# **kwargs: Union[int, str]) -> 'dask.array': +# """ +# Create one big Dask array with last two axes as y, x and preceding axes depending on data. 
+# If the dataset is saved to disk, the dask array is made up of memory-mapped numpy arrays, +# so the dataset does not need to be able to fit into RAM. +# If the data doesn't fully fill out the array (e.g. not every z-slice collected at every time point), +# zeros will be added automatically. +# +# To convert data into a numpy array, call np.asarray() on the returned result. However, doing so will bring the +# data into RAM, so it may be better to do this on only a slice of the array at a time. +# +# Parameters +# ---------- +# axes : list, optional +# List of axes names over which to iterate and merge into a stacked array. +# If None, all axes will be used in PTCZYX order (Default value = None). +# stitched : bool, optional +# If True and tiles were acquired in a grid, lay out adjacent tiles next to one another +# (Default value = False) +# **kwargs : +# Names and integer positions of axes on which to slice data +# +# Returns +# ------- +# dataset : dask array +# Dask array representing the dataset +# """ +# pass +# +# class WritableNDStorageAPI(NDStorageAPI): +# """ +# API for NDStorage classes to which images can be written +# """ +# + +# +# @abstractmethod +# def block_until_finished(self, timeout=None): +# """ +# Block until the dataset is finished and all images have been written +# +# Parameters +# ---------- +# timeout : float, optional +# Maximum time to wait in seconds (Default value = None) +# +# Returns +# ------- +# bool +# True if the dataset is finished, False if the timeout was reached +# """ +# pass diff --git a/pycromanager/acquisition/new/devices.py b/pycromanager/acquisition/new/devices.py index 19e22c12..2f9dfcd4 100644 --- a/pycromanager/acquisition/new/devices.py +++ b/pycromanager/acquisition/new/devices.py @@ -1,3 +1,6 @@ +""" +APIs (protocols) for devices that can be used in the acquisition module +""" import numpy as np from typing_extensions import Protocol, runtime_checkable diff --git a/pycromanager/acquisition/new/executor.py 
b/pycromanager/acquisition/new/executor.py index a8f7aaee..260b46e1 100644 --- a/pycromanager/acquisition/new/executor.py +++ b/pycromanager/acquisition/new/executor.py @@ -147,7 +147,7 @@ def __init__(self, num_threads=1): def _start_new_thread(self): self._threads.append(_ExecutionThreadManager()) - def submit_event(self, event, prioritize=False, use_free_thread=False, data_output_queue=None): + def submit_event(self, event, prioritize=False, use_free_thread=False, data_handler: DataHandler = None): """ Submit an event for execution on one of the active threads. By default, all events will be executed on a single thread in the order they were submitted. This is the simplest way to prevent concurrency issues @@ -157,19 +157,19 @@ def submit_event(self, event, prioritize=False, use_free_thread=False, data_outp Parameters: event (AcquisitionEvent): The event to execute + data_storage (DataStorage): The data storage object to put data into if the event produces data prioritize (bool): If True, the event will be executed before any other events queued on its execution thread use_free_thread (bool): If True, the event will be executed on a thread that is not currently executing and has nothing in its queue, creating a new thread if necessary. This is needed, for example, when using an event to cancel or stop another event that is awaiting a stop signal to be rewritten to the state. If this is set to False (the default), the event will be executed on the primary thread. 
- data_output_queue (DataOutputQueue): The queue to put data into if the event produces data + data_handler (DataHandler): The queue to put data into if the event produces data """ # check that DataProducingAcquisitionEvents have a data output queue - if isinstance(event, DataProducingAcquisitionEvent) and data_output_queue is None: + if isinstance(event, DataProducingAcquisitionEvent) and data_handler is None: raise ValueError("DataProducingAcquisitionEvent must have a data_output_queue argument") - future = AcquisitionFuture(event) - event._future = future + future = AcquisitionFuture(event=event, data_handler=data_handler) if use_free_thread: for thread in self._threads: if thread.is_free(): diff --git a/pycromanager/acquisition/new/image_coords.py b/pycromanager/acquisition/new/image_coords.py deleted file mode 100644 index 3128d149..00000000 --- a/pycromanager/acquisition/new/image_coords.py +++ /dev/null @@ -1,62 +0,0 @@ -from typing import Union, List, Tuple, Callable, Dict -from typing import Dict, Union, Optional, Iterator, List -from pydantic import BaseModel -from pydantic.fields import Field - -class ImageCoordinates(BaseModel): - """ - Represents the coordinates of an image. This is a convenience wrapper around a dictionary of axis name to axis value - where the axis value can be either an integer or a string. 
- """ - coordinate_dict: Dict[str, Union[int, str, Tuple[int, ...], Tuple[str, ...]]] = Field(default_factory=dict) - - def __init__(self, time: int = None, channel: str = None, z: int = None, **kwargs): - # Initialize the BaseModel (this runs Pydantic validation and parsing) - # if time/channel/z are not None, add them to the kwargs - if time is not None: - kwargs['time'] = time - if channel is not None: - kwargs['channel'] = channel - if z is not None: - kwargs['z'] = z - super().__init__(**kwargs) - - def __getitem__(self, key: str) -> Union[int, str]: - return self.coordinate_dict[key] - - def __setitem__(self, key: str, value: Union[int, str]) -> None: - self.coordinate_dict[key] = value - - def __delitem__(self, key: str) -> None: - del self.coordinate_dict[key] - - def __contains__(self, key: str) -> bool: - return key in self.coordinate_dict - - def __getattr__(self, item: str) -> Union[int, str]: - if item in self.coordinate_dict: - return self.coordinate_dict[item] - else: - raise AttributeError(f"Attribute {item} not found") - - def __setattr__(self, key: str, value: Union[int, str]) -> None: - if key == 'coordinate_dict': - super().__setattr__(key, value) - else: - self.coordinate_dict[key] = value - - def __delattr__(self, item: str) -> None: - if item in self.coordinate_dict: - del self.coordinate_dict[item] - else: - super().__delattr__(item) - -# TODO make a nicer way to implement this... 
-# class ImageCoordinateIterator(BaseModel): -# coordinate_dict: Dict[Tuple[str, Union[int, str, Tuple[int, ...], Tuple[str, ...]]] -# -# -# def __iter__(self) -> Iterator['ImageCoordinates']: -# -# def __next__(self) -> 'ImageCoordinates': - diff --git a/pycromanager/acquisition/new/implementations/__init__.py b/pycromanager/acquisition/new/implementations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pycromanager/acquisition/new/implementations/data_storage_implementations.py b/pycromanager/acquisition/new/implementations/data_storage_implementations.py new file mode 100644 index 00000000..9f6093c7 --- /dev/null +++ b/pycromanager/acquisition/new/implementations/data_storage_implementations.py @@ -0,0 +1,10 @@ +""" + +TODO: adapters for NDTiff and NDRam + +""" + +# Can you just pass in data coords directly cause of Duck typing? +# Maybe implement a protocol in NDTiff to described dict-like objects? + +# TODO: may need to implement items() for data coords \ No newline at end of file diff --git a/pycromanager/acquisition/new/implementations/event_implementations.py b/pycromanager/acquisition/new/implementations/event_implementations.py new file mode 100644 index 00000000..197ec847 --- /dev/null +++ b/pycromanager/acquisition/new/implementations/event_implementations.py @@ -0,0 +1,56 @@ +""" +This file contains implementations of AcquisitionEvents that can be used to build full experiments +""" +from typing import Iterable +import itertools +from pycromanager.acquisition.new.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent +from pycromanager.acquisition.new.devices import Camera +from pycromanager.acquisition.new.data_coords import DataCoordinates + + +class ReadoutImages(DataProducingAcquisitionEvent): + """ + Readout one or more images (and associated metadata) from a camera + + Attributes: + num_images (int): The number of images to read out. + camera (Camera): The camera object to read images from. 
+ image_coordinate_iterator (Iterable[DataCoordinates]): An iterator or list of ImageCoordinates objects, which + specify the coordinates of the images that will be read out, should be able to provide at least num_images + elements. + """ + num_images: int + camera: Camera + # TODO: maybe specify here that this should run on a seperate thread? + + def execute(self): + image_counter = itertools.count() if self.num_images is None else range(self.num_images) + for image_number, image_coordinates in zip(image_counter, self.image_coordinate_iterator): + while True: + # TODO: read from state to check for cancel condition + # this can be made more efficient in the future with a new image buffer that provides callbacks + # on a new image recieved so that polling can be avoided + image, metadata = self.camera.pop_image(timeout=0.01) # only block for 10 ms so stop event can be checked + if image is not None: + self.put_data(image_coordinates, image, metadata) + break + + +class StartCapture(AcquisitionEvent): + """ + Special device instruction that captures images from a camera + """ + num_images: int + camera: Camera + + def execute(self): + """ + Capture images from the camera + """ + try: + self.camera.arm(self.num_images) + self.camera.start() + except Exception as e: + self.camera.stop() + raise e + diff --git a/pycromanager/acquisition/new/mm_device_implementations.py b/pycromanager/acquisition/new/implementations/mm_device_implementations.py similarity index 100% rename from pycromanager/acquisition/new/mm_device_implementations.py rename to pycromanager/acquisition/new/implementations/mm_device_implementations.py diff --git a/pycromanager/acquisition/new/test/__init__.py b/pycromanager/acquisition/new/test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pycromanager/acquisition/new/sandbox_device.py b/pycromanager/acquisition/new/test/sandbox_device.py similarity index 84% rename from pycromanager/acquisition/new/sandbox_device.py rename to 
pycromanager/acquisition/new/test/sandbox_device.py index 6c43643d..095eddeb 100644 --- a/pycromanager/acquisition/new/sandbox_device.py +++ b/pycromanager/acquisition/new/test/sandbox_device.py @@ -1,8 +1,6 @@ -import time - from pycromanager import start_headless -from pycromanager.acquisition.new.image_coords import ImageCoordinates -from pycromanager.acquisition.new.mm_device_implementations import MicroManagerCamera +from pycromanager.acquisition.new.data_coords import DataCoordinates +from pycromanager.acquisition.new.implementations.mm_device_implementations import MicroManagerCamera import os mm_install_dir = '/Users/henrypinkard/Micro-Manager' @@ -20,14 +18,14 @@ executor = AcquisitionEventExecutor() -from pycromanager.acquisition.new.acq_events import StartCapture, ReadoutImages, DataOutputQueue +from pycromanager.acquisition.new.acq_events import StartCapture, ReadoutImages, DataHandler num_images = 100 -data_output_queue = DataOutputQueue() +data_output_queue = DataHandler() start_capture_event = StartCapture(num_images=num_images, camera=camera) readout_images_event = ReadoutImages(num_images=num_images, camera=camera, - image_coordinate_iterator=[ImageCoordinates(time=t) for t in range(num_images)], + image_coordinate_iterator=[DataCoordinates(time=t) for t in range(num_images)], output_queue=data_output_queue) executor.submit_event(start_capture_event) diff --git a/pycromanager/acquisition/new/test/t3st_event_execution.py b/pycromanager/acquisition/new/test/t3st_event_execution.py index ddbbe0d3..e7c16db4 100644 --- a/pycromanager/acquisition/new/test/t3st_event_execution.py +++ b/pycromanager/acquisition/new/test/t3st_event_execution.py @@ -1,11 +1,9 @@ -import time - from pycromanager import start_headless -from pycromanager.acquisition.new.image_coords import ImageCoordinates -from pycromanager.acquisition.new.mm_device_implementations import MicroManagerCamera +from pycromanager.acquisition.new.data_coords import DataCoordinates +from 
pycromanager.acquisition.new.implementations.mm_device_implementations import MicroManagerCamera import os from pycromanager.acquisition.new.executor import AcquisitionEventExecutor -from pycromanager.acquisition.new.acq_events import StartCapture, ReadoutImages, DataOutputQueue +from pycromanager.acquisition.new.acq_events import StartCapture, ReadoutImages, DataHandler mm_install_dir = '/Users/henrypinkard/Micro-Manager' @@ -25,11 +23,11 @@ num_images = 100 -data_output_queue = DataOutputQueue() +data_output_queue = DataHandler() start_capture_event = StartCapture(num_images=num_images, camera=camera) readout_images_event = ReadoutImages(num_images=num_images, camera=camera, - image_coordinate_iterator=[ImageCoordinates(time=t) for t in range(num_images)], + image_coordinate_iterator=[DataCoordinates(time=t) for t in range(num_images)], output_queue=data_output_queue) executor.submit_event(start_capture_event) From 054171831cf2d13e2b076ce32b7f55b04674def2 Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Wed, 26 Jun 2024 09:34:21 +0200 Subject: [PATCH 06/20] many data coordinates tests --- pycromanager/acquisition/new/data_coords.py | 99 +++++++++++++++------ 1 file changed, 72 insertions(+), 27 deletions(-) diff --git a/pycromanager/acquisition/new/data_coords.py b/pycromanager/acquisition/new/data_coords.py index 46779203..591a29ac 100644 --- a/pycromanager/acquisition/new/data_coords.py +++ b/pycromanager/acquisition/new/data_coords.py @@ -1,7 +1,5 @@ -from typing import Union, List, Tuple, Callable, Dict -from typing import Dict, Union, Optional, Iterator, List, Tuple, Iterable, Sequence -from pydantic import BaseModel -from pydantic.fields import Field +from typing import Dict, Union, Optional, Iterator, List, Tuple, Iterable, Sequence, Any +from pydantic import BaseModel, Field class DataCoordinates(BaseModel): """ @@ -9,14 +7,15 @@ class DataCoordinates(BaseModel): around a dictionary of axis name to axis 
value where the axis value can be either an integer or a string. """ coordinate_dict: Dict[str, Union[int, str]] = Field(default_factory=dict) + time: Optional[int] = None + channel: Optional[str] = None + z: Optional[int] = None def __init__(self, coordinate_dict: Dict[str, Union[int, str]] = None, time: int = None, channel: str = None, z: int = None, **kwargs): + # add coordinate dict to kwargs for pydantic type coercion if coordinate_dict is not None: - self.coordinate_dict = coordinate_dict - if time is not None or channel is not None or z is not None: - raise ValueError("If coordinate_dict is provided, time, channel, and z must not be provided.") - # if time/channel/z are not None, add them to the kwargs + kwargs['coordinate_dict'] = coordinate_dict if time is not None: kwargs['time'] = time if channel is not None: @@ -25,11 +24,41 @@ def __init__(self, coordinate_dict: Dict[str, Union[int, str]] = None, kwargs['z'] = z super().__init__(**kwargs) + other_axis_names = [key for key in kwargs.keys() if key not in ['coordinate_dict', 'time', 'channel', 'z']] + if coordinate_dict is not None and ((time is not None or channel is not None or z is not None) or + len(other_axis_names) > 0): + raise ValueError("If coordinate_dict is provided, time, channel, and z or other axis names " + "must not be provided.") + + # Handle the special case of time, channel, and z + if time is not None: + self.coordinate_dict['time'] = time + if channel is not None: + self.coordinate_dict['channel'] = channel + if z is not None: + self.coordinate_dict['z'] = z + + # set other axis names as attributes + for key in other_axis_names: # if theyre in kwargs + setattr(self, key, kwargs[key]) + # if theyre in coordinate_dict + if coordinate_dict is not None: + for key, value in coordinate_dict.items(): + if not hasattr(self, key): + setattr(self, key, value) + + + class Config: + validate_assignment = True + extra = 'allow' # allow setting of other axis names as attributes that are not in the 
model + def __getitem__(self, key: str) -> Union[int, str]: return self.coordinate_dict[key] def __setitem__(self, key: str, value: Union[int, str]) -> None: self.coordinate_dict[key] = value + # update the attribute + setattr(self, key, value) def __delitem__(self, key: str) -> None: del self.coordinate_dict[key] @@ -37,23 +66,16 @@ def __delitem__(self, key: str) -> None: def __contains__(self, key: str) -> bool: return key in self.coordinate_dict - def __getattr__(self, item: str) -> Union[int, str]: - if item in self.coordinate_dict: - return self.coordinate_dict[item] - else: - raise AttributeError(f"Attribute {item} not found") - def __setattr__(self, key: str, value: Union[int, str]) -> None: - if key == 'coordinate_dict': - super().__setattr__(key, value) - else: + super().__setattr__(key, value) + # Keep a redundant copy in the coordinate_dict + if not key.startswith('_'): self.coordinate_dict[key] = value def __delattr__(self, item: str) -> None: + super().__delattr__(item) if item in self.coordinate_dict: del self.coordinate_dict[item] - else: - super().__delattr__(item) def __eq__(self, other): if isinstance(other, DataCoordinates): @@ -65,12 +87,33 @@ def __eq__(self, other): def __hash__(self): return hash(frozenset(self.coordinate_dict.items())) + def items(self): + return self.coordinate_dict.items() + + def keys(self): + return self.coordinate_dict.keys() + + def values(self): + return self.coordinate_dict.values() + + def get(self, key, default=None): + return self.coordinate_dict.get(key, default) + + def clear(self): + self.coordinate_dict.clear() + + def __len__(self): + return len(self.coordinate_dict) + + def __iter__(self): + return iter(self.coordinate_dict) + class DataCoordinatesIterator: @classmethod def create(cls, image_coordinate_iterable: Union[Iterable[DataCoordinates], Iterable[Dict[str, Union[int, str]]], - DataCoordinates, Dict[str, Union[int, str], - 'DataCoordinatesIterator']]): + DataCoordinates, + Dict[str, Union[Union[int, 
str], 'DataCoordinatesIterator']]]): """ Autoconvert ImageCoordinates, dictionaries, or Iterables thereof to ImageCoordinatesIterator @@ -83,14 +126,19 @@ def create(cls, image_coordinate_iterable: Union[Iterable[DataCoordinates], Iter return image_coordinate_iterable if isinstance(image_coordinate_iterable, DataCoordinates): - return cls([image_coordinate_iterable]) - if isinstance(image_coordinate_iterable, dict): - return cls([DataCoordinates(**image_coordinate_iterable)]) + image_coordinate_iterable = [image_coordinate_iterable] + elif isinstance(image_coordinate_iterable, dict): + image_coordinate_iterable = [image_coordinate_iterable] instance = super().__new__(cls) instance._initialize(image_coordinate_iterable) return instance + def __new__(cls, *args, **kwargs): + raise TypeError( + "ImageCoordinatesIterator cannot be instantiated directly. Use ImageCoordinatesIterator.create() instead.") + + def might_produce_coordinates(self, coordinates: DataCoordinates) -> Optional[bool]: """ Check if this iterator might produce the given coordinates. If this iterator is backed by a finite list of @@ -120,9 +168,6 @@ def _compare_coordinates(coord, target): coord = DataCoordinates(**coord) return all(getattr(coord, key) == value for key, value in target.__dict__.items()) - def __new__(cls, *args, **kwargs): - raise TypeError( - "ImageCoordinatesIterator cannot be instantiated directly. 
Use ImageCoordinatesIterator.create() instead.") def _initialize(self, data): self._backing_iterable = data From 446e8166504aa8b5cb2d69cf58ce79836a25a87a Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Wed, 26 Jun 2024 09:34:29 +0200 Subject: [PATCH 07/20] many data coordinates tests --- .../acquisition/new/test/test_data_coords.py | 158 ++++++++++++++++++ 1 file changed, 158 insertions(+) create mode 100644 pycromanager/acquisition/new/test/test_data_coords.py diff --git a/pycromanager/acquisition/new/test/test_data_coords.py b/pycromanager/acquisition/new/test/test_data_coords.py new file mode 100644 index 00000000..e5af1a4a --- /dev/null +++ b/pycromanager/acquisition/new/test/test_data_coords.py @@ -0,0 +1,158 @@ +import pytest +from pydantic import ValidationError +from pycromanager.acquisition.new.data_coords import DataCoordinates, DataCoordinatesIterator +import numpy as np + +def test_init_with_dict(): + coords = DataCoordinates(coordinate_dict={"time": 1, "channel": "DAPI", "z": 0}) + assert coords.coordinate_dict == {"time": 1, "channel": "DAPI", "z": 0} + +def test_init_with_individual_axes(): + coords = DataCoordinates(time=1, channel="DAPI", z=0) + assert coords.coordinate_dict == {"time": 1, "channel": "DAPI", "z": 0} + +def test_init_with_both_raises_error(): + with pytest.raises(ValueError): + DataCoordinates(coordinate_dict={"time": 1}, time=2) + +def test_init_with_float(): + coords = DataCoordinates(time=1.0, channel="DAPI", z=0) + assert coords.coordinate_dict == {"time": 1, "channel": "DAPI", "z": 0} + assert coords.time == 1 + assert type(coords.time) is int + +def test_init_exception_with_non_coercible_float_value(): + with pytest.raises(ValidationError): + DataCoordinates(time=1.5, channel="DAPI", z=0) + +def test_set_attr_exception_with_non_coercible_float_value(): + coords = DataCoordinates(channel="DAPI", z=0) + with pytest.raises(ValidationError): + coords.time = 1.5 + +def 
test_init_with_numpy(): + for time in np.arange(5): + coords = DataCoordinates(time=time, channel="DAPI", z=0) + assert coords.coordinate_dict == {"time": time, "channel": "DAPI", "z": 0} + assert coords.time == time + assert type(coords.time) is int + +def test_attr_with_float(): + coords = DataCoordinates(channel="DAPI", z=0) + coords.time = 1.0 + assert coords.coordinate_dict == {"time": 1, "channel": "DAPI", "z": 0} + assert coords.time == 1 + assert type(coords.time) is int + +def test_attr_with_numpy(): + for time in np.arange(5): + coords = DataCoordinates(channel="DAPI", z=0) + coords.time = time + assert coords.coordinate_dict == {"time": time, "channel": "DAPI", "z": 0} + assert coords.time == time + assert type(coords.time) is int + +def test_getitem(): + coords = DataCoordinates(time=1, channel="DAPI", z=0) + assert coords["time"] == 1 + assert coords["channel"] == "DAPI" + assert coords["z"] == 0 + +def test_setitem(): + coords = DataCoordinates(time=1, channel="DAPI", z=0) + coords["time"] = 2 + assert coords["time"] == 2 + +def test_contains(): + coords = DataCoordinates(time=1, channel="DAPI", z=0) + assert "time" in coords + assert "channel" in coords + assert "z" in coords + +def test_attr_access(): + coords = DataCoordinates(time=1, channel="DAPI", z=0) + assert coords.time == 1 + assert coords.channel == "DAPI" + assert coords.z == 0 + +def test_equality(): + coords1 = DataCoordinates(time=1, channel="DAPI", z=0) + coords2 = DataCoordinates(time=1, channel="DAPI", z=0) + assert coords1 == coords2 + + coords3 = {"time": 1, "channel": "DAPI", "z": 0} + assert coords1 == coords3 + +def test_iteration(): + coords = DataCoordinates(time=1, channel="DAPI", z=0) + keys = [key for key in coords] + assert keys == ["time", "channel", "z"] + +def test_non_standard_axis_names(): + coords = DataCoordinates(coordinate_dict={"time": 1, "channel": "DAPI", "depth": 10}) + assert coords.coordinate_dict == {"time": 1, "channel": "DAPI", "depth": 10} + assert 
coords.depth == 10 + +def test_validation_error_with_non_standard_axis_names_with_float(): + with pytest.raises(ValidationError): + coords = DataCoordinates(coordinate_dict={"time": 1, "channel": "DAPI", "depth": 10.5}) + +def test_setitem_non_standard_axis(): + coords = DataCoordinates(time=1, channel="DAPI", z=0) + coords["depth"] = 5 + assert coords["depth"] == 5 + assert coords.depth == 5 + +def test_contains_non_standard_axis(): + coords = DataCoordinates(coordinate_dict={"time": 1, "channel": "DAPI", "depth": 10}) + assert "depth" in coords + assert coords.depth == 10 + +def test_attr_access_non_standard_axis(): + coords = DataCoordinates(coordinate_dict={"time": 1, "channel": "DAPI", "depth": 10}) + assert coords.depth == 10 + +def test_equality_non_standard_axis(): + coords1 = DataCoordinates(coordinate_dict={"time": 1, "channel": "DAPI", "depth": 10}) + coords2 = DataCoordinates(coordinate_dict={"time": 1, "channel": "DAPI", "depth": 10}) + assert coords1 == coords2 + + coords3 = {"time": 1, "channel": "DAPI", "depth": 10} + assert coords1 == coords3 + +def test_iteration_non_standard_axis(): + coords = DataCoordinates(coordinate_dict={"time": 1, "channel": "DAPI", "depth": 10}) + keys = [key for key in coords] + assert keys == ["time", "channel", "depth"] +####################################### +#### Test DataCoordinatesIterator ##### +####################################### + +def test_data_coordinates_iterator_create(): + coords_list = [DataCoordinates(time=i, channel="DAPI", z=0) for i in range(3)] + iterator = DataCoordinatesIterator.create(coords_list) + assert iterator.is_finite() + assert list(iterator) == coords_list + +def test_data_coordinates_iterator_single(): + coord = DataCoordinates(time=1, channel="DAPI", z=0) + iterator = DataCoordinatesIterator.create(coord) + assert iterator.is_finite() + assert list(iterator) == [coord] + +def test_data_coordinates_iterator_dict(): + coord_dict = {"time": 1, "channel": "DAPI", "z": 0} + iterator = 
DataCoordinatesIterator.create(coord_dict) + assert iterator.is_finite() + assert list(iterator) == [DataCoordinates(**coord_dict)] + +def test_data_coordinates_iterator_contains(): + coords_list = [DataCoordinates(time=i, channel="DAPI", z=0) for i in range(3)] + iterator = DataCoordinatesIterator.create(coords_list) + coord = DataCoordinates(time=1, channel="DAPI", z=0) + assert iterator.might_produce_coordinates(coord) == True + coord_not_in_list = DataCoordinates(time=4, channel="DAPI", z=0) + assert iterator.might_produce_coordinates(coord_not_in_list) == False + +if __name__ == "__main__": + pytest.main() \ No newline at end of file From f5a47bf44cd7779838749560c34a453fe24ef660 Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Wed, 26 Jun 2024 15:56:26 +0200 Subject: [PATCH 08/20] added many unit tests --- .../new/{acq_events.py => acq_future.py} | 165 +++++------------- pycromanager/acquisition/new/apis/__init__.py | 0 .../new/{ => apis}/data_storage.py | 11 +- .../acquisition/new/{ => apis}/devices.py | 0 .../acquisition/new/base_classes/__init__.py | 0 .../new/base_classes/acq_events.py | 87 +++++++++ pycromanager/acquisition/new/data_coords.py | 35 ++-- pycromanager/acquisition/new/data_handler.py | 134 ++++++++------ pycromanager/acquisition/new/executor.py | 6 +- .../data_storage_implementations.py | 72 +++++++- .../implementations/event_implementations.py | 4 +- .../mm_device_implementations.py | 2 +- .../acquisition/new/test/sandbox_device.py | 2 +- .../new/test/test_acquisition_futures.py | 147 ++++++++++++++++ .../acquisition/new/test/test_data_coords.py | 4 + .../acquisition/new/test/test_data_handler.py | 118 +++++++++++++ .../acquisition/new/test/test_data_storage.py | 53 ++++++ ...t_execution.py => test_event_execution.py} | 2 +- requirements.txt | 3 +- 19 files changed, 641 insertions(+), 204 deletions(-) rename pycromanager/acquisition/new/{acq_events.py => acq_future.py} (61%) create mode 
100644 pycromanager/acquisition/new/apis/__init__.py rename pycromanager/acquisition/new/{ => apis}/data_storage.py (92%) rename pycromanager/acquisition/new/{ => apis}/devices.py (100%) create mode 100644 pycromanager/acquisition/new/base_classes/__init__.py create mode 100644 pycromanager/acquisition/new/base_classes/acq_events.py create mode 100644 pycromanager/acquisition/new/test/test_acquisition_futures.py create mode 100644 pycromanager/acquisition/new/test/test_data_handler.py create mode 100644 pycromanager/acquisition/new/test/test_data_storage.py rename pycromanager/acquisition/new/test/{t3st_event_execution.py => test_event_execution.py} (94%) diff --git a/pycromanager/acquisition/new/acq_events.py b/pycromanager/acquisition/new/acq_future.py similarity index 61% rename from pycromanager/acquisition/new/acq_events.py rename to pycromanager/acquisition/new/acq_future.py index 2ae7141d..3645f989 100644 --- a/pycromanager/acquisition/new/acq_events.py +++ b/pycromanager/acquisition/new/acq_future.py @@ -1,28 +1,17 @@ -from typing import Union, List, Tuple, Callable, Dict, Set, Optional, Any, Sequence -import numpy as np -from queue import Queue -from typing import Iterable -from abc import ABC, abstractmethod +from typing import Union, Optional, Any, Dict, Tuple, Sequence, Set import threading -import weakref import warnings - from pycromanager.acquisition.new.data_coords import DataCoordinates, DataCoordinatesIterator -from pycromanager.acquisition.new.data_storage import DataStorageAPI -from pycromanager.acquisition.new.data_handler import DataHandler - -from pydantic import BaseModel -from pydantic import field_validator +from typing import TYPE_CHECKING -# def atomic_instruction(cls): -# cls.atomic_instruction = True -# return cls - +if TYPE_CHECKING: # avoid circular imports + from pycromanager.acquisition.new.data_handler import DataHandler +from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, 
DataProducingAcquisitionEvent class AcquisitionFuture: - def __init__(self, event: Union['AcquisitionEvent', 'DataProducingAcquisitionEvent'], data_handler: DataHandler): + def __init__(self, event: Union[AcquisitionEvent, DataProducingAcquisitionEvent], data_handler: "DataHandler"): self._event = event event._set_future(self) # so that the event can notify the future when it is done and when data is acquired self._data_handler = data_handler @@ -31,12 +20,12 @@ def __init__(self, event: Union['AcquisitionEvent', 'DataProducingAcquisitionEve self._event_complete = False self._acquired_data_coordinates: Set[DataCoordinates] = set() self._processed_data_coordinates: Set[DataCoordinates] = set() - self._saved_data_coordinates: Set[DataCoordinates] = set() + self._stored_data_coordinates: Set[DataCoordinates] = set() self._awaited_acquired_data: Dict[DataCoordinates, Tuple[Any, Any]] = {} self._awaited_processed_data: Dict[DataCoordinates, Tuple[Any, Any]] = {} - self._awaited_saved_data: Dict[DataCoordinates, Tuple[Any, Any]] = {} + self._awaited_stored_data: Dict[DataCoordinates, Tuple[Any, Any]] = {} - def _notify_execution_complete(self, exception: Exception): + def _notify_execution_complete(self, exception: Exception = None): """ Notify the future that the event has completed """ @@ -52,41 +41,39 @@ def await_execution(self): while not self._event_complete: self._event_complete_condition.wait() - def _notify_data(self, image_coordinates: DataCoordinates, data, metadata, processed=False, saved=False): + def _notify_data(self, image_coordinates: DataCoordinates, data, metadata, processed=False, stored=False): """ - Notify the future that data has been acquired by a data producing event. This does not mean - the event is done executing. It also does not mean the data has been stored yet. 
It is simply - in an output queue waiting to be gotten by the image storage/image/processing thread + Called by the data handler to notify the future that data has been acquired/processed/saved + Passes references to the data and metadata, so that if something is waiting on the future + to asynchronously retrieve the data, it is held onto for fast access Args: image_coordinates: The coordinates of the acquired data data: The data itself metadata: Metadata associated with the data processed: Whether the data has been processed - saved: Whether the data has been saved + stored: Whether the data has been saved """ with self._data_notification_condition: # pass the data to the function that is waiting on it - if not processed and not saved: + if not processed and not stored: self._acquired_data_coordinates.add(image_coordinates) - if image_coordinates in self._awaited_acquired_data: + if image_coordinates in self._awaited_acquired_data.keys(): self._awaited_acquired_data[ image_coordinates] = (data if self._awaited_acquired_data[image_coordinates][0] else None, metadata if self._awaited_acquired_data[image_coordinates][1] else None) - elif processed and not saved: + elif processed and not stored: self._processed_data_coordinates.add(image_coordinates) - if image_coordinates in self._awaited_processed_data: + if image_coordinates in self._awaited_processed_data.keys(): self._awaited_processed_data[ image_coordinates] = (data if self._awaited_processed_data[image_coordinates][0] else None, metadata if self._awaited_processed_data[image_coordinates][1] else None) - elif processed and saved: - self._saved_data_coordinates.add(image_coordinates) - if image_coordinates in self._awaited_saved_data: - self._awaited_saved_data[ - image_coordinates] = (data if self._awaited_saved_data[image_coordinates][0] else None, - metadata if self._awaited_saved_data[image_coordinates][1] else None) - else: - raise ValueError("Invalid arguments") + else: # stored + 
self._stored_data_coordinates.add(image_coordinates) + if image_coordinates in self._awaited_stored_data.keys(): + self._awaited_stored_data[ + image_coordinates] = (data if self._awaited_stored_data[image_coordinates][0] else None, + metadata if self._awaited_stored_data[image_coordinates][1] else None) self._data_notification_condition.notify_all() def _check_if_coordinates_possible(self, coordinates): @@ -107,7 +94,7 @@ def await_data(self, coordinates: Optional[Union[DataCoordinates, Dict[str, Unio DataCoordinatesIterator, Sequence[DataCoordinates], Sequence[Dict[str, Union[int, str]]]]], return_data: bool = False, return_metadata: bool = False, - data_processed: bool = False, data_stored: bool = False): + processed: bool = False, stored: bool = False): """ Block until the event's data is acquired/processed/saved, and optionally return the data/metadata. when waiting for the data to be acquired (i.e. before it is processed), since there is no way to guarantee that @@ -119,10 +106,13 @@ def await_data(self, coordinates: Optional[Union[DataCoordinates, Dict[str, Unio objects/dictionaries. If None, this function will block until the next data is acquired/processed/saved return_data: whether to return the data return_metadata: whether to return the metadata - data_processed: whether to wait until data has been processed. If not data processor is in use, + processed: whether to wait until data has been processed. If not data processor is in use, then this parameter has no effect - data_stored: whether to wait for data that has been saved + stored: whether to wait for data that has been stored. If the call to await data occurs before the + data gets passed off to the storage class, then it will be stored in memory and returned immediately. 
+ without having to retrieve """ + # Check if this event produces data if not isinstance(self._event, DataProducingAcquisitionEvent): raise ValueError("This event does not produce data") @@ -140,7 +130,7 @@ def await_data(self, coordinates: Optional[Union[DataCoordinates, Dict[str, Unio with self._data_notification_condition: # lock to avoid inconsistencies with the data that is being awaited for data_coordinates in coordinates_iterator: - if not data_processed and not data_stored: + if not processed and not stored: # make sure this is a valid thing to wait for. This can only be done before processing and # storage, because processors and data storage classes may optionally modify the data self._check_if_coordinates_possible(coordinates) @@ -150,14 +140,14 @@ def await_data(self, coordinates: Optional[Union[DataCoordinates, Dict[str, Unio self._awaited_acquired_data[coordinates] = (return_data, return_metadata) else: to_read.add(data_coordinates) - elif data_processed and not data_stored: + elif processed and not stored: if data_coordinates not in self._processed_data_coordinates: self._awaited_processed_data[coordinates] = (return_data, return_metadata) else: to_read.add(data_coordinates) else: # data stored - if data_coordinates not in self._saved_data_coordinates: - self._awaited_saved_data[coordinates] = (return_data, return_metadata) + if data_coordinates not in self._stored_data_coordinates: + self._awaited_stored_data[coordinates] = (return_data, return_metadata) else: to_read.add(data_coordinates) @@ -168,11 +158,12 @@ def await_data(self, coordinates: Optional[Union[DataCoordinates, Dict[str, Unio result[data_coordinates] = (data if return_data else None, metadata if return_metadata else None) # now that we've gotten all the data from storage that was missed before this method was called, - # proceed to getting all the data was awaited on another thread + # proceed to getting all the data that was awaited on another thread with 
self._data_notification_condition: # order doesn't matter here because we're just grabbing it all from RAM - if not data_processed and not data_stored: - for data_coordinates in self._awaited_acquired_data.keys(): + if not processed and not stored: + data_coordinates_list = list(self._awaited_acquired_data.keys()) + for data_coordinates in data_coordinates_list: data = return_data while data is True or data is False: # once the data is no longer a boolean, it's the actual data self._data_notification_condition.wait() @@ -180,20 +171,23 @@ def await_data(self, coordinates: Optional[Union[DataCoordinates, Dict[str, Unio # remove from temporary storage and put into result result[data_coordinates] = self._awaited_acquired_data.pop(data_coordinates) - # Same thing for other steps in the pipeline - for data_coordinates in self._awaited_processed_data.keys(): + elif processed and not stored: + data_coordinates_list = list(self._awaited_processed_data.keys()) + for data_coordinates in data_coordinates_list: data = return_data while data is True or data is False: self._data_notification_condition.wait() data, metadata = self._awaited_processed_data[data_coordinates] result[data_coordinates] = self._awaited_processed_data.pop(data_coordinates) - for data_coordinates in self._awaited_saved_data.keys(): + else: # data stored + data_coordinates_list = list(self._awaited_stored_data.keys()) + for data_coordinates in data_coordinates_list: data = return_data while data is True or data is False: self._data_notification_condition.wait() - data, metadata = self._awaited_saved_data[data_coordinates] - result[data_coordinates] = self._awaited_saved_data.pop(data_coordinates) + data, metadata = self._awaited_stored_data[data_coordinates] + result[data_coordinates] = self._awaited_stored_data.pop(data_coordinates) # Now package the result up all_data, all_metadata = zip(*result.values()) @@ -208,70 +202,3 @@ def await_data(self, coordinates: Optional[Union[DataCoordinates, Dict[str, 
Unio elif return_metadata: return all_metadata - -class AcquisitionEvent(BaseModel, ABC): - num_retries_on_exception: int = 0 - _exception: Exception = None - _future_weakref: Optional[weakref.ReferenceType[AcquisitionFuture]] = None - - # TODO: want to make this specific to certain attributes - class Config: - arbitrary_types_allowed = True - - @abstractmethod - def execute(self): - """ - Execute the event. This event is called by the executor, and should be overriden by subclasses to implement - the event's functionality - """ - pass - - def _set_future(self, future: AcquisitionFuture): - """ - Called by the executor to set the future associated with this event - """ - # Store this as a weakref so that if user code does not hold a reference to the future, - # it can be garbage collected. The event should not give access to the future to user code - self._future_weakref = weakref.ref(future) - - def _post_execution(self): - """ - Method that is called after the event is executed to update acquisition futures about the event's status. - This is called automatically by the Executor and should not be overriden by subclasses. - - Args: - future (AcquisitionFuture): The future associated with this event - """ - if self._future_weakref is None: - raise ValueError("Event has not been executed yet") - future = self._future_weakref() - if future is not None: - future._notify_execution_complete(self._exception) - - - -class DataProducingAcquisitionEvent(AcquisitionEvent): - """ - Special type of acquisition event that produces data. It must be passed an image_coordinate_iterator - object that generates the coordinates of each piece of data (i.e. image) that will be produced by the event. - For example, {time: 0}, {time: 1}, {time: 2} for a time series acquisition. - """ - _data_handler: DataHandler = None # executor will provide this at runtime - # This is eventually an ImageCoordinatesIterator. 
If an Iterable[ImageCoordinates] or - # Iterable[Dict[str, Union[int, str]]] is provided, it will be auto-converted to an ImageCoordinatesIterator - image_coordinate_iterator: Union[DataCoordinatesIterator, - Iterable[DataCoordinates], - Iterable[Dict[str, Union[int, str]]]] - - @field_validator('image_coordinate_iterator', mode='before') - def _convert_to_image_coordinates_iterator(cls, v): - return DataCoordinatesIterator.create(v) - - def put_data(self, data_coordinates: DataCoordinates, image: np.ndarray, metadata: Dict): - """ - Put data into the output queue - """ - self._data_handler.put(data_coordinates, image, metadata, self._future_weakref()) - - - diff --git a/pycromanager/acquisition/new/apis/__init__.py b/pycromanager/acquisition/new/apis/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pycromanager/acquisition/new/data_storage.py b/pycromanager/acquisition/new/apis/data_storage.py similarity index 92% rename from pycromanager/acquisition/new/data_storage.py rename to pycromanager/acquisition/new/apis/data_storage.py index be0c1828..1792306f 100644 --- a/pycromanager/acquisition/new/data_storage.py +++ b/pycromanager/acquisition/new/apis/data_storage.py @@ -5,14 +5,14 @@ from typing import Protocol, runtime_checkable, Union, List, Tuple, Dict, Any from pycromanager.acquisition.new.data_coords import DataCoordinates import numpy as np -from pydantic.json import JsonValue - +from pydantic.types import JsonValue @runtime_checkable class DataStorageAPI(Protocol): - # TODO: about these type hints: better to use the dicts only or the DataCoordinates. - # TODO: Also do this with kwargs + # TODO: about these type hints: better to use the dicts only or also include the DataCoordinates? 
+ # DataCoordinates can essentially be used as a dict anyway due to duck typing, so + # maybe its better that other implementations not have to depend on the DataCoordinates class def __contains__(self, data_coordinates: Union[DataCoordinates, Dict[str, Union[int, str]]]) -> bool: """Check if item is in the container.""" ... @@ -63,11 +63,14 @@ def close(self): """ ... + #### Other methods copied from the NDStorage API that possibly could be useful to include in the future #### + # @abstractmethod # def initialize(self, summary_metadata: dict): # """ # Initialize the dataset with summary metadata # """ + # TODO: if implementation, may want to change this global metadata # pass # @abstractmethod diff --git a/pycromanager/acquisition/new/devices.py b/pycromanager/acquisition/new/apis/devices.py similarity index 100% rename from pycromanager/acquisition/new/devices.py rename to pycromanager/acquisition/new/apis/devices.py diff --git a/pycromanager/acquisition/new/base_classes/__init__.py b/pycromanager/acquisition/new/base_classes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pycromanager/acquisition/new/base_classes/acq_events.py b/pycromanager/acquisition/new/base_classes/acq_events.py new file mode 100644 index 00000000..a0ac6468 --- /dev/null +++ b/pycromanager/acquisition/new/base_classes/acq_events.py @@ -0,0 +1,87 @@ +from typing import Union, Tuple, Dict, Set, Optional, Any, Sequence +import numpy as np +from typing import Iterable +from abc import ABC, abstractmethod +import weakref + +from pydantic import BaseModel +from pydantic import field_validator + +from pycromanager.acquisition.new.data_coords import DataCoordinates, DataCoordinatesIterator + +from typing import TYPE_CHECKING +if TYPE_CHECKING: # avoid circular imports + from pycromanager.acquisition.new.acq_future import AcquisitionFuture + from pycromanager.acquisition.new.data_handler import DataHandler + + +# def atomic_instruction(cls): +# cls.atomic_instruction = True +# 
return cls + +class AcquisitionEvent(BaseModel, ABC): + num_retries_on_exception: int = 0 + _exception: Exception = None + _future_weakref: Optional[weakref.ReferenceType['AcquisitionFuture']] = None + + # TODO: want to make this specific to certain attributes + class Config: + arbitrary_types_allowed = True + + @abstractmethod + def execute(self): + """ + Execute the event. This event is called by the executor, and should be overriden by subclasses to implement + the event's functionality + """ + pass + + def _set_future(self, future: 'AcquisitionFuture'): + """ + Called by the executor to set the future associated with this event + """ + # Store this as a weakref so that if user code does not hold a reference to the future, + # it can be garbage collected. The event should not give access to the future to user code + self._future_weakref = weakref.ref(future) + + def _post_execution(self): + """ + Method that is called after the event is executed to update acquisition futures about the event's status. + This is called automatically by the Executor and should not be overriden by subclasses. + + Args: + future (AcquisitionFuture): The future associated with this event + """ + if self._future_weakref is None: + raise ValueError("Event has not been executed yet") + future = self._future_weakref() + if future is not None: + future._notify_execution_complete(self._exception) + + + +class DataProducingAcquisitionEvent(AcquisitionEvent): + """ + Special type of acquisition event that produces data. It must be passed an image_coordinate_iterator + object that generates the coordinates of each piece of data (i.e. image) that will be produced by the event. + For example, {time: 0}, {time: 1}, {time: 2} for a time series acquisition. + """ + _data_handler: "DataHandler" = None # executor will provide this at runtime + # This is eventually an ImageCoordinatesIterator. 
If an Iterable[ImageCoordinates] or + # Iterable[Dict[str, Union[int, str]]] is provided, it will be auto-converted to an ImageCoordinatesIterator + image_coordinate_iterator: Union[DataCoordinatesIterator, + Iterable[DataCoordinates], + Iterable[Dict[str, Union[int, str]]]] + + @field_validator('image_coordinate_iterator', mode='before') + def _convert_to_image_coordinates_iterator(cls, v): + return DataCoordinatesIterator.create(v) + + def put_data(self, data_coordinates: DataCoordinates, image: np.ndarray, metadata: Dict): + """ + Put data into the output queue + """ + self._data_handler.put(data_coordinates, image, metadata, self._future_weakref()) + + + diff --git a/pycromanager/acquisition/new/data_coords.py b/pycromanager/acquisition/new/data_coords.py index 591a29ac..2cddb4e3 100644 --- a/pycromanager/acquisition/new/data_coords.py +++ b/pycromanager/acquisition/new/data_coords.py @@ -1,7 +1,8 @@ from typing import Dict, Union, Optional, Iterator, List, Tuple, Iterable, Sequence, Any -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, model_validator +from collections.abc import MutableMapping -class DataCoordinates(BaseModel): +class DataCoordinates(BaseModel, MutableMapping): """ Represents the coordinates of a piece of data (conventionally, a single 2D image). This is a convenience wrapper around a dictionary of axis name to axis value where the axis value can be either an integer or a string. 
@@ -47,6 +48,13 @@ def __init__(self, coordinate_dict: Dict[str, Union[int, str]] = None, if not hasattr(self, key): setattr(self, key, value) + @model_validator(mode="before") + def _set_coordinates(cls, values): + coordinate_dict = values.get('coordinate_dict', {}) + for key, value in coordinate_dict.items(): + if key not in values: + values[key] = value + return values class Config: validate_assignment = True @@ -87,27 +95,20 @@ def __eq__(self, other): def __hash__(self): return hash(frozenset(self.coordinate_dict.items())) - def items(self): - return self.coordinate_dict.items() - - def keys(self): - return self.coordinate_dict.keys() - - def values(self): - return self.coordinate_dict.values() - - def get(self, key, default=None): - return self.coordinate_dict.get(key, default) - - def clear(self): - self.coordinate_dict.clear() - def __len__(self): return len(self.coordinate_dict) def __iter__(self): return iter(self.coordinate_dict) + def __repr__(self) -> str: + # Provide a concise and clear representation + return f"DataCoordinates({self.coordinate_dict})" + + def __str__(self) -> str: + # Provide a user-friendly string representation + return f"DataCoordinates with coordinates: {self.coordinate_dict}" + class DataCoordinatesIterator: @classmethod diff --git a/pycromanager/acquisition/new/data_handler.py b/pycromanager/acquisition/new/data_handler.py index 53fc88dd..f946f7cb 100644 --- a/pycromanager/acquisition/new/data_handler.py +++ b/pycromanager/acquisition/new/data_handler.py @@ -1,24 +1,35 @@ import threading import queue -from typing import Any, Dict, Tuple, Callable, Union, Iterable, Sequence, Optional +from typing import Any, Dict, Tuple, Callable, Union, Sequence, Optional import numpy as np from pycromanager.acquisition.new.data_coords import DataCoordinates -from pycromanager.acquisition.new.data_storage import DataStorageAPI -from pycromanager.acquisition.new.acq_events import AcquisitionFuture -from pydantic.json import JsonValue +from 
pycromanager.acquisition.new.apis.data_storage import DataStorageAPI +from pycromanager.acquisition.new.acq_future import AcquisitionFuture +from pydantic.types import JsonValue +from dataclasses import dataclass class _PeekableQueue(queue.Queue): def peek(self): - with self.not_empty: + with self.mutex: while not self._qsize(): self.not_empty.wait() - with self.mutex: - return self.queue[0] + return self.queue[0] + +# make a dataclass to hold the data, metadata, future, and boolean flag for whether the data has been processed +@dataclass +class _DataMetadataFutureHolder: + data: np.ndarray + metadata: Dict + future: Optional[AcquisitionFuture] + processed: bool = False + + def upack(self): + return self.data, self.metadata, self.future class DataHandler: """ - Object that handles acquired data while it is waiting to be saved. This object is thread safe and handles + Object that handles acquired data while it is waiting to be saved. This object is thread safe and manages the handoff of images while they are waiting to be saved, providing temporary access to it along the way. This class manages one or two queues/threads, depending on whether a processing function is provided. 
If a @@ -29,17 +40,15 @@ class DataHandler: # This class must create at least one additional thread (the saving thread) # and may create another for processing data - def __init__(self, storage: DataStorageAPI, process_function: Callable[[DataCoordinates, np.ndarray, JsonValue], Optional[Union[DataCoordinates, np.ndarray, JsonValue, - Sequence[DataCoordinates, np.ndarray, JsonValue]]]] = None): + Tuple[DataCoordinates, np.ndarray, JsonValue]]]] = None): self._storage = storage self._process_function = process_function self._intake_queue = _PeekableQueue() # locks for synchronizing access the queues/dicts - self._intake_lock = threading.Lock() - self._data_metadata_future: Dict[Any, Tuple[np.ndarray, Dict, Optional[AcquisitionFuture]]] = {} + self._data_metadata_future_tuple: Dict[Any, _DataMetadataFutureHolder] = {} self._intake_thread = threading.Thread(target=self._run_intake_thread) self._intake_thread.start() if process_function: @@ -47,6 +56,9 @@ def __init__(self, storage: DataStorageAPI, self._processed_queue = _PeekableQueue() self._storage_thread = threading.Thread(target=self._run_storage_thread) self._storage_thread.start() + else: + self._processed_queue = None + self._storage_thread = None @staticmethod def _unpack_processed_image(processed): @@ -59,8 +71,9 @@ def _unpack_processed_image(processed): def _run_intake_thread(self): while True: if self._process_function: - with self._intake_lock: - coordinates = self._intake_queue.peek() + # don't remove it until it has been processed + coordinates = self._intake_queue.peek() + print('peek success') # shutdown condition if coordinates is None: self._intake_queue.get() @@ -70,28 +83,39 @@ def _run_intake_thread(self): self._processed_queue.put(None) # propagate the shutdown signal break - data, metadata, _ = self._data_metadata_future[coordinates] + data, metadata, future = self._data_metadata_future_tuple[coordinates].upack() processed = self._process_function(coordinates, data, metadata) + + 
original_coordinates = coordinates + original_data_coordinates_replaced = False # deal with the fact that the processor may return no items, a single item, or a list of items - with self._intake_lock: - with self._processed_lock: - # Move from intake queue to processed or saving queue as appropriate - coordinates = self._intake_queue.get() - # discard old data/metadata because it has now been processed - _, _, future = self._data_metadata_future.pop(coordinates) - if processed is None: - pass # the data was discarded or diverted - elif isinstance(processed, tuple) and not isinstance(processed[0], tuple): # single item - coordinates, data, metadata = self._unpack_processed_image(processed) - self._processed_queue.put(coordinates) - self._data_metadata_future[coordinates] = (data, metadata, future) - future._notify_data(coordinates, data, metadata, processed=True, saved=False) - else: # multiple items - for item in processed: - coordinates, data, metadata = self._unpack_processed_image(item) - self._processed_queue.put(coordinates) - self._data_metadata_future[coordinates] = (data, metadata, future) - future._notify_data(coordinates, data, metadata, processed=True, saved=False) + if processed is None: + pass # the data was discarded or diverted + # Could add callback here to notify the future that the data was discarded + elif isinstance(processed, tuple) and not isinstance(processed[0], tuple): # single item + coordinates, data, metadata = self._unpack_processed_image(processed) + if coordinates == original_coordinates: + original_data_coordinates_replaced = True + self._processed_queue.put(coordinates) + self._data_metadata_future_tuple[coordinates] = _DataMetadataFutureHolder( + data, metadata, future, processed=True) + if future: + future._notify_data(coordinates, data, metadata, processed=True, stored=False) + else: # multiple items + for item in processed: + coordinates, data, metadata = self._unpack_processed_image(item) + if coordinates == 
original_coordinates: + original_data_coordinates_replaced = True + self._processed_queue.put(coordinates) + self._data_metadata_future_tuple[coordinates] = _DataMetadataFutureHolder( + data, metadata, future, processed=True) + if future: + future._notify_data(coordinates, data, metadata, processed=True, stored=False) + if not original_data_coordinates_replaced: + # if the image processor did not provide a new image with the same coordinates, discard the original + self._data_metadata_future_tuple.pop(original_coordinates) + # remove the item from the intake queue + self._intake_queue.get() else: # transfer to storage thread shutdown = self._transfer_to_storage() @@ -110,21 +134,20 @@ def _transfer_to_storage(self): the source queue is the output queue of the processing function. If there is no processing function, the source queue is the intake queue. """ - # lock = self._processed_lock if self._process_function else self._intake_lock + coordinates = self._processed_queue.peek() if self._process_function else self._intake_queue.peek() if coordinates is None: + # shutdown condition self._processed_queue.get() if self._process_function else self._intake_queue.get() # remove it self._storage.finish() return True else: - data, metadata, future = self._data_metadata_future[coordinates] + data, metadata, future = self._data_metadata_future_tuple[coordinates].upack() self._storage.put(coordinates, data, metadata) # once this returns the storage is responsible for the data - lock = self._processed_lock if self._process_function else self._intake_lock - with lock: - if future: - future._notify_data(coordinates, data, metadata, processed=True, saved=True) - coordinates = self._processed_queue.get() if self._process_function else self._intake_queue.get() - self._data_metadata_future.pop(coordinates) + coordinates = self._processed_queue.get() if self._process_function else self._intake_queue.get() + self._data_metadata_future_tuple.pop(coordinates) + if future: + 
future._notify_data(coordinates, data, metadata, processed=True, stored=True) return False def join(self): @@ -135,16 +158,20 @@ def join(self): if self._storage_thread: self._storage_thread.join() - def get(self, coordinates: DataCoordinates, return_data=True, return_metadata=True + def get(self, coordinates: DataCoordinates, return_data=True, return_metadata=True, processed=None, ) -> Optional[Tuple[np.ndarray, JsonValue]]: """ Get an image and associated metadata. If they are present, either in the intake queue or the storage queue (if it exists), return them. If not present, get them from the storage object. If not present there, return None """ - data_metadata_future = self._data_metadata_future.get(coordinates, None) + data_metadata_future = self._data_metadata_future_tuple.get(coordinates, None) + if processed is not None: + if processed and data_metadata_future and not data_metadata_future.processed: + # the data is not yet processed, so return None + return None data, metadata = None, None if data_metadata_future: - data, metadata, future = data_metadata_future + data, metadata, future = data_metadata_future.upack() else: # its not currently managed by the data handler, so check the storage object # don't do both if you dont have to because this may be from disk @@ -166,8 +193,17 @@ def put(self, coordinates: Any, image: np.ndarray, metadata: Dict, acquisition_f pipeline. If an acquisition future is provided, it will be notified when the image arrives, is processed, and is stored. 
""" - with self._intake_lock: - self._intake_queue.put(coordinates) - self._data_metadata_future[coordinates] = (image, metadata, acquisition_future) + # store the data before adding a record of it to the queue, to avoid having to lock anything + self._data_metadata_future_tuple[coordinates] = _DataMetadataFutureHolder( + image, metadata, acquisition_future) + self._intake_queue.put(coordinates) + if acquisition_future: - acquisition_future._notify_data(coordinates, image, metadata, processed=False, saved=False) + acquisition_future._notify_data(coordinates, image, metadata, processed=False, stored=False) + + def finish(self): + """ + Signal to the data handler that no more data will be added. This will cause all threads to initiate shutdown + and call the finish() method of the storage object. + """ + self._intake_queue.put(None) \ No newline at end of file diff --git a/pycromanager/acquisition/new/executor.py b/pycromanager/acquisition/new/executor.py index 260b46e1..a593fabc 100644 --- a/pycromanager/acquisition/new/executor.py +++ b/pycromanager/acquisition/new/executor.py @@ -1,18 +1,16 @@ """ Class that executes acquistion events across a pool of threads """ - import threading from collections import deque from typing import Deque import warnings import traceback from pydantic import BaseModel -import time import uuid -from pycromanager.acquisition.new.acq_events import AcquisitionFuture -from pycromanager.acquisition.new.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent +from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionFuture +from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent class _ExecutionThreadManager(BaseModel): diff --git a/pycromanager/acquisition/new/implementations/data_storage_implementations.py b/pycromanager/acquisition/new/implementations/data_storage_implementations.py index 9f6093c7..02546653 100644 --- 
a/pycromanager/acquisition/new/implementations/data_storage_implementations.py +++ b/pycromanager/acquisition/new/implementations/data_storage_implementations.py @@ -1,10 +1,72 @@ """ +Adapters for NDTiff and NDRam storage classes +""" +from typing import Union, Dict +from pycromanager.acquisition.new.apis.data_storage import DataStorageAPI +from pycromanager.acquisition.new.data_coords import DataCoordinates +from ndstorage import NDRAMDataset, NDTiffDataset +import numpy as np +from pydantic.types import JsonValue -TODO: adapters for NDTiff and NDRam +class NDStorage(DataStorageAPI): + """ + Wrapper class for NDTiffDataset and NDRAMDataset to implement the DataStorageAPI protocol + """ -""" + def __init__(self, directory: str = None, name: str = None, summary_metadata: JsonValue = None): + if directory is None: + self._storage = NDRAMDataset() + else: + self._storage = NDTiffDataset(dataset_path=directory, name=name, writable=True) + if summary_metadata is None: + summary_metadata = {} + self._storage.initialize(summary_metadata) + + def __contains__(self, data_coordinates: Union[DataCoordinates, Dict[str, Union[int, str]]]) -> bool: + """Check if item is in the container.""" + return self._storage.has_image(**data_coordinates) + + def get_data(self, data_coordinates: Union[DataCoordinates, Dict[str, Union[int, str]]]) -> np.ndarray: + """ + Read a single data corresponding to the given coordinates + """ + return self._storage.read_image(**data_coordinates) + + def get_metadata(self, data_coordinates: Union[DataCoordinates, Dict[str, Union[int, str]]]) -> JsonValue: + """ + Read metadata corresponding to the given coordinates + """ + return self._storage.read_metadata(**data_coordinates) + + def put(self, data_coordinates: Union[DataCoordinates, Dict[str, Union[int, str]]], data: np.ndarray, + metadata: JsonValue): + """ + Add data and corresponding metadata to the dataset. 
Once this method has been called, the data and metadata + should be immediately available to be read by get_data and get_metadata. For disk-backed storage, this may + require temporarily caching the data in memory until it can be written to disk. + + Parameters + ---------- + data_coordinates : DataCoordinates or dict + Coordinates of the data + data : np.ndarray + Data to be stored + metadata : dict + Metadata associated with the data + """ + self._storage.put_image(data_coordinates, data, metadata) + + def finish(self): + """ + No more data will be added to the dataset. This method should be called after the last call to put() + and makes the dataset read-only. + """ + self._storage.finish() + + def close(self): + """ + Close the dataset, releasing any resources it holds. No more images will be added or requested + """ + self._storage.close() -# Can you just pass in data coords directly cause of Duck typing? -# Maybe implement a protocol in NDTiff to described dict-like objects? -# TODO: may need to implement items() for data coords \ No newline at end of file diff --git a/pycromanager/acquisition/new/implementations/event_implementations.py b/pycromanager/acquisition/new/implementations/event_implementations.py index 197ec847..16353fa3 100644 --- a/pycromanager/acquisition/new/implementations/event_implementations.py +++ b/pycromanager/acquisition/new/implementations/event_implementations.py @@ -3,8 +3,8 @@ """ from typing import Iterable import itertools -from pycromanager.acquisition.new.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent -from pycromanager.acquisition.new.devices import Camera +from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent +from pycromanager.acquisition.new.apis.devices import Camera from pycromanager.acquisition.new.data_coords import DataCoordinates diff --git a/pycromanager/acquisition/new/implementations/mm_device_implementations.py 
b/pycromanager/acquisition/new/implementations/mm_device_implementations.py index 1646b180..0c93db9f 100644 --- a/pycromanager/acquisition/new/implementations/mm_device_implementations.py +++ b/pycromanager/acquisition/new/implementations/mm_device_implementations.py @@ -2,7 +2,7 @@ Implementation of Micro-Manager devices.py in terms of the AcqEng bottom API """ -from pycromanager.acquisition.new.devices import SingleAxisMovable, DoubleAxisMovable, Camera +from pycromanager.acquisition.new.apis.devices import Camera from pycromanager.core import Core import numpy as np import pymmcore diff --git a/pycromanager/acquisition/new/test/sandbox_device.py b/pycromanager/acquisition/new/test/sandbox_device.py index 095eddeb..4f77f71e 100644 --- a/pycromanager/acquisition/new/test/sandbox_device.py +++ b/pycromanager/acquisition/new/test/sandbox_device.py @@ -18,7 +18,7 @@ executor = AcquisitionEventExecutor() -from pycromanager.acquisition.new.acq_events import StartCapture, ReadoutImages, DataHandler +from pycromanager.acquisition.new.base_classes.acq_events import StartCapture, ReadoutImages, DataHandler num_images = 100 data_output_queue = DataHandler() diff --git a/pycromanager/acquisition/new/test/test_acquisition_futures.py b/pycromanager/acquisition/new/test/test_acquisition_futures.py new file mode 100644 index 00000000..0bc83106 --- /dev/null +++ b/pycromanager/acquisition/new/test/test_acquisition_futures.py @@ -0,0 +1,147 @@ +import threading +import pytest +import numpy as np +from pycromanager.acquisition.new.data_coords import DataCoordinates, DataCoordinatesIterator +from typing import Dict, Any +import time + +# Assuming these are the correct imports based on the provided code +from pycromanager.acquisition.new.data_handler import DataHandler +from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent +from pycromanager.acquisition.new.acq_future import AcquisitionFuture + + +class 
MockDataHandler(DataHandler): + def __init__(self): + self.data_storage = {} + + def put(self, coords: DataCoordinates, image: np.ndarray, metadata: Dict, future: AcquisitionFuture = None): + self.data_storage[coords] = (image, metadata) + + def get(self, coords: DataCoordinates, return_data=True, return_metadata=True, processed=False): + if coords not in self.data_storage: + return None, None + data, metadata = self.data_storage[coords] + return (data if return_data else None, metadata if return_metadata else None) + + +class MockDataProducingAcquisitionEvent(DataProducingAcquisitionEvent): + + def __init__(self): + super().__init__(image_coordinate_iterator=DataCoordinatesIterator.create( + [{"time": 0}, {"time": 1}, {"time": 2}])) + + def execute(self): + pass + +@pytest.fixture +def mock_data_handler(): + return MockDataHandler() + + +@pytest.fixture +def mock_event(): + return MockDataProducingAcquisitionEvent() + + +@pytest.fixture +def acquisition_future(mock_event, mock_data_handler): + return AcquisitionFuture(event=mock_event, data_handler=mock_data_handler) + + +def test_notify_execution_complete(acquisition_future): + """ + Test that the acquisition future is notified when the event is complete + """ + def complete_event(): + time.sleep(0.1) + acquisition_future._notify_execution_complete(None) + + thread = threading.Thread(target=complete_event) + thread.start() + acquisition_future.await_execution() + assert acquisition_future._event_complete + + +def test_notify_data(acquisition_future): + """ + Test that the acquisition future is notified when data is added + """ + coords = DataCoordinates({"time": 1}) + image = np.array([[1, 2], [3, 4]], dtype=np.uint16) + metadata = {"some": "metadata"} + + acquisition_future._notify_data(coords, image, metadata) + assert coords in acquisition_future._acquired_data_coordinates + + +def test_await_data(acquisition_future): + """ Test that the acquisition future can wait for data to be added """ + coords = 
DataCoordinates({"time": 1}) + image = np.array([[1, 2], [3, 4]], dtype=np.uint16) + metadata = {"some": "metadata"} + + def wait_and_notify(): + # Delay so that the await_data call is made before the data is added it it gets held in RAM + # rather than retrieved from the storage by the data handler + time.sleep(2) + acquisition_future._notify_data(coords, image, metadata) + thread = threading.Thread(target=wait_and_notify) + thread.start() + + data, meta = acquisition_future.await_data(coords, return_data=True, return_metadata=True) + assert np.array_equal(data, image) + assert meta == metadata + + +def test_await_data_processed(acquisition_future): + """ Test that the acquisition future can wait for processed data to be added """ + coords = DataCoordinates(time=1) + image = np.array([[1, 2], [3, 4]], dtype=np.uint16) + metadata = {"some": "metadata"} + + def wait_and_notify(): + # Delay so that the await_data call is made before the data is added it it gets held in RAM + # rather than retrieved from the storage by the data handler + time.sleep(2) + acquisition_future._notify_data(coords, image, metadata, processed=True) + thread = threading.Thread(target=wait_and_notify) + thread.start() + + data, meta = acquisition_future.await_data(coords, return_data=True, return_metadata=True, processed=True) + assert np.array_equal(data, image) + assert meta == metadata + + +def test_await_data_saved(acquisition_future): + coords = DataCoordinates(time=1) + image = np.array([[1, 2], [3, 4]], dtype=np.uint16) + metadata = {"some": "metadata"} + + def wait_and_notify(): + # Delay so that the await_data call is made before the data is added it it gets held in RAM + # rather than retrieved from the storage by the data handler + time.sleep(2) + acquisition_future._notify_data(coords, image, metadata, stored=True) + + thread = threading.Thread(target=wait_and_notify) + thread.start() + + data, meta = acquisition_future.await_data(coords, return_data=True, return_metadata=True, 
stored=True) + assert np.array_equal(data, image) + assert meta == metadata + + +def test_check_if_coordinates_possible(acquisition_future): + coords = DataCoordinates({"time": 1}) + + try: + acquisition_future._check_if_coordinates_possible(coords) + except ValueError: + pytest.fail("Unexpected ValueError raised") + +def test_check_if_coordinates_not_possible(acquisition_future): + coords = DataCoordinates(time=1, channel='not_possible') + + with pytest.raises(ValueError): + acquisition_future._check_if_coordinates_possible(coords) \ No newline at end of file diff --git a/pycromanager/acquisition/new/test/test_data_coords.py b/pycromanager/acquisition/new/test/test_data_coords.py index e5af1a4a..d7308c6b 100644 --- a/pycromanager/acquisition/new/test/test_data_coords.py +++ b/pycromanager/acquisition/new/test/test_data_coords.py @@ -7,6 +7,10 @@ def test_init_with_dict(): coords = DataCoordinates(coordinate_dict={"time": 1, "channel": "DAPI", "z": 0}) assert coords.coordinate_dict == {"time": 1, "channel": "DAPI", "z": 0} +def test_init_with_dict_access_as_attr(): + coords = DataCoordinates(coordinate_dict={"time": 1, "channel": "DAPI", "z": 0}) + assert coords.time == 1 + def test_init_with_individual_axes(): coords = DataCoordinates(time=1, channel="DAPI", z=0) assert coords.coordinate_dict == {"time": 1, "channel": "DAPI", "z": 0} diff --git a/pycromanager/acquisition/new/test/test_data_handler.py b/pycromanager/acquisition/new/test/test_data_handler.py new file mode 100644 index 00000000..eca70afa --- /dev/null +++ b/pycromanager/acquisition/new/test/test_data_handler.py @@ -0,0 +1,118 @@ +import time + +import pytest +import numpy as np +from pycromanager.acquisition.new.data_coords import DataCoordinates +from pycromanager.acquisition.new.apis.data_storage import DataStorageAPI +from pycromanager.acquisition.new.acq_future import AcquisitionFuture +from typing import Callable, Optional, Union, Sequence, Dict, Tuple, Any + +from 
pycromanager.acquisition.new.data_handler import DataHandler + + +class MockDataStorage(DataStorageAPI): + def __init__(self): + self.data = {} + self.metadata = {} + self.finished = False + + def put(self, coords: DataCoordinates, image: np.ndarray, metadata: Dict): + self.data[coords] = image + self.metadata[coords] = metadata + + def get_data(self, coords: DataCoordinates) -> np.ndarray: + return self.data.get(coords) + + def get_metadata(self, coords: DataCoordinates) -> Dict: + return self.metadata.get(coords) + + def close(self): + pass + + def finish(self): + self.finished = True + + def __contains__(self, coords: DataCoordinates) -> bool: + return coords in self.data + + +@pytest.fixture +def mock_data_storage(): + return MockDataStorage() + + +@pytest.fixture +def data_handler(mock_data_storage): + return DataHandler(mock_data_storage) + + +def test_data_handler_put_and_get(data_handler): + """ + Test that DataHandler can put and get data correctly. + """ + coords = DataCoordinates({"time": 1, "channel": "DAPI", "z": 0}) + image = np.array([[1, 2], [3, 4]], dtype=np.uint16) + metadata = {"some": "metadata"} + + data_handler.put(coords, image, metadata, None) + retrieved_image, retrieved_metadata = data_handler.get(coords) + + assert np.array_equal(retrieved_image, image) + assert retrieved_metadata == metadata + + +def test_data_handler_processing_function(data_handler, mock_data_storage): + """ + Test that DataHandler can process data using a provided processing function, and that + data_handler.get() returns the processed data not the original data. 
+ """ + def process_function(coords, image, metadata): + return coords, image * 2, metadata + + handler_with_processing = DataHandler(mock_data_storage, process_function) + + coords = DataCoordinates({"time": 1, "channel": "DAPI", "z": 0}) + image = np.array([[1, 2], [3, 4]], dtype=np.uint16) + metadata = {"some": "metadata"} + + handler_with_processing.put(coords, image, metadata, None) + + retrieved = handler_with_processing.get(coords, processed=True) + # wait until the data has been processed + start_time = time.time() + while retrieved is None: + time.sleep(0.05) + retrieved = handler_with_processing.get(coords, processed=True) + if time.time() - start_time > 10: + raise TimeoutError("Data was not processed within 10 seconds") + retrieved_image, retrieved_metadata = retrieved + + assert np.array_equal(retrieved_image, image * 2) + assert retrieved_metadata == metadata + + +def test_data_handler_shutdown(data_handler, mock_data_storage): + """ + Test that DataHandler signals the storage to finish correctly. + """ + data_handler.finish() # Signal to finish + data_handler.join() + + assert mock_data_storage.finished +def test_data_handler_with_acquisition_future(data_handler): + """ + Test that DataHandler interacts correctly with AcquisitionFuture. 
+ """ + + class MockAcquisitionFuture(): + def _notify_data(self, coords, data, metadata, processed, saved): + self.notified = True + + future = MockAcquisitionFuture() + coords = DataCoordinates({"time": 1, "channel": "DAPI", "z": 0}) + image = np.array([[1, 2], [3, 4]], dtype=np.uint16) + metadata = {"some": "metadata"} + + data_handler.put(coords, image, metadata, future) + + assert future.notified diff --git a/pycromanager/acquisition/new/test/test_data_storage.py b/pycromanager/acquisition/new/test/test_data_storage.py new file mode 100644 index 00000000..f34e9169 --- /dev/null +++ b/pycromanager/acquisition/new/test/test_data_storage.py @@ -0,0 +1,53 @@ +import pytest +import numpy as np +from pycromanager.acquisition.new.data_coords import DataCoordinates +from pycromanager.acquisition.new.implementations.data_storage_implementations import NDStorage +from pycromanager.acquisition.new.apis.data_storage import DataStorageAPI + +@pytest.fixture(params=["tiff", "ram"]) +def data_storage(request, tmp_path): + return NDStorage(directory=str(tmp_path)) if request.param == "tiff" else NDStorage() + +def test_fully_implements_protocol(data_storage): + assert isinstance(data_storage, DataStorageAPI), "NDStorage does not fully implement DataStorageAPI" + +def test_contains_integration(data_storage): + data_coordinates = DataCoordinates(coordinate_dict={"time": 1, "channel": "DAPI", "z": 0}) + data_storage.put(data_coordinates, np.array([[1, 2], [3, 4]], dtype=np.uint16), {"some": "metadata"}) + + assert data_coordinates in data_storage + +def test_get_data_integration(data_storage): + data_coordinates = DataCoordinates(coordinate_dict={"time": 1, "channel": "DAPI", "z": 0}) + expected_data = np.array([[1, 2], [3, 4]], dtype=np.uint16) + data_storage.put(data_coordinates, expected_data, {"some": "metadata"}) + + result = data_storage.get_data(data_coordinates) + assert np.array_equal(result, expected_data) + +def test_get_metadata_integration(data_storage): + 
data_coordinates = DataCoordinates(coordinate_dict={"time": 1, "channel": "DAPI", "z": 0}) + expected_metadata = {"some": "metadata"} + data_storage.put(data_coordinates, np.array([[1, 2], [3, 4]], dtype=np.uint16 ), expected_metadata) + + result = data_storage.get_metadata(data_coordinates) + assert result == expected_metadata + +def test_put_integration(data_storage): + data_coordinates = DataCoordinates(coordinate_dict={"time": 1, "channel": "DAPI", "z": 0}) + data = np.array([[1, 2], [3, 4]], dtype=np.uint16) + metadata = {"some": "metadata"} + + data_storage.put(data_coordinates, data, metadata) + stored_data = data_storage.get_data(data_coordinates) + stored_metadata = data_storage.get_metadata(data_coordinates) + assert np.array_equal(stored_data, data) + assert stored_metadata == metadata + +def test_finish_integration(data_storage): + data_storage.finish() + # Assertions to check if the dataset was marked as read-only can be added if there are methods or attributes to check this + +def test_close_integration(data_storage): + data_storage.close() + # Assertions to check if resources were released can be added if there are methods or attributes to check this diff --git a/pycromanager/acquisition/new/test/t3st_event_execution.py b/pycromanager/acquisition/new/test/test_event_execution.py similarity index 94% rename from pycromanager/acquisition/new/test/t3st_event_execution.py rename to pycromanager/acquisition/new/test/test_event_execution.py index e7c16db4..3a4c29a2 100644 --- a/pycromanager/acquisition/new/test/t3st_event_execution.py +++ b/pycromanager/acquisition/new/test/test_event_execution.py @@ -3,7 +3,7 @@ from pycromanager.acquisition.new.implementations.mm_device_implementations import MicroManagerCamera import os from pycromanager.acquisition.new.executor import AcquisitionEventExecutor -from pycromanager.acquisition.new.acq_events import StartCapture, ReadoutImages, DataHandler +from pycromanager.acquisition.new.base_classes.acq_events import 
StartCapture, ReadoutImages, DataHandler mm_install_dir = '/Users/henrypinkard/Micro-Manager' diff --git a/requirements.txt b/requirements.txt index cdcde29e..6801d394 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,4 +6,5 @@ docstring-inheritance pymmcore sortedcontainers pyjavaz>=1.2.1 -wget \ No newline at end of file +wget +pydantic>=2.0.0 \ No newline at end of file From 6b9c9cf63a1ef1365fba52c237a6d4d51684865f Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Fri, 28 Jun 2024 14:39:07 +0200 Subject: [PATCH 09/20] small changes for examples --- pycromanager/acquisition/new/acq_future.py | 4 +- pycromanager/acquisition/new/apis/devices.py | 3 + .../new/base_classes/acq_events.py | 12 +- pycromanager/acquisition/new/data_handler.py | 11 +- pycromanager/acquisition/new/executor.py | 104 +++++++++----- .../implementations/event_implementations.py | 35 ++++- .../mm_device_implementations.py | 11 +- ...event_execution.py => integration_test.py} | 5 +- .../test/integration_tests/camera_tests.py | 94 +++++++++++++ .../acquisition/new/test/sandbox_device.py | 4 +- .../new/test/unit_tests/__init__.py | 0 .../test_acquisition_futures.py | 0 .../test/{ => unit_tests}/test_data_coords.py | 0 .../{ => unit_tests}/test_data_handler.py | 0 .../{ => unit_tests}/test_data_storage.py | 0 .../new/test/unit_tests/test_executor.py | 130 ++++++++++++++++++ 16 files changed, 359 insertions(+), 54 deletions(-) rename pycromanager/acquisition/new/test/{test_event_execution.py => integration_test.py} (94%) create mode 100644 pycromanager/acquisition/new/test/integration_tests/camera_tests.py create mode 100644 pycromanager/acquisition/new/test/unit_tests/__init__.py rename pycromanager/acquisition/new/test/{ => unit_tests}/test_acquisition_futures.py (100%) rename pycromanager/acquisition/new/test/{ => unit_tests}/test_data_coords.py (100%) rename pycromanager/acquisition/new/test/{ => unit_tests}/test_data_handler.py (100%) 
rename pycromanager/acquisition/new/test/{ => unit_tests}/test_data_storage.py (100%) create mode 100644 pycromanager/acquisition/new/test/unit_tests/test_executor.py diff --git a/pycromanager/acquisition/new/acq_future.py b/pycromanager/acquisition/new/acq_future.py index 3645f989..787222e7 100644 --- a/pycromanager/acquisition/new/acq_future.py +++ b/pycromanager/acquisition/new/acq_future.py @@ -11,10 +11,10 @@ class AcquisitionFuture: - def __init__(self, event: Union[AcquisitionEvent, DataProducingAcquisitionEvent], data_handler: "DataHandler"): + def __init__(self, event: Union[AcquisitionEvent, DataProducingAcquisitionEvent]): self._event = event event._set_future(self) # so that the event can notify the future when it is done and when data is acquired - self._data_handler = data_handler + self._data_handler = event.data_handler if isinstance(event, DataProducingAcquisitionEvent) else None self._event_complete_condition = threading.Condition() self._data_notification_condition = threading.Condition() self._event_complete = False diff --git a/pycromanager/acquisition/new/apis/devices.py b/pycromanager/acquisition/new/apis/devices.py index 2f9dfcd4..d9729da1 100644 --- a/pycromanager/acquisition/new/apis/devices.py +++ b/pycromanager/acquisition/new/apis/devices.py @@ -41,6 +41,9 @@ def start(self) -> None: def stop(self) -> None: ... + def is_stopped(self) -> bool: + ... + def pop_image(self, timeout=None) -> (np.ndarray, dict): """ Get the next image and metadata from the camera buffer. 
If timeout is None, this function will block until diff --git a/pycromanager/acquisition/new/base_classes/acq_events.py b/pycromanager/acquisition/new/base_classes/acq_events.py index a0ac6468..98f2c5ec 100644 --- a/pycromanager/acquisition/new/base_classes/acq_events.py +++ b/pycromanager/acquisition/new/base_classes/acq_events.py @@ -8,11 +8,11 @@ from pydantic import field_validator from pycromanager.acquisition.new.data_coords import DataCoordinates, DataCoordinatesIterator +from pycromanager.acquisition.new.data_handler import DataHandler from typing import TYPE_CHECKING if TYPE_CHECKING: # avoid circular imports from pycromanager.acquisition.new.acq_future import AcquisitionFuture - from pycromanager.acquisition.new.data_handler import DataHandler # def atomic_instruction(cls): @@ -44,16 +44,16 @@ def _set_future(self, future: 'AcquisitionFuture'): # it can be garbage collected. The event should not give access to the future to user code self._future_weakref = weakref.ref(future) - def _post_execution(self): + def _post_execution(self, exception: Optional[Exception] = None): """ Method that is called after the event is executed to update acquisition futures about the event's status. This is called automatically by the Executor and should not be overriden by subclasses. Args: - future (AcquisitionFuture): The future associated with this event + exception: Exception that was raised during execution, if any """ if self._future_weakref is None: - raise ValueError("Event has not been executed yet") + raise Exception("Future not set for event") future = self._future_weakref() if future is not None: future._notify_execution_complete(self._exception) @@ -66,7 +66,7 @@ class DataProducingAcquisitionEvent(AcquisitionEvent): object that generates the coordinates of each piece of data (i.e. image) that will be produced by the event. For example, {time: 0}, {time: 1}, {time: 2} for a time series acquisition. 
""" - _data_handler: "DataHandler" = None # executor will provide this at runtime + data_handler: DataHandler # This is eventually an ImageCoordinatesIterator. If an Iterable[ImageCoordinates] or # Iterable[Dict[str, Union[int, str]]] is provided, it will be auto-converted to an ImageCoordinatesIterator image_coordinate_iterator: Union[DataCoordinatesIterator, @@ -81,7 +81,7 @@ def put_data(self, data_coordinates: DataCoordinates, image: np.ndarray, metadat """ Put data into the output queue """ - self._data_handler.put(data_coordinates, image, metadata, self._future_weakref()) + self.data_handler.put(data_coordinates, image, metadata, self._future_weakref()) diff --git a/pycromanager/acquisition/new/data_handler.py b/pycromanager/acquisition/new/data_handler.py index f946f7cb..2010377d 100644 --- a/pycromanager/acquisition/new/data_handler.py +++ b/pycromanager/acquisition/new/data_handler.py @@ -5,10 +5,15 @@ from pycromanager.acquisition.new.data_coords import DataCoordinates from pycromanager.acquisition.new.apis.data_storage import DataStorageAPI -from pycromanager.acquisition.new.acq_future import AcquisitionFuture from pydantic.types import JsonValue from dataclasses import dataclass +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from pycromanager.acquisition.new.acq_future import AcquisitionFuture + + class _PeekableQueue(queue.Queue): def peek(self): with self.mutex: @@ -21,7 +26,7 @@ def peek(self): class _DataMetadataFutureHolder: data: np.ndarray metadata: Dict - future: Optional[AcquisitionFuture] + future: Optional["AcquisitionFuture"] processed: bool = False def upack(self): @@ -186,7 +191,7 @@ def get(self, coordinates: DataCoordinates, return_data=True, return_metadata=Tr return data, metadata - def put(self, coordinates: Any, image: np.ndarray, metadata: Dict, acquisition_future: Optional[AcquisitionFuture]): + def put(self, coordinates: Any, image: np.ndarray, metadata: Dict, acquisition_future: Optional["AcquisitionFuture"]): """ Hand 
off this image to the data handler. It will handle handoff to the storage object and image processing if requested, as well as providing temporary access to the image and metadata as it passes throught this diff --git a/pycromanager/acquisition/new/executor.py b/pycromanager/acquisition/new/executor.py index a593fabc..f4f85e91 100644 --- a/pycromanager/acquisition/new/executor.py +++ b/pycromanager/acquisition/new/executor.py @@ -8,9 +8,11 @@ import traceback from pydantic import BaseModel import uuid +from typing import Union, Iterable -from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionFuture +from pycromanager.acquisition.new.acq_future import AcquisitionFuture from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent +from pycromanager.acquisition.new.data_handler import DataHandler class _ExecutionThreadManager(BaseModel): @@ -44,7 +46,7 @@ def _run_thread(self): while True: if self._terminate_event.is_set(): return - if self._shutdown_event.is_set() and self.is_free(): + if self._shutdown_event.is_set() and not self._deque: return # Event retrieval loop while event is None: @@ -80,8 +82,8 @@ def _run_thread(self): warnings.warn(f"Exception during event execution, retrying {num_retries} more times") traceback.print_exc() else: - event._exception = e - event._post_execution() # notify futures + traceback.print_exc() + event._post_execution(e) # notify futures with self._addition_condition: self._event_executing = False raise e # re-raise the exception to stop the thread @@ -99,13 +101,7 @@ def submit_event(self, event, prioritize=False): """ Submit an event for execution on this thread. If prioritize is True, the event will be executed before any other events in the queue. - - Returns: - uuid.UUID: A unique identifier for the event, which can be used to check if the event has been executed """ - if event._uuid is not None: - warnings.warn("Event has already been executed. 
Re-executing may lead to unexpected behavior") - event._uuid = uuid.uuid1() with self._addition_condition: if self._shutdown_event.is_set() or self._terminate_event.is_set(): raise RuntimeError("Cannot submit event to a thread that has been shutdown") @@ -114,7 +110,6 @@ def submit_event(self, event, prioritize=False): else: self._deque.append(event) self._addition_condition.notify_all() - return event._uuid def terminate(self): @@ -136,7 +131,7 @@ def shutdown(self): self._thread.join() -class AcquisitionEventExecutor: +class ExecutionEngine: def __init__(self, num_threads=1): self._threads = [] for _ in range(num_threads): @@ -145,29 +140,72 @@ def __init__(self, num_threads=1): def _start_new_thread(self): self._threads.append(_ExecutionThreadManager()) - def submit_event(self, event, prioritize=False, use_free_thread=False, data_handler: DataHandler = None): + def submit(self, event_or_events: Union[AcquisitionEvent, Iterable[AcquisitionEvent]], + transpile: bool = True, prioritize: bool = False, use_free_thread: bool = False, + data_handler: DataHandler = None) -> Union[AcquisitionFuture, Iterable[AcquisitionFuture]]: """ - Submit an event for execution on one of the active threads. By default, all events will be executed - on a single thread in the order they were submitted. This is the simplest way to prevent concurrency issues - with hardware devices. With thread-safe code, events can be parallelized by submitting them to different threads - using the use_free_thread argument. By default, events will be executed in the order they were submitted, but - if prioritize is set to True, the event will be executed before any other events in the queue on its thread. + Submit one or more acquisition events for execution. + + This method handles the submission of acquisition events to be executed on active threads. It provides + options for event prioritization, thread allocation, and performance optimization. 
+ + Execution Behavior: + - By default, all events are executed on a single thread in submission order to prevent concurrency issues. + - Events can be parallelized across different threads using the 'use_free_thread' parameter. + - Priority execution can be requested using the 'prioritize' parameter. Parameters: - event (AcquisitionEvent): The event to execute - data_storage (DataStorage): The data storage object to put data into if the event produces data - prioritize (bool): If True, the event will be executed before any other events queued on its execution thread - use_free_thread (bool): If True, the event will be executed on a thread that is not currently executing - and has nothing in its queue, creating a new thread if necessary. This is needed, for example, when using - an event to cancel or stop another event that is awaiting a stop signal to be rewritten to the state. If - this is set to False (the default), the event will be executed on the primary thread. - data_handler (DataHandler): The queue to put data into if the event produces data + ---------- + event_or_events : Union[AcquisitionEvent, Iterable[AcquisitionEvent]] + A single AcquisitionEvent or an iterable of AcquisitionEvents to be submitted. + + transpile : bool, optional (default=True) + If True and multiple events are submitted, attempt to optimize them for better performance. + This may result in events being combined or reorganized. + + prioritize : bool, optional (default=False) + If True, execute the event(s) before any others in the queue on its assigned thread. + Useful for system-wide changes affecting other events, like hardware adjustments. + + use_free_thread : bool, optional (default=False) + If True, execute the event(s) on an available thread with an empty queue, creating a new one if necessary. + Useful for operations like cancelling or stopping events awaiting signals. + If False, execute on the primary thread. 
+ + data_handler : DataHandler, optional (default=None) + Object to handle data and metadata produced by DataProducingAcquisitionEvents. + + Returns: + ------- + Union[AcquisitionFuture, Iterable[AcquisitionFuture]] + For a single event: returns a single AcquisitionFuture. + For multiple events: returns an Iterable of AcquisitionFutures. + Note: The number of returned futures may differ from the input if transpilation occurs. + + Notes: + ----- + - Transpilation may optimize multiple events, potentially altering their number or structure. + - Use 'prioritize' for critical system changes that should occur before other queued events. + - 'use_free_thread' is essential for operations that need to run independently, like cancellation events. """ - # check that DataProducingAcquisitionEvents have a data output queue - if isinstance(event, DataProducingAcquisitionEvent) and data_handler is None: - raise ValueError("DataProducingAcquisitionEvent must have a data_output_queue argument") + if isinstance(event_or_events, AcquisitionEvent): + event_or_events = [event_or_events] + + if transpile: + # TODO: transpile events + pass + + futures = tuple(self._submit_single_event(event, use_free_thread, prioritize) + for event in event_or_events) + if len(futures) == 1: + return futures[0] + return futures - future = AcquisitionFuture(event=event, data_handler=data_handler) + def _submit_single_event(self, event: AcquisitionEvent, use_free_thread: bool = False, prioritize: bool = False): + """ + Submit a single event for execution + """ + future = AcquisitionFuture(event=event) if use_free_thread: for thread in self._threads: if thread.is_free(): @@ -180,13 +218,11 @@ def submit_event(self, event, prioritize=False, use_free_thread=False, data_hand return future - - def shutdown(self): """ - Stop all threads and wait for them to finish + Stop all threads managed by this executor and wait for them to finish """ for thread in self._threads: thread.shutdown() for thread in 
self._threads: - thread.join() \ No newline at end of file + thread.join() diff --git a/pycromanager/acquisition/new/implementations/event_implementations.py b/pycromanager/acquisition/new/implementations/event_implementations.py index 16353fa3..ef636aef 100644 --- a/pycromanager/acquisition/new/implementations/event_implementations.py +++ b/pycromanager/acquisition/new/implementations/event_implementations.py @@ -13,15 +13,15 @@ class ReadoutImages(DataProducingAcquisitionEvent): Readout one or more images (and associated metadata) from a camera Attributes: - num_images (int): The number of images to read out. + num_images (int): The number of images to read out. If None, the readout will continue until the + image_coordinate_iterator is exhausted or the camera is stopped and no more images are available. camera (Camera): The camera object to read images from. image_coordinate_iterator (Iterable[DataCoordinates]): An iterator or list of ImageCoordinates objects, which specify the coordinates of the images that will be read out, should be able to provide at least num_images elements. """ - num_images: int + num_images: int = None camera: Camera - # TODO: maybe specify here that this should run on a seperate thread? 
def execute(self): image_counter = itertools.count() if self.num_images is None else range(self.num_images) @@ -34,6 +34,9 @@ def execute(self): if image is not None: self.put_data(image_coordinates, image, metadata) break + # check stopping condition + if self.camera.is_stopped(): + break class StartCapture(AcquisitionEvent): @@ -54,3 +57,29 @@ def execute(self): self.camera.stop() raise e + +class StartContinuousCapture(AcquisitionEvent): + """ + Tell data-producing device to start capturing images continuously, until a stop signal is received + """ + camera: Camera + + def execute(self): + """ + Capture images from the camera + """ + try: + self.camera.arm() + self.camera.start() + except Exception as e: + self.camera.stop() + raise e + +class StopCapture(AcquisitionEvent): + """ + Tell data-producing device to stop capturing images + """ + camera: Camera + + def execute(self): + self.camera.stop() diff --git a/pycromanager/acquisition/new/implementations/mm_device_implementations.py b/pycromanager/acquisition/new/implementations/mm_device_implementations.py index 0c93db9f..6bcbd68a 100644 --- a/pycromanager/acquisition/new/implementations/mm_device_implementations.py +++ b/pycromanager/acquisition/new/implementations/mm_device_implementations.py @@ -36,6 +36,7 @@ def __init__(self, device_name=None): # This may be removable in the the future with the new camera API if something similar is implemented at the core self._snap_executor = ThreadPoolExecutor(max_workers=1) self._last_snap = None + self._snap_active = False def set_exposure(self, exposure: float) -> None: @@ -58,7 +59,12 @@ def arm(self, frame_count=None) -> None: def start(self) -> None: if self._frame_count == 1: # Execute this on a separate thread because it blocks - self._last_snap = self._snap_executor.submit(lambda : self._core.snap_image()) + def do_snap(): + self._snap_active = True + self._core.snap_image() + self._snap_active = False + + 
self._last_snap = self._snap_executor.submit(do_snap) elif self._frame_count is None: # set core camera to this camera because there's no version of this call where you specify the camera self._core.set_camera_device(self.device_name) @@ -70,6 +76,9 @@ def stop(self) -> None: # This will stop sequences. There is not way to stop snap_image self._core.stop_sequence_acquisition(self.device_name) + def is_stopped(self) -> bool: + return not self._core.is_sequence_running(self.device_name) and not self._snap_active # stopped = no sequence running AND no snap in flight (was inverted) + def pop_image(self, timeout=None) -> (np.ndarray, dict): if self._frame_count != 1: md = pymmcore.Metadata() diff --git a/pycromanager/acquisition/new/test/test_event_execution.py b/pycromanager/acquisition/new/test/integration_test.py similarity index 94% rename from pycromanager/acquisition/new/test/test_event_execution.py rename to pycromanager/acquisition/new/test/integration_test.py index 3a4c29a2..391cd618 100644 --- a/pycromanager/acquisition/new/test/test_event_execution.py +++ b/pycromanager/acquisition/new/test/integration_test.py @@ -2,7 +2,7 @@ from pycromanager.acquisition.new.data_coords import DataCoordinates from pycromanager.acquisition.new.implementations.mm_device_implementations import MicroManagerCamera import os -from pycromanager.acquisition.new.executor import AcquisitionEventExecutor +from pycromanager.acquisition.new.executor import ExecutionEngine from pycromanager.acquisition.new.base_classes.acq_events import StartCapture, ReadoutImages, DataHandler @@ -14,8 +14,7 @@ debug=False) - -executor = AcquisitionEventExecutor() +executor = ExecutionEngine() diff --git a/pycromanager/acquisition/new/test/integration_tests/camera_tests.py b/pycromanager/acquisition/new/test/integration_tests/camera_tests.py new file mode 100644 index 00000000..b1a797da --- /dev/null +++ b/pycromanager/acquisition/new/test/integration_tests/camera_tests.py @@ -0,0 +1,94 @@ +import time + +from pycromanager import start_headless +from 
pycromanager.acquisition.new.data_coords import DataCoordinates +from pycromanager.acquisition.new.implementations.mm_device_implementations import MicroManagerCamera +import os +from pycromanager.acquisition.new.executor import ExecutionEngine +from pycromanager.acquisition.new.implementations.event_implementations import StartCapture, ReadoutImages, \ + StartContinuousCapture, StopCapture +from pycromanager.acquisition.new.data_handler import DataHandler +from pycromanager.acquisition.new.implementations.data_storage_implementations import NDStorage +import itertools + + +# TODO: make this a pytest startup fixture +mm_install_dir = '/Users/henrypinkard/Micro-Manager' +config_file = os.path.join(mm_install_dir, 'MMConfig_demo.cfg') +start_headless(mm_install_dir, config_file, + buffer_size_mb=1024, max_memory_mb=1024, # set these low for github actions + python_backend=True, + debug=False) + +camera = MicroManagerCamera() +executor = ExecutionEngine() + + + + + + +### Finite sequence +num_images = 100 +storage = NDStorage() +data_handler = DataHandler(storage=storage) + +start_capture_event = StartCapture(num_images=num_images, camera=camera) +readout_images_event = ReadoutImages(num_images=num_images, camera=camera, + image_coordinate_iterator=[DataCoordinates(time=t) for t in range(num_images)], + data_handler=data_handler) + +executor.submit([start_capture_event, readout_images_event]) + +image_count = 0 +# TODO: monitor this with notifications + +while not {'time': num_images - 1} in storage: + time.sleep(1) + +print('Finished first one') + + +#### Live mode +storage = NDStorage() +data_handler = DataHandler(storage=storage) + +start_capture_event = StartContinuousCapture(camera=camera) +readout_images_event = ReadoutImages(num_images=num_images, camera=camera, + # TODO change this to infinite + image_coordinate_iterator=(DataCoordinates(time=t) for t in itertools.count()), + data_handler=data_handler) +stop_capture = StopCapture(camera=camera) + 
+executor.submit([start_capture_event, readout_images_event]) +time.sleep(2) +# Readout images is continuously running on one thread, so need to do this on another thread +executor.submit(stop_capture, use_free_thread=True) + +image_count = 0 +# TODO: monitor this with notifications + +while not {'time': num_images - 1} in storage: + time.sleep(1) + +print('Finished second one') + + +num_images = 1 +storage = NDStorage() +data_handler = DataHandler(storage=storage) + +start_capture_event = StartCapture(num_images=num_images, camera=camera) +readout_images_event = ReadoutImages(num_images=num_images, camera=camera, + image_coordinate_iterator=[DataCoordinates(time=t) for t in range(num_images)], + data_handler=data_handler) + +executor.submit([start_capture_event, readout_images_event]) + +image_count = 0 +# TODO: monitor this with notifications + +while not {'time': num_images - 1} in storage: + time.sleep(1) + +print('Finished single image') \ No newline at end of file diff --git a/pycromanager/acquisition/new/test/sandbox_device.py b/pycromanager/acquisition/new/test/sandbox_device.py index 4f77f71e..f31ac6f1 100644 --- a/pycromanager/acquisition/new/test/sandbox_device.py +++ b/pycromanager/acquisition/new/test/sandbox_device.py @@ -14,8 +14,8 @@ camera = MicroManagerCamera() -from pycromanager.acquisition.new.executor import AcquisitionEventExecutor -executor = AcquisitionEventExecutor() +from pycromanager.acquisition.new.executor import ExecutionEngine +executor = ExecutionEngine() from pycromanager.acquisition.new.base_classes.acq_events import StartCapture, ReadoutImages, DataHandler diff --git a/pycromanager/acquisition/new/test/unit_tests/__init__.py b/pycromanager/acquisition/new/test/unit_tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pycromanager/acquisition/new/test/test_acquisition_futures.py b/pycromanager/acquisition/new/test/unit_tests/test_acquisition_futures.py similarity index 100% rename from 
pycromanager/acquisition/new/test/test_acquisition_futures.py rename to pycromanager/acquisition/new/test/unit_tests/test_acquisition_futures.py diff --git a/pycromanager/acquisition/new/test/test_data_coords.py b/pycromanager/acquisition/new/test/unit_tests/test_data_coords.py similarity index 100% rename from pycromanager/acquisition/new/test/test_data_coords.py rename to pycromanager/acquisition/new/test/unit_tests/test_data_coords.py diff --git a/pycromanager/acquisition/new/test/test_data_handler.py b/pycromanager/acquisition/new/test/unit_tests/test_data_handler.py similarity index 100% rename from pycromanager/acquisition/new/test/test_data_handler.py rename to pycromanager/acquisition/new/test/unit_tests/test_data_handler.py diff --git a/pycromanager/acquisition/new/test/test_data_storage.py b/pycromanager/acquisition/new/test/unit_tests/test_data_storage.py similarity index 100% rename from pycromanager/acquisition/new/test/test_data_storage.py rename to pycromanager/acquisition/new/test/unit_tests/test_data_storage.py diff --git a/pycromanager/acquisition/new/test/unit_tests/test_executor.py b/pycromanager/acquisition/new/test/unit_tests/test_executor.py new file mode 100644 index 00000000..4ccc1ab2 --- /dev/null +++ b/pycromanager/acquisition/new/test/unit_tests/test_executor.py @@ -0,0 +1,130 @@ +import pytest +from unittest.mock import MagicMock +from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent +from pycromanager.acquisition.new.executor import ExecutionEngine +from pycromanager.acquisition.new.acq_future import AcquisitionFuture +import threading +import time + + +def create_sync_event(start_event, finish_event): + event = MagicMock(spec=AcquisitionEvent) + event.num_retries_on_exception = 0 + event._uuid = None + event.executed = False + event.executed_time = None + + def execute(): + start_event.set() # Signal that the execution has started + finish_event.wait() # Wait for the signal to 
finish + event.executed = True + event.executed_time = time.time() + + event.execute.side_effect = execute + event._post_execution = MagicMock() + return event + + +@pytest.fixture +def acquisition_event_executor(): + return ExecutionEngine(num_threads=2) + + +def test_submit_single_event(acquisition_event_executor): + start_event = threading.Event() + finish_event = threading.Event() + event = create_sync_event(start_event, finish_event) + + future = acquisition_event_executor.submit_event(event) + start_event.wait() # Wait for the event to start executing + finish_event.set() # Signal the event to finish + acquisition_event_executor.shutdown() + + assert event.executed + assert isinstance(future, AcquisitionFuture) + + +def test_submit_multiple_events(acquisition_event_executor): + start_event1 = threading.Event() + finish_event1 = threading.Event() + event1 = create_sync_event(start_event1, finish_event1) + + start_event2 = threading.Event() + finish_event2 = threading.Event() + event2 = create_sync_event(start_event2, finish_event2) + + future1 = acquisition_event_executor.submit_event(event1) + future2 = acquisition_event_executor.submit_event(event2) + + start_event1.wait() # Wait for the first event to start executing + finish_event1.set() # Signal the first event to finish + start_event2.wait() # Wait for the second event to start executing + finish_event2.set() # Signal the second event to finish + acquisition_event_executor.shutdown() + + assert event1.executed + assert event2.executed + assert isinstance(future1, AcquisitionFuture) + assert isinstance(future2, AcquisitionFuture) + + +def test_event_prioritization(acquisition_event_executor): + start_event1 = threading.Event() + finish_event1 = threading.Event() + event1 = create_sync_event(start_event1, finish_event1) + + start_event2 = threading.Event() + finish_event2 = threading.Event() + event2 = create_sync_event(start_event2, finish_event2) + + start_event3 = threading.Event() + finish_event3 = 
threading.Event() + event3 = create_sync_event(start_event3, finish_event3) + + acquisition_event_executor.submit_event(event1) + start_event1.wait() # Wait for the first event to start executing + + acquisition_event_executor.submit_event(event2) + acquisition_event_executor.submit_event(event3, prioritize=True) + + finish_event1.set() + finish_event2.set() + finish_event3.set() + + # wait till all events finished + acquisition_event_executor.shutdown() + + assert event3.executed_time < event2.executed_time + assert event1.executed + assert event2.executed + assert event3.executed + + +def test_use_free_thread_parallel_execution(acquisition_event_executor): + start_event1 = threading.Event() + finish_event1 = threading.Event() + event1 = create_sync_event(start_event1, finish_event1) + + start_event2 = threading.Event() + finish_event2 = threading.Event() + event2 = create_sync_event(start_event2, finish_event2) + + acquisition_event_executor.submit(event1) + acquisition_event_executor.submit(event2, use_free_thread=True) + + # Wait for both events to start executing + assert start_event1.wait(timeout=5) + assert start_event2.wait(timeout=5) + + # Ensure that both events are executing simultaneously + assert start_event1.is_set() + assert start_event2.is_set() + + # Signal both events to finish + finish_event1.set() + finish_event2.set() + + acquisition_event_executor.shutdown() + + assert event1.executed + assert event2.executed \ No newline at end of file From a0ca3ff85bdd74e7252179adce9bfd64d10aa7ee Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Mon, 1 Jul 2024 14:12:49 +0200 Subject: [PATCH 10/20] added metaclass to be able to run method calls on executor --- .../new/base_classes/acq_events.py | 2 +- .../device_types.py} | 32 ++- pycromanager/acquisition/new/data_handler.py | 1 - pycromanager/acquisition/new/executor.py | 203 +++++++++--------- .../implementations/event_implementations.py | 2 +- 
.../mm_device_implementations.py | 2 +- .../acquisition/new/internal/device.py | 100 +++++++++ .../acquisition/new/test/integration_test.py | 4 +- .../test/unit_tests/test_device_metaclass.py | 131 +++++++++++ 9 files changed, 365 insertions(+), 112 deletions(-) rename pycromanager/acquisition/new/{apis/devices.py => base_classes/device_types.py} (72%) create mode 100644 pycromanager/acquisition/new/internal/device.py create mode 100644 pycromanager/acquisition/new/test/unit_tests/test_device_metaclass.py diff --git a/pycromanager/acquisition/new/base_classes/acq_events.py b/pycromanager/acquisition/new/base_classes/acq_events.py index 98f2c5ec..3dea3361 100644 --- a/pycromanager/acquisition/new/base_classes/acq_events.py +++ b/pycromanager/acquisition/new/base_classes/acq_events.py @@ -24,7 +24,7 @@ class AcquisitionEvent(BaseModel, ABC): _exception: Exception = None _future_weakref: Optional[weakref.ReferenceType['AcquisitionFuture']] = None - # TODO: want to make this specific to certain attributes + # TODO: want to make this specific to certain attributes? 
class Config: arbitrary_types_allowed = True diff --git a/pycromanager/acquisition/new/apis/devices.py b/pycromanager/acquisition/new/base_classes/device_types.py similarity index 72% rename from pycromanager/acquisition/new/apis/devices.py rename to pycromanager/acquisition/new/base_classes/device_types.py index d9729da1..594e8dd8 100644 --- a/pycromanager/acquisition/new/apis/devices.py +++ b/pycromanager/acquisition/new/base_classes/device_types.py @@ -1,31 +1,39 @@ +"""" +Base classes for devices that can be used by the execution engine """ -APIs (protocols) for devices that can be used in the acquisition module -""" -import numpy as np -from typing_extensions import Protocol, runtime_checkable -@runtime_checkable -class SingleAxisMovable(Protocol): +from abc import abstractmethod +from pycromanager.acquisition.new.internal.device import Device + + +class SingleAxisActuator(Device): + + @abstractmethod def move(self, position: float) -> None: ... -@runtime_checkable -class DoubleAxisMovable(Protocol): + +class DoubleAxisActuator(Device): + + @abstractmethod def move(self, x: float, y: float) -> None: ... -@runtime_checkable -class Camera(Protocol): +class Camera(Device): """ Generic class for a camera and the buffer where it stores data """ + # TODO: maybe change these to attributes? + @abstractmethod def set_exposure(self, exposure: float) -> None: ... + @abstractmethod def get_exposure(self) -> float: ... + @abstractmethod def arm(self, frame_count=None) -> None: """ Arms the device before an start command. This optional command validates all the current features for @@ -35,15 +43,19 @@ def arm(self, frame_count=None) -> None: """ ... + @abstractmethod def start(self) -> None: ... + @abstractmethod def stop(self) -> None: ... + @abstractmethod def is_stopped(self) -> bool: ... + @abstractmethod def pop_image(self, timeout=None) -> (np.ndarray, dict): """ Get the next image and metadata from the camera buffer. 
If timeout is None, this function will block until diff --git a/pycromanager/acquisition/new/data_handler.py b/pycromanager/acquisition/new/data_handler.py index 2010377d..5c5939f0 100644 --- a/pycromanager/acquisition/new/data_handler.py +++ b/pycromanager/acquisition/new/data_handler.py @@ -78,7 +78,6 @@ def _run_intake_thread(self): if self._process_function: # don't remove it until it has been processed coordinates = self._intake_queue.peek() - print('peek success') # shutdown condition if coordinates is None: self._intake_queue.get() diff --git a/pycromanager/acquisition/new/executor.py b/pycromanager/acquisition/new/executor.py index f4f85e91..83bbd38f 100644 --- a/pycromanager/acquisition/new/executor.py +++ b/pycromanager/acquisition/new/executor.py @@ -15,6 +15,113 @@ from pycromanager.acquisition.new.data_handler import DataHandler +class ExecutionEngine: + + _instance = None + + def __init__(self, num_threads=1): + self._threads = [] + for _ in range(num_threads): + self._start_new_thread() + self._instance = self + + @classmethod + def get_instance(cls): + if not hasattr(cls, "_instance"): + raise RuntimeError("ExecutionEngine has not been initialized") + return cls._instance + + def _start_new_thread(self): + self._threads.append(_ExecutionThreadManager()) + + def submit(self, event_or_events: Union[AcquisitionEvent, Iterable[AcquisitionEvent]], + transpile: bool = True, prioritize: bool = False, use_free_thread: bool = False, + data_handler: DataHandler = None) -> Union[AcquisitionFuture, Iterable[AcquisitionFuture]]: + """ + Submit one or more acquisition events for execution. + + This method handles the submission of acquisition events to be executed on active threads. It provides + options for event prioritization, thread allocation, and performance optimization. + + Execution Behavior: + - By default, all events are executed on a single thread in submission order to prevent concurrency issues. 
+ - Events can be parallelized across different threads using the 'use_free_thread' parameter. + - Priority execution can be requested using the 'prioritize' parameter. + + Parameters: + ---------- + event_or_events : Union[AcquisitionEvent, Iterable[AcquisitionEvent]] + A single AcquisitionEvent or an iterable of AcquisitionEvents to be submitted. + + transpile : bool, optional (default=True) + If True and multiple events are submitted, attempt to optimize them for better performance. + This may result in events being combined or reorganized. + + prioritize : bool, optional (default=False) + If True, execute the event(s) before any others in the queue on its assigned thread. + Useful for system-wide changes affecting other events, like hardware adjustments. + + use_free_thread : bool, optional (default=False) + If True, execute the event(s) on an available thread with an empty queue, creating a new one if necessary. + Useful for operations like cancelling or stopping events awaiting signals. + If False, execute on the primary thread. + + data_handler : DataHandler, optional (default=None) + Object to handle data and metadata produced by DataProducingAcquisitionEvents. + + Returns: + ------- + Union[AcquisitionFuture, Iterable[AcquisitionFuture]] + For a single event: returns a single AcquisitionFuture. + For multiple events: returns an Iterable of AcquisitionFutures. + Note: The number of returned futures may differ from the input if transpilation occurs. + + Notes: + ----- + - Transpilation may optimize multiple events, potentially altering their number or structure. + - Use 'prioritize' for critical system changes that should occur before other queued events. + - 'use_free_thread' is essential for operations that need to run independently, like cancellation events. 
+ """ + if isinstance(event_or_events, AcquisitionEvent): + event_or_events = [event_or_events] + + if transpile: + # TODO: transpile events + pass + + futures = tuple(self._submit_single_event(event, use_free_thread, prioritize) + for event in event_or_events) + if len(futures) == 1: + return futures[0] + return futures + + def _submit_single_event(self, event: AcquisitionEvent, use_free_thread: bool = False, prioritize: bool = False): + """ + Submit a single event for execution + """ + future = AcquisitionFuture(event=event) + if use_free_thread: + for thread in self._threads: + if thread.is_free(): + thread.submit_event(event) + break + self._start_new_thread() + self._threads[-1].submit_event(event) + else: + self._threads[0].submit_event(event, prioritize=prioritize) + + return future + + def shutdown(self): + """ + Stop all threads managed by this executor and wait for them to finish + """ + for thread in self._threads: + thread.shutdown() + for thread in self._threads: + thread.join() + + class _ExecutionThreadManager(BaseModel): """ Class which manages a single thread that executes events from a queue, one at a time. Events can be added @@ -130,99 +237,3 @@ def shutdown(self): self._addition_condition.notify_all() self._thread.join() - -class ExecutionEngine: - def __init__(self, num_threads=1): - self._threads = [] - for _ in range(num_threads): - self._start_new_thread() - - def _start_new_thread(self): - self._threads.append(_ExecutionThreadManager()) - - def submit(self, event_or_events: Union[AcquisitionEvent, Iterable[AcquisitionEvent]], - transpile: bool = True, prioritize: bool = False, use_free_thread: bool = False, - data_handler: DataHandler = None) -> Union[AcquisitionFuture, Iterable[AcquisitionFuture]]: - """ - Submit one or more acquisition events for execution. - - This method handles the submission of acquisition events to be executed on active threads. 
It provides - options for event prioritization, thread allocation, and performance optimization. - - Execution Behavior: - - By default, all events are executed on a single thread in submission order to prevent concurrency issues. - - Events can be parallelized across different threads using the 'use_free_thread' parameter. - - Priority execution can be requested using the 'prioritize' parameter. - - Parameters: - ---------- - event_or_events : Union[AcquisitionEvent, Iterable[AcquisitionEvent]] - A single AcquisitionEvent or an iterable of AcquisitionEvents to be submitted. - - transpile : bool, optional (default=True) - If True and multiple events are submitted, attempt to optimize them for better performance. - This may result in events being combined or reorganized. - - prioritize : bool, optional (default=False) - If True, execute the event(s) before any others in the queue on its assigned thread. - Useful for system-wide changes affecting other events, like hardware adjustments. - - use_free_thread : bool, optional (default=False) - If True, execute the event(s) on an available thread with an empty queue, creating a new one if necessary. - Useful for operations like cancelling or stopping events awaiting signals. - If False, execute on the primary thread. - - data_handler : DataHandler, optional (default=None) - Object to handle data and metadata produced by DataProducingAcquisitionEvents. - - Returns: - ------- - Union[AcquisitionFuture, Iterable[AcquisitionFuture]] - For a single event: returns a single AcquisitionFuture. - For multiple events: returns an Iterable of AcquisitionFutures. - Note: The number of returned futures may differ from the input if transpilation occurs. - - Notes: - ----- - - Transpilation may optimize multiple events, potentially altering their number or structure. - - Use 'prioritize' for critical system changes that should occur before other queued events. 
- - 'use_free_thread' is essential for operations that need to run independently, like cancellation events. - """ - if isinstance(event_or_events, AcquisitionEvent): - event_or_events = [event_or_events] - - if transpile: - # TODO: transpile events - pass - - futures = tuple(self._submit_single_event(event, use_free_thread, prioritize) - for event in event_or_events) - if len(futures) == 1: - return futures[0] - return futures - - def _submit_single_event(self, event: AcquisitionEvent, use_free_thread: bool = False, prioritize: bool = False): - """ - Submit a single event for execution - """ - future = AcquisitionFuture(event=event) - if use_free_thread: - for thread in self._threads: - if thread.is_free(): - thread.submit_event(event) - break - self._start_new_thread() - self._threads[-1].submit_event(event) - else: - self._threads[0].submit_event(event, prioritize=prioritize) - - return future - - def shutdown(self): - """ - Stop all threads managed by this executor and wait for them to finish - """ - for thread in self._threads: - thread.shutdown() - for thread in self._threads: - thread.join() diff --git a/pycromanager/acquisition/new/implementations/event_implementations.py b/pycromanager/acquisition/new/implementations/event_implementations.py index ef636aef..f0206aac 100644 --- a/pycromanager/acquisition/new/implementations/event_implementations.py +++ b/pycromanager/acquisition/new/implementations/event_implementations.py @@ -4,7 +4,7 @@ from typing import Iterable import itertools from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent -from pycromanager.acquisition.new.apis.devices import Camera +from pycromanager.acquisition.new.base_classes.devices import Camera from pycromanager.acquisition.new.data_coords import DataCoordinates diff --git a/pycromanager/acquisition/new/implementations/mm_device_implementations.py b/pycromanager/acquisition/new/implementations/mm_device_implementations.py index 
6bcbd68a..26399f03 100644 --- a/pycromanager/acquisition/new/implementations/mm_device_implementations.py +++ b/pycromanager/acquisition/new/implementations/mm_device_implementations.py @@ -2,7 +2,7 @@ Implementation of Micro-Manager devices.py in terms of the AcqEng bottom API """ -from pycromanager.acquisition.new.apis.devices import Camera +from pycromanager.acquisition.new.base_classes.devices import Camera from pycromanager.core import Core import numpy as np import pymmcore diff --git a/pycromanager/acquisition/new/internal/device.py b/pycromanager/acquisition/new/internal/device.py new file mode 100644 index 00000000..bf877356 --- /dev/null +++ b/pycromanager/acquisition/new/internal/device.py @@ -0,0 +1,100 @@ +""" +Base class for all devices that integrates with the execution engine and enables tokenization of device access. +""" +from abc import ABC, ABCMeta +from functools import wraps +from typing import Any, Dict + +from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent +from pycromanager.acquisition.new.executor import ExecutionEngine + + +class MethodCallAcquisitionEvent(AcquisitionEvent): + method_name: str + args: tuple + kwargs: Dict[str, Any] + instance: Any + + def execute(self): + method = getattr(self.instance, self.method_name) + return method(*self.args, **self.kwargs) + +class AttrAccessAcquisitionEvent(AcquisitionEvent): + attr_name: str + instance: Any + + def execute(self): + return getattr(self.instance, self.attr_name) + +class AttrSetAcquisitionEvent(AcquisitionEvent): + attr_name: str + value: Any + instance: Any + + def execute(self): + setattr(self.instance, self.attr_name, self.value) + + +class DeviceMetaclass(ABCMeta): + """ + Metaclass for devices that wraps all methods and attributes in the device class to add the ability to + control their execution and access. 
This has two purposes: + + 1) Add the ability to record all method calls and attribute accesses for tokenization + 2) Add the ability to make all methods and attributes thread-safe by putting them on the Executor + """ + @staticmethod + def wrap_for_executor(attr_name, attr_value): + if hasattr(attr_value, '_wrapped_for_executor'): + return attr_value + + @wraps(attr_value) + def wrapper(self: 'Device', *args: Any, **kwargs: Any) -> Any: + event = MethodCallAcquisitionEvent(method_name=attr_name, args=args, kwargs=kwargs, instance=self) + return self._executor.submit(event).await_execution() + + wrapper._wrapped_for_executor = True + return wrapper + + + def __new__(mcs, name: str, bases: tuple, attrs: dict) -> Any: + new_attrs = {} + for attr_name, attr_value in attrs.items(): + if not attr_name.startswith('_'): + if callable(attr_value): + new_attrs[attr_name] = mcs.wrap_for_executor(attr_name, attr_value) + else: + pass + else: + new_attrs[attr_name] = attr_value + + def __getattr__(self: 'Device', name: str) -> Any: + if name.startswith('__'): + return super().__getattribute__(name) + + event = AttrAccessAcquisitionEvent( + attr_name=name, + instance=self + ) + return self._executor.submit(event).await_execution() + + def __setattr__(self: 'Device', name: str, value: Any) -> None: + if name.startswith('_'): + object.__setattr__(self, name, value) + else: + event = AttrSetAcquisitionEvent( + attr_name=name, + value=value, + instance=self + ) + self._executor.submit(event).await_execution() + + new_attrs['__getattr__'] = __getattr__ + new_attrs['__setattr__'] = __setattr__ + + return super().__new__(mcs, name, bases, new_attrs) + + +class Device(ABC, metaclass=DeviceMetaclass): + def __init__(self): + self._executor: ExecutionEngine = ExecutionEngine.get_instance() diff --git a/pycromanager/acquisition/new/test/integration_test.py b/pycromanager/acquisition/new/test/integration_test.py index 391cd618..a258d800 100644 --- 
a/pycromanager/acquisition/new/test/integration_test.py +++ b/pycromanager/acquisition/new/test/integration_test.py @@ -29,8 +29,8 @@ image_coordinate_iterator=[DataCoordinates(time=t) for t in range(num_images)], output_queue=data_output_queue) -executor.submit_event(start_capture_event) -executor.submit_event(readout_images_event) +executor.submit(start_capture_event) +executor.submit(readout_images_event) image_count = 0 while True: diff --git a/pycromanager/acquisition/new/test/unit_tests/test_device_metaclass.py b/pycromanager/acquisition/new/test/unit_tests/test_device_metaclass.py new file mode 100644 index 00000000..4f25d856 --- /dev/null +++ b/pycromanager/acquisition/new/test/unit_tests/test_device_metaclass.py @@ -0,0 +1,131 @@ +import pytest +from unittest.mock import MagicMock, call +from typing import Any + +# Assuming these are imported from your actual implementation +from pycromanager.acquisition.new.internal.device import (Device, AttrAccessAcquisitionEvent, + AttrSetAcquisitionEvent, MethodCallAcquisitionEvent) + + + +class MockExecutionEngine: + def __init__(self): + self.execute = MagicMock() + self.execute.return_value.await_execution = MagicMock() + + def reset_mock(self): + self.execute.reset_mock() + self.execute.return_value.await_execution.reset_mock() + + def submit(self, event: Any): + return self.execute(event) + + +@pytest.fixture +def mock_executor(): + return MockExecutionEngine() + + +class TestDevice(Device): + def __init__(self, executor): + self._executor = executor + self.public_attr = 0 + + def public_method(self, arg): + return arg * 2 + + def _private_method(self): + return "private" + + +@pytest.fixture +def test_device(mock_executor): + return TestDevice(mock_executor) + + +def test_public_method_call(test_device, mock_executor): + """ + Test that public method calls are intercepted and executed through the executor. + + This test verifies: + 1. The executor's execute method is called once. + 2. 
A MethodCallAcquisitionEvent is created with the correct method name and arguments. + 3. The await_execution method of the executor is called. + 4. The correct result is returned from the method. + """ + result = test_device.public_method(5) + + # check for only one MethodCallAcquisitionEvent in the calls (other types of events are ok) + # get the list of method calls events + method_call_events = [call[0][0] for call in mock_executor.execute.call_args_list if isinstance(call[0][0], MethodCallAcquisitionEvent)] + assert len(method_call_events) == 1, "MethodCallAcquisitionEvent was not created" + method_call_event = method_call_events[0] + + assert method_call_event is not None, "MethodCallAcquisitionEvent was not created" + assert method_call_event.method_name == 'public_method' + assert method_call_event.args == (5,) + + mock_executor.execute.return_value.await_execution.assert_called() + assert result == mock_executor.execute.return_value.await_execution.return_value + +def test_private_method_call(test_device, mock_executor): + """ + Test that private method calls (methods starting with '_') are not intercepted and run on the executor. + """ + result = test_device._private_method() + + # search through the calls to see if there is one with a MethodCallAcquisitionEvent with this name + method_call_events = [call[0][0] for call in mock_executor.execute.call_args_list if isinstance(call[0][0], MethodCallAcquisitionEvent)] + assert len(method_call_events) == 0, "MethodCallAcquisitionEvent was created for a private method" + + +def test_public_attribute_get(test_device, mock_executor): + """ + Test that getting a public attribute is intercepted and executed through the executor. + + This test verifies: + 1. The executor's execute method is called once. + 2. An AttrAccessAcquisitionEvent is created with the correct attribute name. 
+ """ + _ = test_device.public_attr + + # get the list of attribute access events + attr_access_events = [call[0][0] for call in mock_executor.execute.call_args_list if isinstance(call[0][0], AttrAccessAcquisitionEvent)] + # filter to only AttrAccessAcquisitionEvents with the correct attribute name + attr_access_events = [event for event in attr_access_events if event.attr_name == 'public_attr'] + # check for only one AttrAccessAcquisitionEvent in the calls (other types of events are ok) + assert len(attr_access_events) == 1, "AttrAccessAcquisitionEvent was not created" + + +def test_public_attribute_set(test_device, mock_executor): + """ + Test that setting a public attribute is intercepted and executed through the executor. + + This test verifies: + 1. The executor's execute method is called once. + 2. An AttrSetAcquisitionEvent is created with the correct attribute name and value. + """ + test_device.public_attr = 10 + + # get the list of attribute set events + attr_set_events = [call[0][0] for call in mock_executor.execute.call_args_list if isinstance(call[0][0], AttrSetAcquisitionEvent)] + # valled once on initialization and once on setting the attribute + assert len(attr_set_events) == 2, "AttrSetAcquisitionEvent was not created" + attr_set_event = attr_set_events[1] + + assert attr_set_event.attr_name == 'public_attr' + assert attr_set_event.value == 10 + +def test_private_attribute_access(test_device, mock_executor): + """ + Test that accessing private attributes (attributes starting with '_') is not intercepted. + + This test verifies: + 1. Setting and getting a private attribute does not involve the executor. + 2. The private attribute can be set and retrieved directly. 
+ """ + test_device._private_attr = 20 + assert test_device._private_attr == 20 + +if __name__ == "__main__": + pytest.main() \ No newline at end of file From 948d3217db2c0ce8e5a9b8fd97cc22535ae19cc2 Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Tue, 2 Jul 2024 12:19:47 +0200 Subject: [PATCH 11/20] misc progress --- pycromanager/acquisition/new/acq_future.py | 149 +++++++++++------- .../new/base_classes/acq_events.py | 60 +++++-- pycromanager/acquisition/new/executor.py | 14 +- .../implementations/event_implementations.py | 4 +- .../unit_tests/test_acquisition_futures.py | 6 +- .../new/test/unit_tests/test_executor.py | 2 +- 6 files changed, 154 insertions(+), 81 deletions(-) diff --git a/pycromanager/acquisition/new/acq_future.py b/pycromanager/acquisition/new/acq_future.py index 787222e7..fc73bf0d 100644 --- a/pycromanager/acquisition/new/acq_future.py +++ b/pycromanager/acquisition/new/acq_future.py @@ -7,14 +7,14 @@ if TYPE_CHECKING: # avoid circular imports from pycromanager.acquisition.new.data_handler import DataHandler -from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent +from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducing, Stoppable, Abortable class AcquisitionFuture: - def __init__(self, event: Union[AcquisitionEvent, DataProducingAcquisitionEvent]): + def __init__(self, event: AcquisitionEvent): self._event = event event._set_future(self) # so that the event can notify the future when it is done and when data is acquired - self._data_handler = event.data_handler if isinstance(event, DataProducingAcquisitionEvent) else None + self._data_handler = event.data_handler if isinstance(event, DataProducing) else None self._event_complete_condition = threading.Condition() self._data_notification_condition = threading.Condition() self._event_complete = False @@ -24,78 +24,55 @@ def __init__(self, event: 
Union[AcquisitionEvent, DataProducingAcquisitionEvent] self._awaited_acquired_data: Dict[DataCoordinates, Tuple[Any, Any]] = {} self._awaited_processed_data: Dict[DataCoordinates, Tuple[Any, Any]] = {} self._awaited_stored_data: Dict[DataCoordinates, Tuple[Any, Any]] = {} - - def _notify_execution_complete(self, exception: Exception = None): + self._return_value = None + self._exception = None + + # remove unsupported methods + if not isinstance(self._event, DataProducing): + del self.await_data + if not isinstance(self._event, Stoppable): + del self.stop + if not isinstance(self._event, Abortable): + del self.abort + + def await_execution(self) -> Any: """ - Notify the future that the event has completed - """ - with self._event_complete_condition: - self._event_complete = True - self._event_complete_condition.notify_all() - - def await_execution(self): - """ - Block until the event is complete + Block until the event is complete. If event.execute returns a value, it will be returned here. 
+ If event.execute raises an exception, it will be raised here as well """ with self._event_complete_condition: while not self._event_complete: self._event_complete_condition.wait() + if self._exception is not None: + raise self._exception + return self._return_value - def _notify_data(self, image_coordinates: DataCoordinates, data, metadata, processed=False, stored=False): + def stop(self): """ - Called by the data handler to notify the future that data has been acquired/processed/saved - Passes references to the data and metadata, so that if something is waiting on the future - to asynchronously retrieve the data, it is held onto for fast access - - Args: - image_coordinates: The coordinates of the acquired data - data: The data itself - metadata: Metadata associated with the data - processed: Whether the data has been processed - stored: Whether the data has been saved + (Only for AcquisitionEvents that also inherit from Stoppable) + Request the acquisition event to stop its execution. This will return immediately, + but set a flag that the event should stop at the next opportunity. It is up to the implementation of the + event to check this flag and stop its execution. 
""" - with self._data_notification_condition: - # pass the data to the function that is waiting on it - if not processed and not stored: - self._acquired_data_coordinates.add(image_coordinates) - if image_coordinates in self._awaited_acquired_data.keys(): - self._awaited_acquired_data[ - image_coordinates] = (data if self._awaited_acquired_data[image_coordinates][0] else None, - metadata if self._awaited_acquired_data[image_coordinates][1] else None) - elif processed and not stored: - self._processed_data_coordinates.add(image_coordinates) - if image_coordinates in self._awaited_processed_data.keys(): - self._awaited_processed_data[ - image_coordinates] = (data if self._awaited_processed_data[image_coordinates][0] else None, - metadata if self._awaited_processed_data[image_coordinates][1] else None) - else: # stored - self._stored_data_coordinates.add(image_coordinates) - if image_coordinates in self._awaited_stored_data.keys(): - self._awaited_stored_data[ - image_coordinates] = (data if self._awaited_stored_data[image_coordinates][0] else None, - metadata if self._awaited_stored_data[image_coordinates][1] else None) - self._data_notification_condition.notify_all() + self._event._stop() - def _check_if_coordinates_possible(self, coordinates): + def abort(self): """ - Check if the given coordinates are possible for this event. raise a ValueError if not + (Only for AcquistionEvents that also inherit from Abortable) + Request the acquisition event to abort its execution. This will return immediately, + but set a flag that the event should abort at the next opportunity. It is up to the implementation of the + event to check this flag and abort its execution. 
""" - possible = self._event.image_coordinate_iterator.might_produce_coordinates(coordinates) - if possible is False: - raise ValueError("This event is not expected to produce the given coordinates") - elif possible is None: - # TODO: suggest a better way to do this (ie a smart generator that knows if produced coordinates are valid) - warnings.warn("This event may not produce the given coordinates") + self._event._abort() - # TODO: write tests for this with returning data, metadata, and both, and neither - # Also try adding in a big delay in the queue or image saving and make sure it still works def await_data(self, coordinates: Optional[Union[DataCoordinates, Dict[str, Union[int, str]], DataCoordinatesIterator, Sequence[DataCoordinates], Sequence[Dict[str, Union[int, str]]]]], return_data: bool = False, return_metadata: bool = False, processed: bool = False, stored: bool = False): """ + (Only for AcquisitionEvents that also inherit from DataProducing) Block until the event's data is acquired/processed/saved, and optionally return the data/metadata. when waiting for the data to be acquired (i.e. before it is processed), since there is no way to guarantee that this function is called before the data is acquired, the data may have already been saved and not readily @@ -108,13 +85,13 @@ def await_data(self, coordinates: Optional[Union[DataCoordinates, Dict[str, Unio return_metadata: whether to return the metadata processed: whether to wait until data has been processed. If not data processor is in use, then this parameter has no effect - stored: whether to wait for data that has been stored. If the call to await data occurs before the + stored: whether to wait for data that has been stored. If the call to await data occurs before the data gets passed off to the storage class, then it will be stored in memory and returned immediately. 
without having to retrieve """ # Check if this event produces data - if not isinstance(self._event, DataProducingAcquisitionEvent): + if not isinstance(self._event, DataProducing): raise ValueError("This event does not produce data") coordinates_iterator = DataCoordinatesIterator.create(coordinates) @@ -202,3 +179,59 @@ def await_data(self, coordinates: Optional[Union[DataCoordinates, Dict[str, Unio elif return_metadata: return all_metadata + def _notify_execution_complete(self, return_value: Any = None, exception: Exception = None): + """ + Notify the future that the event has completed + """ + with self._event_complete_condition: + self._return_value = return_value + self._exception = exception + self._event_complete = True + self._event_complete_condition.notify_all() + + def _notify_data(self, image_coordinates: DataCoordinates, data, metadata, processed=False, stored=False): + """ + (Only for DataProducing AcquisitionEvents) + Called by the data handler to notify the future that data has been acquired/processed/saved + Passes references to the data and metadata, so that if something is waiting on the future + to asynchronously retrieve the data, it is held onto for fast access + + Args: + image_coordinates: The coordinates of the acquired data + data: The data itself + metadata: Metadata associated with the data + processed: Whether the data has been processed + stored: Whether the data has been saved + """ + with self._data_notification_condition: + # pass the data to the function that is waiting on it + if not processed and not stored: + self._acquired_data_coordinates.add(image_coordinates) + if image_coordinates in self._awaited_acquired_data.keys(): + self._awaited_acquired_data[ + image_coordinates] = (data if self._awaited_acquired_data[image_coordinates][0] else None, + metadata if self._awaited_acquired_data[image_coordinates][1] else None) + elif processed and not stored: + self._processed_data_coordinates.add(image_coordinates) + if 
image_coordinates in self._awaited_processed_data.keys(): + self._awaited_processed_data[ + image_coordinates] = (data if self._awaited_processed_data[image_coordinates][0] else None, + metadata if self._awaited_processed_data[image_coordinates][1] else None) + else: # stored + self._stored_data_coordinates.add(image_coordinates) + if image_coordinates in self._awaited_stored_data.keys(): + self._awaited_stored_data[ + image_coordinates] = (data if self._awaited_stored_data[image_coordinates][0] else None, + metadata if self._awaited_stored_data[image_coordinates][1] else None) + self._data_notification_condition.notify_all() + + def _check_if_coordinates_possible(self, coordinates): + """ + Check if the given coordinates are possible for this event. raise a ValueError if not + """ + possible = self._event.image_coordinate_iterator.might_produce_coordinates(coordinates) + if possible is False: + raise ValueError("This event is not expected to produce the given coordinates") + elif possible is None: + # TODO: suggest a better way to do this (ie a smart generator that knows if produced coordinates are valid) + warnings.warn("This event may not produce the given coordinates") diff --git a/pycromanager/acquisition/new/base_classes/acq_events.py b/pycromanager/acquisition/new/base_classes/acq_events.py index 3dea3361..c971a3b4 100644 --- a/pycromanager/acquisition/new/base_classes/acq_events.py +++ b/pycromanager/acquisition/new/base_classes/acq_events.py @@ -14,22 +14,17 @@ if TYPE_CHECKING: # avoid circular imports from pycromanager.acquisition.new.acq_future import AcquisitionFuture - -# def atomic_instruction(cls): -# cls.atomic_instruction = True -# return cls - class AcquisitionEvent(BaseModel, ABC): num_retries_on_exception: int = 0 - _exception: Exception = None _future_weakref: Optional[weakref.ReferenceType['AcquisitionFuture']] = None + _finished: bool = False # TODO: want to make this specific to certain attributes? 
class Config: arbitrary_types_allowed = True @abstractmethod - def execute(self): + def execute(self) -> Any: """ Execute the event. This event is called by the executor, and should be overriden by subclasses to implement the event's functionality @@ -44,27 +39,66 @@ def _set_future(self, future: 'AcquisitionFuture'): # it can be garbage collected. The event should not give access to the future to user code self._future_weakref = weakref.ref(future) - def _post_execution(self, exception: Optional[Exception] = None): + def _post_execution(self, return_value: Optional[Any] = None, exception: Optional[Exception] = None, + stopped=False, aborted=False): """ Method that is called after the event is executed to update acquisition futures about the event's status. This is called automatically by the Executor and should not be overriden by subclasses. Args: + return_value: Return value of the event exception: Exception that was raised during execution, if any + stopped: Whether the event was stopped + aborted: Whether the event was aborted """ if self._future_weakref is None: raise Exception("Future not set for event") future = self._future_weakref() if future is not None: - future._notify_execution_complete(self._exception) + future._notify_execution_complete(return_value, exception) + +class Stoppable: + """ + Acquisition events that can be stopped should inherit from this class. They are responsible for checking if + is_stop_requested() returns True and stopping their execution if it does. When stopping, an orderly shutdown + should be performed, unlike when aborting, which should be immediate. The details of what such an orderly + shutdown entails are up to the implementation of the event. 
+ """ + _stop_requested: bool = False + + def _stop(self): + """ + This is handled by the Future + """ + self._stop_requested = True + + def is_stop_requested(self): + return self._stop_requested +class Abortable: + """ + Acquisition events that can be aborted should inherit from this class. They are responsible for checking if + is_abort_requested() returns True and aborting their execution if it does. When aborting, the event should + immediately stop its executiond. + """ + _abort_requested: bool = False + + def _abort(self): + """ + This is handled by the Future + """ + self._abort_requested = True + def is_abort_requested(self): + return self._abort_requested -class DataProducingAcquisitionEvent(AcquisitionEvent): +class DataProducing(BaseModel): """ - Special type of acquisition event that produces data. It must be passed an image_coordinate_iterator - object that generates the coordinates of each piece of data (i.e. image) that will be produced by the event. - For example, {time: 0}, {time: 1}, {time: 2} for a time series acquisition. + Acquisition events that produce data should inherit from this class. They are responsible for putting data + into the output queue. This class provides a method for putting data into the output queue. It must be passed + a DataHandler object that will handle the data, and an image_coordinate_iterator object that generates the + coordinates of each piece of data (i.e. image) that will be produced by the event. For example, {time: 0}, + {time: 1}, {time: 2} for a time series acquisition. """ data_handler: DataHandler # This is eventually an ImageCoordinatesIterator. 
If an Iterable[ImageCoordinates] or diff --git a/pycromanager/acquisition/new/executor.py b/pycromanager/acquisition/new/executor.py index 83bbd38f..c0fa453f 100644 --- a/pycromanager/acquisition/new/executor.py +++ b/pycromanager/acquisition/new/executor.py @@ -11,7 +11,7 @@ from typing import Union, Iterable from pycromanager.acquisition.new.acq_future import AcquisitionFuture -from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent +from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducing, Stoppable, Abortable from pycromanager.acquisition.new.data_handler import DataHandler @@ -176,8 +176,13 @@ def _run_thread(self): # Event execution loop while True: try: - event.execute() - event._post_execution() # notify futures + if event._finished: + raise RuntimeError("Event was already executed") + return_val = event.execute() + event._finished = True + stopped = isinstance(event, Stoppable) and event.is_stop_requested() + aborted = isinstance(event, Abortable) and event.is_abort_requested() + event._post_execution(return_value=return_val, stopped=stopped, aborted=aborted) # notify futures with self._addition_condition: self._event_executing = False break @@ -190,9 +195,10 @@ def _run_thread(self): traceback.print_exc() else: traceback.print_exc() - event._post_execution(e) # notify futures + event._post_execution(exception=e) # notify futures with self._addition_condition: self._event_executing = False + event._finished = True raise e # re-raise the exception to stop the thread event = None diff --git a/pycromanager/acquisition/new/implementations/event_implementations.py b/pycromanager/acquisition/new/implementations/event_implementations.py index f0206aac..795cc44d 100644 --- a/pycromanager/acquisition/new/implementations/event_implementations.py +++ b/pycromanager/acquisition/new/implementations/event_implementations.py @@ -3,12 +3,12 @@ """ from typing import Iterable 
import itertools -from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent +from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducing from pycromanager.acquisition.new.base_classes.devices import Camera from pycromanager.acquisition.new.data_coords import DataCoordinates -class ReadoutImages(DataProducingAcquisitionEvent): +class ReadoutImages(AcquisitionEvent, DataProducing): """ Readout one or more images (and associated metadata) from a camera diff --git a/pycromanager/acquisition/new/test/unit_tests/test_acquisition_futures.py b/pycromanager/acquisition/new/test/unit_tests/test_acquisition_futures.py index 0bc83106..a6bd778a 100644 --- a/pycromanager/acquisition/new/test/unit_tests/test_acquisition_futures.py +++ b/pycromanager/acquisition/new/test/unit_tests/test_acquisition_futures.py @@ -7,7 +7,7 @@ # Assuming these are the correct imports based on the provided code from pycromanager.acquisition.new.data_handler import DataHandler -from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent +from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducing from pycromanager.acquisition.new.acq_future import AcquisitionFuture @@ -25,7 +25,7 @@ def get(self, coords: DataCoordinates, return_data=True, return_metadata=True, p return (data if return_data else None, metadata if return_metadata else None) -class MockDataProducingAcquisitionEvent(DataProducingAcquisitionEvent): +class MockDataProducing(AcquisitionEvent, DataProducing): def __init__(self): super().__init__(image_coordinate_iterator=DataCoordinatesIterator.create( @@ -41,7 +41,7 @@ def mock_data_handler(): @pytest.fixture def mock_event(): - return MockDataProducingAcquisitionEvent() + return MockDataProducing() @pytest.fixture diff --git a/pycromanager/acquisition/new/test/unit_tests/test_executor.py 
b/pycromanager/acquisition/new/test/unit_tests/test_executor.py index 4ccc1ab2..86532e2a 100644 --- a/pycromanager/acquisition/new/test/unit_tests/test_executor.py +++ b/pycromanager/acquisition/new/test/unit_tests/test_executor.py @@ -1,6 +1,6 @@ import pytest from unittest.mock import MagicMock -from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducingAcquisitionEvent +from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducing from pycromanager.acquisition.new.executor import ExecutionEngine from pycromanager.acquisition.new.acq_future import AcquisitionFuture import threading From e1404f67c8469efc3d8259902c991b8df1b780af Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Tue, 2 Jul 2024 12:56:38 +0200 Subject: [PATCH 12/20] rename to execution engine --- .github/ISSUE_TEMPLATE/bug_report.md | 2 +- .github/ISSUE_TEMPLATE/enhancement.md | 2 +- .github/workflows/build_and_deploy.yml | 18 +++++++++--------- build_automation/update_PycroManagerJava.py | 2 +- misc/PropertyMap.py | 6 +++--- misc/examples/positionTransformation.py | 8 ++++---- misc/positions.py | 8 ++++---- pycromanager/acquisition/RAMStorage_java.py | 2 +- .../acquisition/acq_eng_py/internal/engine.py | 6 +++--- .../acquisition/acquisition_superclass.py | 10 +++++----- .../acquisition/java_backend_acquisitions.py | 6 +++--- .../acquisition/python_backend_acquisitions.py | 2 +- .../new => execution_engine}/__init__.py | 0 .../new => execution_engine}/acq_future.py | 6 +++--- .../new => execution_engine}/apis/__init__.py | 0 .../apis/data_storage.py | 6 +++--- .../base_classes/__init__.py | 0 .../base_classes/acq_events.py | 6 +++--- .../base_classes/device_types.py | 2 +- .../new => execution_engine}/data_coords.py | 0 .../new => execution_engine}/data_handler.py | 10 +++++----- .../new => execution_engine}/executor.py | 8 ++++---- .../implementations/__init__.py | 0 
.../data_storage_implementations.py | 4 ++-- .../implementations/event_implementations.py | 10 +++++----- .../mm_device_implementations.py | 4 ++-- .../internal}/__init__.py | 0 .../internal/device.py | 4 ++-- .../test}/__init__.py | 0 .../test/integration_test.py | 8 ++++---- .../test/integration_tests/__init__.py | 0 .../test/integration_tests/camera_tests.py | 12 ++++++------ .../test/sandbox_device.py | 8 ++++---- .../test/unit_tests/__init__.py | 0 .../unit_tests/test_acquisition_futures.py | 8 ++++---- .../test/unit_tests/test_data_coords.py | 2 +- .../test/unit_tests/test_data_handler.py | 8 ++++---- .../test/unit_tests/test_data_storage.py | 6 +++--- .../test/unit_tests/test_device_metaclass.py | 4 ++-- .../test/unit_tests/test_executor.py | 6 +++--- pycromanager/headless.py | 8 ++++---- pycromanager/mm_java_classes.py | 6 +++--- pycromanager/napari_util.py | 6 +++--- pycromanager/test/test_acquisition.py | 2 +- scripts/bridge_tests.py | 2 +- scripts/lightsheet_deskew.py | 6 +++--- scripts/napari_frontend.py | 8 ++++---- 47 files changed, 116 insertions(+), 116 deletions(-) rename pycromanager/{acquisition/new => execution_engine}/__init__.py (100%) rename pycromanager/{acquisition/new => execution_engine}/acq_future.py (97%) rename pycromanager/{acquisition/new => execution_engine}/apis/__init__.py (100%) rename pycromanager/{acquisition/new => execution_engine}/apis/data_storage.py (96%) rename pycromanager/{acquisition/new => execution_engine}/base_classes/__init__.py (100%) rename pycromanager/{acquisition/new => execution_engine}/base_classes/acq_events.py (94%) rename pycromanager/{acquisition/new => execution_engine}/base_classes/device_types.py (96%) rename pycromanager/{acquisition/new => execution_engine}/data_coords.py (100%) rename pycromanager/{acquisition/new => execution_engine}/data_handler.py (95%) rename pycromanager/{acquisition/new => execution_engine}/executor.py (96%) rename pycromanager/{acquisition/new => 
execution_engine}/implementations/__init__.py (100%) rename pycromanager/{acquisition/new => execution_engine}/implementations/data_storage_implementations.py (94%) rename pycromanager/{acquisition/new => execution_engine}/implementations/event_implementations.py (86%) rename pycromanager/{acquisition/new => execution_engine}/implementations/mm_device_implementations.py (94%) rename pycromanager/{acquisition/new/test => execution_engine/internal}/__init__.py (100%) rename pycromanager/{acquisition/new => execution_engine}/internal/device.py (94%) rename pycromanager/{acquisition/new/test/unit_tests => execution_engine/test}/__init__.py (100%) rename pycromanager/{acquisition/new => execution_engine}/test/integration_test.py (80%) create mode 100644 pycromanager/execution_engine/test/integration_tests/__init__.py rename pycromanager/{acquisition/new => execution_engine}/test/integration_tests/camera_tests.py (82%) rename pycromanager/{acquisition/new => execution_engine}/test/sandbox_device.py (80%) create mode 100644 pycromanager/execution_engine/test/unit_tests/__init__.py rename pycromanager/{acquisition/new => execution_engine}/test/unit_tests/test_acquisition_futures.py (92%) rename pycromanager/{acquisition/new => execution_engine}/test/unit_tests/test_data_coords.py (98%) rename pycromanager/{acquisition/new => execution_engine}/test/unit_tests/test_data_handler.py (91%) rename pycromanager/{acquisition/new => execution_engine}/test/unit_tests/test_data_storage.py (89%) rename pycromanager/{acquisition/new => execution_engine}/test/unit_tests/test_device_metaclass.py (95%) rename pycromanager/{acquisition/new => execution_engine}/test/unit_tests/test_executor.py (93%) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 8e9756b7..31fbb0a2 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -27,7 +27,7 @@ 
https://github.com/micro-manager/pycro-manager/tree/main/pycromanager/test --> ### Problem diff --git a/.github/workflows/build_and_deploy.yml b/.github/workflows/build_and_deploy.yml index 38197d4b..5f07edd5 100644 --- a/.github/workflows/build_and_deploy.yml +++ b/.github/workflows/build_and_deploy.yml @@ -1,9 +1,9 @@ # If changes to java versions -# Deploy new version of pycromanager java to maven, +# Deploy execution_engine version of pycromanager java to maven, # and then update micro-manager ivy file and make PR # If changes to python version -# await for PR to merge into new MM version -# then publish new version to pypi +# await for PR to merge into execution_engine MM version +# then publish execution_engine version to pypi name: Build and deploy Java and Python components of Pycro-Manager @@ -20,7 +20,7 @@ concurrency: PM_version_update jobs: - # Use a filter to determine whether to deploy new java version + # Use a filter to determine whether to deploy execution_engine java version check-java-version: if: ${{ github.repository == 'micro-manager/pycro-manager' }} runs-on: ubuntu-latest @@ -123,7 +123,7 @@ jobs: repository: micro-manager/pycro-manager ref: main - - name: Wait for new version to be available and update ivy.xml + - name: Wait for execution_engine version to be available and update ivy.xml run: | cd pycro-manager git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com" @@ -194,7 +194,7 @@ jobs: git push origin --delete dependency_update_from_pycromanager - # After java deps have updated in MM, time to check if a new python version is needed + # After java deps have updated in MM, time to check if a execution_engine python version is needed check-python-version: if: ${{ github.repository == 'micro-manager/pycro-manager' }} runs-on: ubuntu-latest @@ -213,12 +213,12 @@ jobs: pypi-deploy: - # Once any changes to java have gone into micro-manager, a new version of PM can be deployed to PyPi + # Once any changes to 
java have gone into micro-manager, a execution_engine version of PM can be deployed to PyPi needs: [check-java-version, mm-update, maven-deploy, check-python-version] - name: Deploy new version to PyPi if needed + name: Deploy execution_engine version to PyPi if needed # Run if - # java update is complete without errors and new version is merged into MM main (or no java update) + # java update is complete without errors and execution_engine version is merged into MM main (or no java update) # and python version changed # weird syntax needed, see: https://github.com/actions/runner/issues/491#issuecomment-850884422 if: ${{ github.repository == 'micro-manager/pycro-manager' && always() && needs.check-python-version.outputs.changed == 'true' && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled')}} diff --git a/build_automation/update_PycroManagerJava.py b/build_automation/update_PycroManagerJava.py index aa8efaab..34e253ca 100644 --- a/build_automation/update_PycroManagerJava.py +++ b/build_automation/update_PycroManagerJava.py @@ -39,7 +39,7 @@ def read_versions(root): for lib_name in main_branch_versions.keys(): old_version = main_branch_versions[lib_name] new_version = updated_versions[lib_name] - print('\t', lib_name, '\t\told: ', old_version, '\tnew: ', new_version) + print('\t', lib_name, '\t\told: ', old_version, '\texecution_engine: ', new_version) if new_version > old_version: if new_version.minor > old_version.minor: minor_version_increased = True diff --git a/misc/PropertyMap.py b/misc/PropertyMap.py index cda27221..8444dc14 100644 --- a/misc/PropertyMap.py +++ b/misc/PropertyMap.py @@ -82,7 +82,7 @@ def encode(self) -> dict: @staticmethod def hook(d: dict): - """Check if a dictionary represents an instance of this class and return a new instance. If this dict does not match + """Check if a dictionary represents an instance of this class and return a execution_engine instance. 
If this dict does not match the correct pattern then just return the original dict.""" if "type" in d and d["type"] in Property.pTypes.values(): if "scalar" in d: @@ -101,7 +101,7 @@ def encode(self) -> dict: @staticmethod def hook(d: dict): - """Check if a dictionary represents an instance of this class and return a new instance. If this dict does not match + """Check if a dictionary represents an instance of this class and return a new instance. If this dict does not match the correct pattern then just return the original dict.""" if "type" in d and d["type"] in Property.pTypes.values(): if "array" in d: @@ -239,7 +239,7 @@ def __getitem__(self, idx: typing.Union[slice, int]) -> PropertyMap: if __name__ == "__main__": - """Test that opens a position list file, saves it to a new file and then checks that both versions + """Test that opens a position list file, saves it to a new file and then checks that both versions are still identical""" path1 = r"PositionList.pos" path2 = r"PositionListOut.pos" diff --git a/misc/examples/positionTransformation.py b/misc/examples/positionTransformation.py index 66ab8c31..cd588b01 100644 --- a/misc/examples/positionTransformation.py +++ b/misc/examples/positionTransformation.py @@ -1,8 +1,8 @@ from misc.positions import PositionList -"""This example demonstrates how to generate new imaging positions from a set of positions after the sample has been picked up and likely shifted or rotated. +"""This example demonstrates how to generate new imaging positions from a set of positions after the sample has been picked up and likely shifted or rotated. This method relies on measuring a set of reference positions (at least 3) before and after moving the dish. You can then use these positions to generate an -affine transform. This affine transform can then be applied to your original cell positions in order to generate a new set of positions for the same cells. +affine transform.
This affine transform can then be applied to your original cell positions in order to generate a new set of positions for the same cells. In the case of a standard cell culture dish it is best to use the corners of the glass coverslip as your reference locations. """ preTreatRefPositions = PositionList.load( @@ -19,10 +19,10 @@ ) # Load the positions of the cells we are measuring before the dish was removed. postTreatCellPositions = preTreatCellPositions.applyAffineTransform( transformMatrix -) # Transform the cell positions to the new expected locations. +) # Transform the cell positions to the new expected locations. postTreatCellPositions.save( r"experimentPath\transformedPositions.pos" -) # Save the new positions to a file that can be loaded by Micro-Manager. +) # Save the new positions to a file that can be loaded by Micro-Manager. preTreatRefPositions.plot() postTreatRefPositions.plot() diff --git a/misc/positions.py b/misc/positions.py index cc7b947a..f3ccaebe 100644 --- a/misc/positions.py +++ b/misc/positions.py @@ -221,7 +221,7 @@ def renameXYStage(self, label: str): """Change the name of the xy stage. Args: - label: The new name for the xy Stage + label: The new name for the xy Stage """ self.defaultXYStage = label self.getXYPosition().renameStage(label) @@ -230,7 +230,7 @@ def copy(self) -> MultiStagePosition: """Creates a copy fo the object Returns: - A new `MultiStagePosition` object. + A new `MultiStagePosition` object. """ return copy.deepcopy(self) @@ -340,7 +340,7 @@ def renameStage(self, label) -> PositionList: """Change the name of the xy stage. Args: - label: The new name for the xy Stage + label: The new name for the xy Stage Returns: A reference to this object @@ -523,7 +523,7 @@ def hover(event): def generateList(data: np.ndarray) -> PositionList: - """Example function to create a brand new position list in python.
+ """Example function to create a brand execution_engine position list in python. Args: data: An Nx2 array of xy coordinates. These coordinates will be converted to a PositionList which can be diff --git a/pycromanager/acquisition/RAMStorage_java.py b/pycromanager/acquisition/RAMStorage_java.py index 6f54cbec..f64fd6bf 100644 --- a/pycromanager/acquisition/RAMStorage_java.py +++ b/pycromanager/acquisition/RAMStorage_java.py @@ -22,7 +22,7 @@ def close(self): def add_available_axes(self, image_coordinates): """ - The Java RAM storage has received a new image with the given axes. Add these axes to the index. + The Java RAM storage has received a execution_engine image with the given axes. Add these axes to the index. """ self._index_keys.add(frozenset(image_coordinates.items())) # update information about the available images diff --git a/pycromanager/acquisition/acq_eng_py/internal/engine.py b/pycromanager/acquisition/acq_eng_py/internal/engine.py index 01274b79..9d655a20 100644 --- a/pycromanager/acquisition/acq_eng_py/internal/engine.py +++ b/pycromanager/acquisition/acq_eng_py/internal/engine.py @@ -3,7 +3,7 @@ import time import datetime -# from pycromanager.acquisition.new.acq_events import AcquisitionEvent +# from pycromanager.acquisition.execution_engine.acq_events import AcquisitionEvent # TODO AcquisitionEvent = None @@ -88,7 +88,7 @@ def check_for_default_devices(self, event: AcquisitionEvent): # # all events # sequence_event = self.merge_sequence_event(self.sequenced_events) # self.sequenced_events.clear() - # # Add in the start of the new sequence + # # Add in the start of the execution_engine sequence # if not event.is_acquisition_sequence_end_event(): # self.sequenced_events.append(event) # if event.acquisition_.is_debug_mode(): @@ -377,7 +377,7 @@ def change_additional_properties(event): # Compare to last event to see what needs to change if self.last_event is not None and self.last_event.acquisition_ != event.acquisition_: - self.last_event = None # 
Update all hardware if switching to a new acquisition + self.last_event = None # Update all hardware if switching to a new acquisition # Other stage devices.py diff --git a/pycromanager/acquisition/acquisition_superclass.py b/pycromanager/acquisition/acquisition_superclass.py index de8fe465..56e6d570 100644 --- a/pycromanager/acquisition/acquisition_superclass.py +++ b/pycromanager/acquisition/acquisition_superclass.py @@ -18,7 +18,7 @@ from queue import Queue from typing import Generator, Dict, Union -# from pycromanager.acquisition.new.acq_events import AcquisitionEvent +# from pycromanager.acquisition.execution_engine.acq_events import AcquisitionEvent AcquisitionEvent = None @@ -110,12 +110,12 @@ def __init__( or two arguments (current event, event_queue) pre_hardware_hook_fn : Callable hook function that will be run just before the hardware is updated before acquiring - a new image. In the case of hardware sequencing, it will be run just before a sequence of instructions are + a new image. In the case of hardware sequencing, it will be run just before a sequence of instructions are dispatched to the hardware. Accepts either one argument (the current acquisition event) or two arguments (current event, event_queue) post_hardware_hook_fn : Callable hook function that will be run just before the hardware is updated before acquiring - a new image. In the case of hardware sequencing, it will be run just after a sequence of instructions are + a new image. In the case of hardware sequencing, it will be run just after a sequence of instructions are dispatched to the hardware, but before the camera sequence has been started. Accepts either one argument (the current acquisition event) or two arguments (current event, event_queue) post_camera_hook_fn : Callable @@ -131,7 +131,7 @@ so as to not back up the processing of other notifications.
image_saved_fn : Callable function that takes two arguments (the Axes of the image that just finished saving, and the Dataset) - or three arguments (Axes, Dataset and the event_queue) and gets called whenever a new image is written to + or three arguments (Axes, Dataset and the event_queue) and gets called whenever a new image is written to disk napari_viewer : napari.Viewer Provide a napari viewer to display acquired data in napari (https://napari.org/) rather than the built-in @@ -315,7 +315,7 @@ def _are_events_finished(self): def _add_storage_monitor_fn(self, image_saved_fn=None): """ - Add a callback function that gets called whenever a new image is writtern to disk (for acquisitions in + Add a callback function that gets called whenever a new image is written to disk (for acquisitions in progress only) Parameters diff --git a/pycromanager/acquisition/java_backend_acquisitions.py b/pycromanager/acquisition/java_backend_acquisitions.py index b64763d9..276b2ef0 100644 --- a/pycromanager/acquisition/java_backend_acquisitions.py +++ b/pycromanager/acquisition/java_backend_acquisitions.py @@ -159,7 +159,7 @@ def process_and_sendoff(image_tags_tuple, original_dtype): while True: message = None while message is None: - message = pull_socket.receive(timeout=30, suppress_debug_message=True) # check for new message + message = pull_socket.receive(timeout=30, suppress_debug_message=True) # check for new message if "special" in message and message["special"] == "finished": pull_socket.close() @@ -304,7 +304,7 @@ def __init__( # Acquistition.start is now deprecated, so this can be removed later # Acquisitions now get started automatically when the first events submitted # but Magellan acquisitons (and probably others that generate their own events) - # will need some new method to submit events only after image processors etc have been added + # will need some new method to submit events only after image processors etc
have been added self._acq.start() self._dataset_disk_location = ( self._acq.get_data_sink().get_storage().get_disk_location() ) @@ -498,7 +498,7 @@ def _initialize_hooks(self, **kwargs): def _create_remote_acquisition(self, **kwargs): core = ZMQRemoteMMCoreJ(port=self._port, timeout=self._timeout, debug=self._debug) acq_factory = JavaObject("org.micromanager.remote.RemoteAcquisitionFactory", - # create a new socket for it to run on so that it can have blocking calls without interfering with + # create a new socket for it to run on so that it can have blocking calls without interfering with # the main socket or other internal sockets new_socket=True, port=self._port, args=[core], debug=self._debug, timeout=self._timeout) diff --git a/pycromanager/acquisition/python_backend_acquisitions.py b/pycromanager/acquisition/python_backend_acquisitions.py index 37aa63ea..40c7f90e 100644 --- a/pycromanager/acquisition/python_backend_acquisitions.py +++ b/pycromanager/acquisition/python_backend_acquisitions.py @@ -1,7 +1,7 @@ import warnings from docstring_inheritance import NumpyDocstringInheritanceMeta from pycromanager.acquisition.acquisition_superclass import _validate_acq_events, Acquisition -# from pycromanager.acquisition.new.acq_events import AcquisitionEvent +# from pycromanager.acquisition.execution_engine.acq_events import AcquisitionEvent #TODO: AcquisitionEvent = None from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata diff --git a/pycromanager/acquisition/new/__init__.py b/pycromanager/execution_engine/__init__.py similarity index 100% rename from pycromanager/acquisition/new/__init__.py rename to pycromanager/execution_engine/__init__.py diff --git a/pycromanager/acquisition/new/acq_future.py b/pycromanager/execution_engine/acq_future.py similarity index 97% rename from pycromanager/acquisition/new/acq_future.py rename to pycromanager/execution_engine/acq_future.py index fc73bf0d..73263943 100644 ---
a/pycromanager/acquisition/new/acq_future.py +++ b/pycromanager/execution_engine/acq_future.py @@ -1,13 +1,13 @@ from typing import Union, Optional, Any, Dict, Tuple, Sequence, Set import threading import warnings -from pycromanager.acquisition.new.data_coords import DataCoordinates, DataCoordinatesIterator +from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates, DataCoordinatesIterator from typing import TYPE_CHECKING if TYPE_CHECKING: # avoid circular imports - from pycromanager.acquisition.new.data_handler import DataHandler -from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducing, Stoppable, Abortable + from pycromanager.acquisition.execution_engine.data_handler import DataHandler +from pycromanager.acquisition.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing, Stoppable, Abortable class AcquisitionFuture: diff --git a/pycromanager/acquisition/new/apis/__init__.py b/pycromanager/execution_engine/apis/__init__.py similarity index 100% rename from pycromanager/acquisition/new/apis/__init__.py rename to pycromanager/execution_engine/apis/__init__.py diff --git a/pycromanager/acquisition/new/apis/data_storage.py b/pycromanager/execution_engine/apis/data_storage.py similarity index 96% rename from pycromanager/acquisition/new/apis/data_storage.py rename to pycromanager/execution_engine/apis/data_storage.py index 1792306f..3db4dfa6 100644 --- a/pycromanager/acquisition/new/apis/data_storage.py +++ b/pycromanager/execution_engine/apis/data_storage.py @@ -3,7 +3,7 @@ """ from typing import Protocol, runtime_checkable, Union, List, Tuple, Dict, Any -from pycromanager.acquisition.new.data_coords import DataCoordinates +from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates import numpy as np from pydantic.types import JsonValue @@ -88,7 +88,7 @@ def close(self): # @abstractmethod # def await_new_image(self, timeout=None): # """ -# Wait for a new 
image to arrive in the dataset +# Wait for a execution_engine image to arrive in the dataset # # Parameters # ---------- @@ -98,7 +98,7 @@ def close(self): # Returns # ------- # bool -# True if a new image has arrived, False if the timeout was reached +# True if a execution_engine image has arrived, False if the timeout was reached # """ # pass # diff --git a/pycromanager/acquisition/new/base_classes/__init__.py b/pycromanager/execution_engine/base_classes/__init__.py similarity index 100% rename from pycromanager/acquisition/new/base_classes/__init__.py rename to pycromanager/execution_engine/base_classes/__init__.py diff --git a/pycromanager/acquisition/new/base_classes/acq_events.py b/pycromanager/execution_engine/base_classes/acq_events.py similarity index 94% rename from pycromanager/acquisition/new/base_classes/acq_events.py rename to pycromanager/execution_engine/base_classes/acq_events.py index c971a3b4..3721107d 100644 --- a/pycromanager/acquisition/new/base_classes/acq_events.py +++ b/pycromanager/execution_engine/base_classes/acq_events.py @@ -7,12 +7,12 @@ from pydantic import BaseModel from pydantic import field_validator -from pycromanager.acquisition.new.data_coords import DataCoordinates, DataCoordinatesIterator -from pycromanager.acquisition.new.data_handler import DataHandler +from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates, DataCoordinatesIterator +from pycromanager.acquisition.execution_engine.data_handler import DataHandler from typing import TYPE_CHECKING if TYPE_CHECKING: # avoid circular imports - from pycromanager.acquisition.new.acq_future import AcquisitionFuture + from pycromanager.acquisition.execution_engine.acq_future import AcquisitionFuture class AcquisitionEvent(BaseModel, ABC): num_retries_on_exception: int = 0 diff --git a/pycromanager/acquisition/new/base_classes/device_types.py b/pycromanager/execution_engine/base_classes/device_types.py similarity index 96% rename from 
pycromanager/acquisition/new/base_classes/device_types.py rename to pycromanager/execution_engine/base_classes/device_types.py index 594e8dd8..597176f1 100644 --- a/pycromanager/acquisition/new/base_classes/device_types.py +++ b/pycromanager/execution_engine/base_classes/device_types.py @@ -3,7 +3,7 @@ """ from abc import abstractmethod -from pycromanager.acquisition.new.internal.device import Device +from pycromanager.execution_engine.internal.device import Device class SingleAxisActuator(Device): diff --git a/pycromanager/acquisition/new/data_coords.py b/pycromanager/execution_engine/data_coords.py similarity index 100% rename from pycromanager/acquisition/new/data_coords.py rename to pycromanager/execution_engine/data_coords.py diff --git a/pycromanager/acquisition/new/data_handler.py b/pycromanager/execution_engine/data_handler.py similarity index 95% rename from pycromanager/acquisition/new/data_handler.py rename to pycromanager/execution_engine/data_handler.py index 5c5939f0..9143a05d 100644 --- a/pycromanager/acquisition/new/data_handler.py +++ b/pycromanager/execution_engine/data_handler.py @@ -3,15 +3,15 @@ from typing import Any, Dict, Tuple, Callable, Union, Sequence, Optional import numpy as np -from pycromanager.acquisition.new.data_coords import DataCoordinates -from pycromanager.acquisition.new.apis.data_storage import DataStorageAPI +from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates +from pycromanager.acquisition.execution_engine.apis.data_storage import DataStorageAPI from pydantic.types import JsonValue from dataclasses import dataclass from typing import TYPE_CHECKING if TYPE_CHECKING: - from pycromanager.acquisition.new.acq_future import AcquisitionFuture + from pycromanager.acquisition.execution_engine.acq_future import AcquisitionFuture class _PeekableQueue(queue.Queue): @@ -82,7 +82,7 @@ def _run_intake_thread(self): if coordinates is None: self._intake_queue.get() # TODO: it would be nice to give a signal to 
the image processor to shut down - # probably could do this by adding a new protocol that can be checked + # probably could do this by adding a execution_engine protocol that can be checked # to allow backwards compatibility self._processed_queue.put(None) # propagate the shutdown signal break @@ -116,7 +116,7 @@ def _run_intake_thread(self): if future: future._notify_data(coordinates, data, metadata, processed=True, stored=False) if not original_data_coordinates_replaced: - # if the image processor did not provide a new image with the same coordinates, discard the original + # if the image processor did not provide a execution_engine image with the same coordinates, discard the original self._data_metadata_future_tuple.pop(original_coordinates) # remove the item from the intake queue self._intake_queue.get() diff --git a/pycromanager/acquisition/new/executor.py b/pycromanager/execution_engine/executor.py similarity index 96% rename from pycromanager/acquisition/new/executor.py rename to pycromanager/execution_engine/executor.py index c0fa453f..f1f34c47 100644 --- a/pycromanager/acquisition/new/executor.py +++ b/pycromanager/execution_engine/executor.py @@ -10,9 +10,9 @@ import uuid from typing import Union, Iterable -from pycromanager.acquisition.new.acq_future import AcquisitionFuture -from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducing, Stoppable, Abortable -from pycromanager.acquisition.new.data_handler import DataHandler +from pycromanager.acquisition.execution_engine.acq_future import AcquisitionFuture +from pycromanager.acquisition.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing, Stoppable, Abortable +from pycromanager.acquisition.execution_engine.data_handler import DataHandler class ExecutionEngine: @@ -62,7 +62,7 @@ def submit(self, event_or_events: Union[AcquisitionEvent, Iterable[AcquisitionEv Useful for system-wide changes affecting other events, like hardware adjustments. 
use_free_thread : bool, optional (default=False) - If True, execute the event(s) on an available thread with an empty queue, creating a new one if necessary. + If True, execute the event(s) on an available thread with an empty queue, creating a execution_engine one if necessary. Useful for operations like cancelling or stopping events awaiting signals. If False, execute on the primary thread. diff --git a/pycromanager/acquisition/new/implementations/__init__.py b/pycromanager/execution_engine/implementations/__init__.py similarity index 100% rename from pycromanager/acquisition/new/implementations/__init__.py rename to pycromanager/execution_engine/implementations/__init__.py diff --git a/pycromanager/acquisition/new/implementations/data_storage_implementations.py b/pycromanager/execution_engine/implementations/data_storage_implementations.py similarity index 94% rename from pycromanager/acquisition/new/implementations/data_storage_implementations.py rename to pycromanager/execution_engine/implementations/data_storage_implementations.py index 02546653..bf63407e 100644 --- a/pycromanager/acquisition/new/implementations/data_storage_implementations.py +++ b/pycromanager/execution_engine/implementations/data_storage_implementations.py @@ -2,8 +2,8 @@ Adapters for NDTiff and NDRam storage classes """ from typing import Union, Dict -from pycromanager.acquisition.new.apis.data_storage import DataStorageAPI -from pycromanager.acquisition.new.data_coords import DataCoordinates +from pycromanager.acquisition.execution_engine.apis.data_storage import DataStorageAPI +from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates from ndstorage import NDRAMDataset, NDTiffDataset import numpy as np from pydantic.types import JsonValue diff --git a/pycromanager/acquisition/new/implementations/event_implementations.py b/pycromanager/execution_engine/implementations/event_implementations.py similarity index 86% rename from 
pycromanager/acquisition/new/implementations/event_implementations.py rename to pycromanager/execution_engine/implementations/event_implementations.py index 795cc44d..828d6426 100644 --- a/pycromanager/acquisition/new/implementations/event_implementations.py +++ b/pycromanager/execution_engine/implementations/event_implementations.py @@ -3,9 +3,9 @@ """ from typing import Iterable import itertools -from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducing -from pycromanager.acquisition.new.base_classes.devices import Camera -from pycromanager.acquisition.new.data_coords import DataCoordinates +from pycromanager.acquisition.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing +from pycromanager.acquisition.execution_engine.base_classes.devices import Camera +from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates class ReadoutImages(AcquisitionEvent, DataProducing): @@ -28,8 +28,8 @@ def execute(self): for image_number, image_coordinates in zip(image_counter, self.image_coordinate_iterator): while True: # TODO: read from state to check for cancel condition - # this can be made more efficient in the future with a new image buffer that provides callbacks - # on a new image recieved so that polling can be avoided + # this can be made more efficient in the future with a execution_engine image buffer that provides callbacks + # on a execution_engine image recieved so that polling can be avoided image, metadata = self.camera.pop_image(timeout=0.01) # only block for 10 ms so stop event can be checked if image is not None: self.put_data(image_coordinates, image, metadata) diff --git a/pycromanager/acquisition/new/implementations/mm_device_implementations.py b/pycromanager/execution_engine/implementations/mm_device_implementations.py similarity index 94% rename from pycromanager/acquisition/new/implementations/mm_device_implementations.py rename to 
pycromanager/execution_engine/implementations/mm_device_implementations.py index 26399f03..223e8030 100644 --- a/pycromanager/acquisition/new/implementations/mm_device_implementations.py +++ b/pycromanager/execution_engine/implementations/mm_device_implementations.py @@ -2,7 +2,7 @@ Implementation of Micro-Manager devices.py in terms of the AcqEng bottom API """ -from pycromanager.acquisition.new.base_classes.devices import Camera +from pycromanager.acquisition.execution_engine.base_classes.devices import Camera from pycromanager.core import Core import numpy as np import pymmcore @@ -33,7 +33,7 @@ def __init__(self, device_name=None): self.device_name = device_name # Make a thread to execute calls to snap asynchronously - # This may be removable in the the future with the new camera API if something similar is implemented at the core + # This may be removable in the the future with the execution_engine camera API if something similar is implemented at the core self._snap_executor = ThreadPoolExecutor(max_workers=1) self._last_snap = None self._snap_active = False diff --git a/pycromanager/acquisition/new/test/__init__.py b/pycromanager/execution_engine/internal/__init__.py similarity index 100% rename from pycromanager/acquisition/new/test/__init__.py rename to pycromanager/execution_engine/internal/__init__.py diff --git a/pycromanager/acquisition/new/internal/device.py b/pycromanager/execution_engine/internal/device.py similarity index 94% rename from pycromanager/acquisition/new/internal/device.py rename to pycromanager/execution_engine/internal/device.py index bf877356..dc563978 100644 --- a/pycromanager/acquisition/new/internal/device.py +++ b/pycromanager/execution_engine/internal/device.py @@ -5,8 +5,8 @@ from functools import wraps from typing import Any, Dict -from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent -from pycromanager.acquisition.new.executor import ExecutionEngine +from 
pycromanager.acquisition.execution_engine.base_classes.acq_events import AcquisitionEvent +from pycromanager.acquisition.execution_engine.executor import ExecutionEngine class MethodCallAcquisitionEvent(AcquisitionEvent): diff --git a/pycromanager/acquisition/new/test/unit_tests/__init__.py b/pycromanager/execution_engine/test/__init__.py similarity index 100% rename from pycromanager/acquisition/new/test/unit_tests/__init__.py rename to pycromanager/execution_engine/test/__init__.py diff --git a/pycromanager/acquisition/new/test/integration_test.py b/pycromanager/execution_engine/test/integration_test.py similarity index 80% rename from pycromanager/acquisition/new/test/integration_test.py rename to pycromanager/execution_engine/test/integration_test.py index a258d800..4348f5c8 100644 --- a/pycromanager/acquisition/new/test/integration_test.py +++ b/pycromanager/execution_engine/test/integration_test.py @@ -1,9 +1,9 @@ from pycromanager import start_headless -from pycromanager.acquisition.new.data_coords import DataCoordinates -from pycromanager.acquisition.new.implementations.mm_device_implementations import MicroManagerCamera +from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates +from pycromanager.acquisition.execution_engine.implementations.mm_device_implementations import MicroManagerCamera import os -from pycromanager.acquisition.new.executor import ExecutionEngine -from pycromanager.acquisition.new.base_classes.acq_events import StartCapture, ReadoutImages, DataHandler +from pycromanager.acquisition.execution_engine.executor import ExecutionEngine +from pycromanager.acquisition.execution_engine.base_classes.acq_events import StartCapture, ReadoutImages, DataHandler mm_install_dir = '/Users/henrypinkard/Micro-Manager' diff --git a/pycromanager/execution_engine/test/integration_tests/__init__.py b/pycromanager/execution_engine/test/integration_tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git 
a/pycromanager/acquisition/new/test/integration_tests/camera_tests.py b/pycromanager/execution_engine/test/integration_tests/camera_tests.py similarity index 82% rename from pycromanager/acquisition/new/test/integration_tests/camera_tests.py rename to pycromanager/execution_engine/test/integration_tests/camera_tests.py index b1a797da..28530a9b 100644 --- a/pycromanager/acquisition/new/test/integration_tests/camera_tests.py +++ b/pycromanager/execution_engine/test/integration_tests/camera_tests.py @@ -1,14 +1,14 @@ import time from pycromanager import start_headless -from pycromanager.acquisition.new.data_coords import DataCoordinates -from pycromanager.acquisition.new.implementations.mm_device_implementations import MicroManagerCamera +from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates +from pycromanager.acquisition.execution_engine.implementations.mm_device_implementations import MicroManagerCamera import os -from pycromanager.acquisition.new.executor import ExecutionEngine -from pycromanager.acquisition.new.implementations.event_implementations import StartCapture, ReadoutImages, \ +from pycromanager.acquisition.execution_engine.executor import ExecutionEngine +from pycromanager.acquisition.execution_engine.implementations.event_implementations import StartCapture, ReadoutImages, \ StartContinuousCapture, StopCapture -from pycromanager.acquisition.new.data_handler import DataHandler -from pycromanager.acquisition.new.implementations.data_storage_implementations import NDStorage +from pycromanager.acquisition.execution_engine.data_handler import DataHandler +from pycromanager.acquisition.execution_engine.implementations.data_storage_implementations import NDStorage import itertools diff --git a/pycromanager/acquisition/new/test/sandbox_device.py b/pycromanager/execution_engine/test/sandbox_device.py similarity index 80% rename from pycromanager/acquisition/new/test/sandbox_device.py rename to 
pycromanager/execution_engine/test/sandbox_device.py index f31ac6f1..00292da9 100644 --- a/pycromanager/acquisition/new/test/sandbox_device.py +++ b/pycromanager/execution_engine/test/sandbox_device.py @@ -1,6 +1,6 @@ from pycromanager import start_headless -from pycromanager.acquisition.new.data_coords import DataCoordinates -from pycromanager.acquisition.new.implementations.mm_device_implementations import MicroManagerCamera +from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates +from pycromanager.acquisition.execution_engine.implementations.mm_device_implementations import MicroManagerCamera import os mm_install_dir = '/Users/henrypinkard/Micro-Manager' @@ -14,11 +14,11 @@ camera = MicroManagerCamera() -from pycromanager.acquisition.new.executor import ExecutionEngine +from pycromanager.acquisition.execution_engine.executor import ExecutionEngine executor = ExecutionEngine() -from pycromanager.acquisition.new.base_classes.acq_events import StartCapture, ReadoutImages, DataHandler +from pycromanager.acquisition.execution_engine.base_classes.acq_events import StartCapture, ReadoutImages, DataHandler num_images = 100 data_output_queue = DataHandler() diff --git a/pycromanager/execution_engine/test/unit_tests/__init__.py b/pycromanager/execution_engine/test/unit_tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pycromanager/acquisition/new/test/unit_tests/test_acquisition_futures.py b/pycromanager/execution_engine/test/unit_tests/test_acquisition_futures.py similarity index 92% rename from pycromanager/acquisition/new/test/unit_tests/test_acquisition_futures.py rename to pycromanager/execution_engine/test/unit_tests/test_acquisition_futures.py index a6bd778a..a214e6cc 100644 --- a/pycromanager/acquisition/new/test/unit_tests/test_acquisition_futures.py +++ b/pycromanager/execution_engine/test/unit_tests/test_acquisition_futures.py @@ -1,14 +1,14 @@ import threading import pytest import numpy as np -from 
pycromanager.acquisition.new.data_coords import DataCoordinates, DataCoordinatesIterator +from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates, DataCoordinatesIterator from typing import Dict, Any import time # Assuming these are the correct imports based on the provided code -from pycromanager.acquisition.new.data_handler import DataHandler -from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducing -from pycromanager.acquisition.new.acq_future import AcquisitionFuture +from pycromanager.acquisition.execution_engine.data_handler import DataHandler +from pycromanager.acquisition.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing +from pycromanager.acquisition.execution_engine.acq_future import AcquisitionFuture class MockDataHandler(DataHandler): diff --git a/pycromanager/acquisition/new/test/unit_tests/test_data_coords.py b/pycromanager/execution_engine/test/unit_tests/test_data_coords.py similarity index 98% rename from pycromanager/acquisition/new/test/unit_tests/test_data_coords.py rename to pycromanager/execution_engine/test/unit_tests/test_data_coords.py index d7308c6b..53e04f11 100644 --- a/pycromanager/acquisition/new/test/unit_tests/test_data_coords.py +++ b/pycromanager/execution_engine/test/unit_tests/test_data_coords.py @@ -1,6 +1,6 @@ import pytest from pydantic import ValidationError -from pycromanager.acquisition.new.data_coords import DataCoordinates, DataCoordinatesIterator +from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates, DataCoordinatesIterator import numpy as np def test_init_with_dict(): diff --git a/pycromanager/acquisition/new/test/unit_tests/test_data_handler.py b/pycromanager/execution_engine/test/unit_tests/test_data_handler.py similarity index 91% rename from pycromanager/acquisition/new/test/unit_tests/test_data_handler.py rename to pycromanager/execution_engine/test/unit_tests/test_data_handler.py index 
eca70afa..27be6d7f 100644 --- a/pycromanager/acquisition/new/test/unit_tests/test_data_handler.py +++ b/pycromanager/execution_engine/test/unit_tests/test_data_handler.py @@ -2,12 +2,12 @@ import pytest import numpy as np -from pycromanager.acquisition.new.data_coords import DataCoordinates -from pycromanager.acquisition.new.apis.data_storage import DataStorageAPI -from pycromanager.acquisition.new.acq_future import AcquisitionFuture +from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates +from pycromanager.acquisition.execution_engine.apis.data_storage import DataStorageAPI +from pycromanager.acquisition.execution_engine.acq_future import AcquisitionFuture from typing import Callable, Optional, Union, Sequence, Dict, Tuple, Any -from pycromanager.acquisition.new.data_handler import DataHandler +from pycromanager.acquisition.execution_engine.data_handler import DataHandler class MockDataStorage(DataStorageAPI): diff --git a/pycromanager/acquisition/new/test/unit_tests/test_data_storage.py b/pycromanager/execution_engine/test/unit_tests/test_data_storage.py similarity index 89% rename from pycromanager/acquisition/new/test/unit_tests/test_data_storage.py rename to pycromanager/execution_engine/test/unit_tests/test_data_storage.py index f34e9169..1fa44e31 100644 --- a/pycromanager/acquisition/new/test/unit_tests/test_data_storage.py +++ b/pycromanager/execution_engine/test/unit_tests/test_data_storage.py @@ -1,8 +1,8 @@ import pytest import numpy as np -from pycromanager.acquisition.new.data_coords import DataCoordinates -from pycromanager.acquisition.new.implementations.data_storage_implementations import NDStorage -from pycromanager.acquisition.new.apis.data_storage import DataStorageAPI +from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates +from pycromanager.acquisition.execution_engine.implementations.data_storage_implementations import NDStorage +from 
pycromanager.acquisition.execution_engine.apis.data_storage import DataStorageAPI @pytest.fixture(params=["tiff", "ram"]) def data_storage(request, tmp_path): diff --git a/pycromanager/acquisition/new/test/unit_tests/test_device_metaclass.py b/pycromanager/execution_engine/test/unit_tests/test_device_metaclass.py similarity index 95% rename from pycromanager/acquisition/new/test/unit_tests/test_device_metaclass.py rename to pycromanager/execution_engine/test/unit_tests/test_device_metaclass.py index 4f25d856..b7651457 100644 --- a/pycromanager/acquisition/new/test/unit_tests/test_device_metaclass.py +++ b/pycromanager/execution_engine/test/unit_tests/test_device_metaclass.py @@ -3,8 +3,8 @@ from typing import Any # Assuming these are imported from your actual implementation -from pycromanager.acquisition.new.internal.device import (Device, AttrAccessAcquisitionEvent, - AttrSetAcquisitionEvent, MethodCallAcquisitionEvent) +from pycromanager.execution_engine.internal.device import (Device, AttrAccessAcquisitionEvent, + AttrSetAcquisitionEvent, MethodCallAcquisitionEvent) diff --git a/pycromanager/acquisition/new/test/unit_tests/test_executor.py b/pycromanager/execution_engine/test/unit_tests/test_executor.py similarity index 93% rename from pycromanager/acquisition/new/test/unit_tests/test_executor.py rename to pycromanager/execution_engine/test/unit_tests/test_executor.py index 86532e2a..6f9c2d15 100644 --- a/pycromanager/acquisition/new/test/unit_tests/test_executor.py +++ b/pycromanager/execution_engine/test/unit_tests/test_executor.py @@ -1,8 +1,8 @@ import pytest from unittest.mock import MagicMock -from pycromanager.acquisition.new.base_classes.acq_events import AcquisitionEvent, DataProducing -from pycromanager.acquisition.new.executor import ExecutionEngine -from pycromanager.acquisition.new.acq_future import AcquisitionFuture +from pycromanager.acquisition.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing +from 
pycromanager.acquisition.execution_engine.executor import ExecutionEngine +from pycromanager.acquisition.execution_engine.acq_future import AcquisitionFuture import threading import time diff --git a/pycromanager/headless.py b/pycromanager/headless.py index e7e0c4be..7ff3db48 100644 --- a/pycromanager/headless.py +++ b/pycromanager/headless.py @@ -36,7 +36,7 @@ def _create_pymmcore_instance(): 2. add convenience methods to match the MMCoreJ API: """ - # Create a new dictionary for the class attributes + # Create a execution_engine dictionary for the class attributes new_attributes = {} # Iterate through the original attributes @@ -49,7 +49,7 @@ def _create_pymmcore_instance(): new_attr_name = _camel_to_snake(attr_name) new_attributes[new_attr_name] = attr_value - # Create and return a new class that subclasses the original class and has the new attributes + # Create and return a execution_engine class that subclasses the original class and has the execution_engine attributes clz = type(CMMCore.__name__ + "SnakeCase", (CMMCore,), new_attributes) instance = clz() return instance @@ -80,7 +80,7 @@ def stop_headless(debug=False): c.unloadAllDevices() if debug: logger.debug('Unloaded all devices.py') - # TODO: shutdown new engine + # TODO: shutdown execution_engine engine # Engine.get_instance().shutdown() if debug: logger.debug('Engine shut down') @@ -137,7 +137,7 @@ def start_headless( mmc.load_system_configuration(config_file) mmc.set_circular_buffer_memory_footprint(buffer_size_mb) _PYMMCORES.append(mmc) # Store so it doesn't get garbage collected - # TODO: startup new engine + # TODO: startup execution_engine engine # Engine(mmc) else: classpath = mm_app_path + '/plugins/Micro-Manager/*' diff --git a/pycromanager/mm_java_classes.py b/pycromanager/mm_java_classes.py index ac5bf87a..55a1a7e3 100644 --- a/pycromanager/mm_java_classes.py +++ b/pycromanager/mm_java_classes.py @@ -74,7 +74,7 @@ def __new__( port: int The port of the Bridge used to create the object 
new_socket: bool - If True, will create new java object on a new port so that blocking calls will not interfere + If True, will create execution_engine java object on a execution_engine port so that blocking calls will not interfere with the bridges main port debug: print debug messages @@ -117,7 +117,7 @@ def __new__( port: int The port of the Bridge used to create the object new_socket: bool - If True, will create new java object on a new port so that blocking calls will not interfere + If True, will create execution_engine java object on a execution_engine port so that blocking calls will not interfere with the bridges main port debug: bool print debug messages @@ -142,7 +142,7 @@ def __new__( port: int The port of the Bridge used to create the object new_socket: bool - If True, will create new java object on a new port so that blocking calls will not interfere + If True, will create execution_engine java object on a execution_engine port so that blocking calls will not interfere with the bridges main port debug: bool print debug messages diff --git a/pycromanager/napari_util.py b/pycromanager/napari_util.py index 03aba724..6fe4eb1b 100644 --- a/pycromanager/napari_util.py +++ b/pycromanager/napari_util.py @@ -8,7 +8,7 @@ def start_napari_signalling(viewer, dataset): """ - Start up a threadworker, which will check for new images arrived in the dataset + Start up a threadworker, which will check for execution_engine images arrived in the dataset and then signal to napari to update or refresh as needed :param viewer: the napari Viewer :param dataset: the Datatset being acquired @@ -17,7 +17,7 @@ def start_napari_signalling(viewer, dataset): def update_layer(image): """ - update the napari layer with the new image + update the napari layer with the execution_engine image """ if image is not None: try: @@ -29,7 +29,7 @@ def update_layer(image): @thread_worker(connect={'yielded': update_layer}) def napari_signaller(): """ - Monitor for signals that Acquisition has a 
new image ready, and when that happens + Monitor for signals that Acquisition has a execution_engine image ready, and when that happens update napari appropriately """ # don't update faster than the display can handle diff --git a/pycromanager/test/test_acquisition.py b/pycromanager/test/test_acquisition.py index d4b5b1f0..adc41ff9 100644 --- a/pycromanager/test/test_acquisition.py +++ b/pycromanager/test/test_acquisition.py @@ -479,7 +479,7 @@ def test_abort_from_external(launch_mm_headless, setup_data_folder): events = multi_d_acquisition_events(num_time_points=6) acq.acquire(events[0]) # this simulates an abort from the java side unbeknownst to python side - # it comes from a new thread so it is non-blocking to the port + # it comes from a execution_engine thread so it is non-blocking to the port acq._acq.abort() for event in events[1:]: acq.acquire(event) diff --git a/scripts/bridge_tests.py b/scripts/bridge_tests.py index 3ec328d7..70ccf89b 100644 --- a/scripts/bridge_tests.py +++ b/scripts/bridge_tests.py @@ -10,7 +10,7 @@ def other_thread(core): core = None -### Create an object and a child object on a new socket +### Create an object and a child object on a execution_engine socket core = ZMQRemoteMMCoreJ(debug=False) core.get_system_state_cache(new) diff --git a/scripts/lightsheet_deskew.py b/scripts/lightsheet_deskew.py index 5fc769bc..a4b81e15 100644 --- a/scripts/lightsheet_deskew.py +++ b/scripts/lightsheet_deskew.py @@ -104,7 +104,7 @@ def precompute_recon_weightings(self, do_orthogonal_views=True, do_volume=True): for z_index_camera in np.arange(self.camera_shape[0]): for y_index_camera in np.arange(self.camera_shape[1]): - # where does each line of x pixels belong in the new image? + # where does each line of x pixels belong in the execution_engine image? 
if (z_index_camera, y_index_camera) not in self.recon_coord_LUT: print('ignoring: ', z_index_camera, y_index_camera) continue @@ -145,7 +145,7 @@ def make_projections(self, data, do_orthogonal_views=True, do_volume=True): # do the projection/reconstruction # iterate through each z slice of the image - # at each z slice, iterate through each x pixel and copy a line of y pixels to the new image + # at each z slice, iterate through each x pixel and copy a line of y pixels to the execution_engine image for z_index_camera in np.arange(0, self.camera_shape[0], 1): image_on_camera = data[z_index_camera] for y_index_camera in range(self.camera_shape[1]): @@ -153,7 +153,7 @@ def make_projections(self, data, do_orthogonal_views=True, do_volume=True): continue source_line_of_x_pixels = image_on_camera[y_index_camera] - # where does each line of x pixels belong in the new image? + # where does each line of x pixels belong in the execution_engine image? dest_coords = self.recon_coord_LUT[(z_index_camera, y_index_camera)] for dest_coord in dest_coords: recon_z, recon_y = dest_coord diff --git a/scripts/napari_frontend.py b/scripts/napari_frontend.py index b6618f41..7e8e9fe6 100644 --- a/scripts/napari_frontend.py +++ b/scripts/napari_frontend.py @@ -22,7 +22,7 @@ def image_saved_callback(axes, d): """ - Callback function that will be used to signal to napari that a new image is ready + Callback function that will be used to signal to napari that a execution_engine image is ready """ global dataset global update_ready @@ -48,7 +48,7 @@ def run_acq(): def update_layer(image): """ - update the napari layer with the new image + update the napari layer with the execution_engine image """ if len(viewer.layers) == 0: viewer.add_image(image) @@ -61,14 +61,14 @@ def update_layer(image): @thread_worker(connect={'yielded': update_layer}) def update_images(): """ - Monitor for signals that Acqusition has a new image ready, and when that happens + Monitor for signals that Acqusition has a 
execution_engine image ready, and when that happens update napari appropriately """ global update_ready while True: if update_ready: update_ready = False - # A new image has arrived, but we only need to regenerate the dask array + # A execution_engine image has arrived, but we only need to regenerate the dask array # if its shape has changed shape = np.array([len(dataset.axes[name]) for name in dataset.axes.keys()]) if not hasattr(update_images, 'old_shape') or \ From bad71c9f4d54b945d3333acdcb97dab49c9ecf3b Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Tue, 2 Jul 2024 16:32:25 +0200 Subject: [PATCH 13/20] fix thread/executor rereouting behavior --- pycromanager/execution_engine/acq_future.py | 18 ++-- .../execution_engine/apis/data_storage.py | 2 +- .../base_classes/acq_events.py | 14 ++- .../base_classes/device_types.py | 1 + pycromanager/execution_engine/data_handler.py | 8 +- pycromanager/execution_engine/executor.py | 37 +++++--- .../data_storage_implementations.py | 34 ++++--- .../implementations/event_implementations.py | 6 +- .../mm_device_implementations.py | 3 +- .../execution_engine/internal/device.py | 92 +++++++++++++------ .../execution_engine/test/integration_test.py | 8 +- .../execution_engine/test/sandbox_device.py | 57 ------------ pycromanager/execution_engine/test/sbox.py | 49 ++++++++++ .../test/unit_tests/test_device_metaclass.py | 4 +- 14 files changed, 201 insertions(+), 132 deletions(-) delete mode 100644 pycromanager/execution_engine/test/sandbox_device.py create mode 100644 pycromanager/execution_engine/test/sbox.py diff --git a/pycromanager/execution_engine/acq_future.py b/pycromanager/execution_engine/acq_future.py index 73263943..59060d3f 100644 --- a/pycromanager/execution_engine/acq_future.py +++ b/pycromanager/execution_engine/acq_future.py @@ -1,13 +1,13 @@ from typing import Union, Optional, Any, Dict, Tuple, Sequence, Set import threading import warnings -from 
pycromanager.acquisition.execution_engine.data_coords import DataCoordinates, DataCoordinatesIterator +from pycromanager.execution_engine.data_coords import DataCoordinates, DataCoordinatesIterator from typing import TYPE_CHECKING if TYPE_CHECKING: # avoid circular imports - from pycromanager.acquisition.execution_engine.data_handler import DataHandler -from pycromanager.acquisition.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing, Stoppable, Abortable + from pycromanager.execution_engine.data_handler import DataHandler +from pycromanager.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing, Stoppable, Abortable class AcquisitionFuture: @@ -29,11 +29,17 @@ def __init__(self, event: AcquisitionEvent): # remove unsupported methods if not isinstance(self._event, DataProducing): - del self.await_data + def raise_not_implemented(*args, **kwargs): + raise NotImplementedError("This event does not DataProducing") + self.await_data = raise_not_implemented if not isinstance(self._event, Stoppable): - del self.stop + def raise_not_implemented(*args, **kwargs): + raise NotImplementedError("This event is not Stoppable") + self.stop = raise_not_implemented if not isinstance(self._event, Abortable): - del self.abort + def raise_not_implemented(*args, **kwargs): + raise NotImplementedError("This event is not Abortable") + self.abort = raise_not_implemented def await_execution(self) -> Any: """ diff --git a/pycromanager/execution_engine/apis/data_storage.py b/pycromanager/execution_engine/apis/data_storage.py index 3db4dfa6..bd77b79f 100644 --- a/pycromanager/execution_engine/apis/data_storage.py +++ b/pycromanager/execution_engine/apis/data_storage.py @@ -3,7 +3,7 @@ """ from typing import Protocol, runtime_checkable, Union, List, Tuple, Dict, Any -from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates +from pycromanager.execution_engine.data_coords import DataCoordinates import numpy as np from 
pydantic.types import JsonValue diff --git a/pycromanager/execution_engine/base_classes/acq_events.py b/pycromanager/execution_engine/base_classes/acq_events.py index 3721107d..ebec483c 100644 --- a/pycromanager/execution_engine/base_classes/acq_events.py +++ b/pycromanager/execution_engine/base_classes/acq_events.py @@ -5,14 +5,14 @@ import weakref from pydantic import BaseModel -from pydantic import field_validator +from pydantic import field_validator, Field -from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates, DataCoordinatesIterator -from pycromanager.acquisition.execution_engine.data_handler import DataHandler +from pycromanager.execution_engine.data_coords import DataCoordinates, DataCoordinatesIterator +from pycromanager.execution_engine.data_handler import DataHandler from typing import TYPE_CHECKING if TYPE_CHECKING: # avoid circular imports - from pycromanager.acquisition.execution_engine.acq_future import AcquisitionFuture + from pycromanager.execution_engine.acq_future import AcquisitionFuture class AcquisitionEvent(BaseModel, ABC): num_retries_on_exception: int = 0 @@ -100,13 +100,17 @@ class DataProducing(BaseModel): coordinates of each piece of data (i.e. image) that will be produced by the event. For example, {time: 0}, {time: 1}, {time: 2} for a time series acquisition. """ - data_handler: DataHandler + data_handler: DataHandler = None # This is eventually an ImageCoordinatesIterator. If an Iterable[ImageCoordinates] or # Iterable[Dict[str, Union[int, str]]] is provided, it will be auto-converted to an ImageCoordinatesIterator image_coordinate_iterator: Union[DataCoordinatesIterator, Iterable[DataCoordinates], Iterable[Dict[str, Union[int, str]]]] + # TODO: is there any point to pydantic if I'm just telling it to ignore stuff anyway? 
+ class Config: + arbitrary_types_allowed = True + @field_validator('image_coordinate_iterator', mode='before') def _convert_to_image_coordinates_iterator(cls, v): return DataCoordinatesIterator.create(v) diff --git a/pycromanager/execution_engine/base_classes/device_types.py b/pycromanager/execution_engine/base_classes/device_types.py index 597176f1..fc172c7a 100644 --- a/pycromanager/execution_engine/base_classes/device_types.py +++ b/pycromanager/execution_engine/base_classes/device_types.py @@ -4,6 +4,7 @@ from abc import abstractmethod from pycromanager.execution_engine.internal.device import Device +import numpy as np class SingleAxisActuator(Device): diff --git a/pycromanager/execution_engine/data_handler.py b/pycromanager/execution_engine/data_handler.py index 9143a05d..6f5009c2 100644 --- a/pycromanager/execution_engine/data_handler.py +++ b/pycromanager/execution_engine/data_handler.py @@ -3,15 +3,15 @@ from typing import Any, Dict, Tuple, Callable, Union, Sequence, Optional import numpy as np -from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates -from pycromanager.acquisition.execution_engine.apis.data_storage import DataStorageAPI +from pycromanager.execution_engine.data_coords import DataCoordinates +from pycromanager.execution_engine.apis.data_storage import DataStorageAPI from pydantic.types import JsonValue from dataclasses import dataclass from typing import TYPE_CHECKING if TYPE_CHECKING: - from pycromanager.acquisition.execution_engine.acq_future import AcquisitionFuture + from pycromanager.execution_engine.acq_future import AcquisitionFuture class _PeekableQueue(queue.Queue): @@ -132,13 +132,13 @@ def _run_storage_thread(self): shutdown = self._transfer_to_storage() if shutdown: break + def _transfer_to_storage(self): """ Take items from the source queue and put them into the storage queue. If there is a processing function, the source queue is the output queue of the processing function. 
If there is no processing function, the source queue is the intake queue. """ - coordinates = self._processed_queue.peek() if self._process_function else self._intake_queue.peek() if coordinates is None: # shutdown condition diff --git a/pycromanager/execution_engine/executor.py b/pycromanager/execution_engine/executor.py index f1f34c47..f9c0eb95 100644 --- a/pycromanager/execution_engine/executor.py +++ b/pycromanager/execution_engine/executor.py @@ -10,9 +10,9 @@ import uuid from typing import Union, Iterable -from pycromanager.acquisition.execution_engine.acq_future import AcquisitionFuture -from pycromanager.acquisition.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing, Stoppable, Abortable -from pycromanager.acquisition.execution_engine.data_handler import DataHandler +from pycromanager.execution_engine.acq_future import AcquisitionFuture +from pycromanager.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing, Stoppable, Abortable +from pycromanager.execution_engine.data_handler import DataHandler class ExecutionEngine: @@ -20,10 +20,10 @@ class ExecutionEngine: _instance = None def __init__(self, num_threads=1): - self._threads = [] + self._thread_managers: list[_ExecutionThreadManager] = [] for _ in range(num_threads): self._start_new_thread() - self._instance = self + ExecutionEngine._instance = self @classmethod def get_instance(cls): @@ -31,8 +31,20 @@ def get_instance(cls): raise RuntimeError("ExecutionEngine has not been initialized") return cls._instance + @classmethod + def on_main_executor_thread(self): + """ + Check if the current thread is an executor thread + """ + return threading.current_thread() is ExecutionEngine.get_instance()._thread_managers[0] + + @classmethod + def on_any_executor_thread(self): + return any([m.is_managed_thread(threading.current_thread()) for m in + ExecutionEngine.get_instance()._thread_managers]) + def _start_new_thread(self): - 
self._threads.append(_ExecutionThreadManager()) + self._thread_managers.append(_ExecutionThreadManager()) def submit(self, event_or_events: Union[AcquisitionEvent, Iterable[AcquisitionEvent]], transpile: bool = True, prioritize: bool = False, use_free_thread: bool = False, @@ -101,14 +113,14 @@ def _submit_single_event(self, event: AcquisitionEvent, use_free_thread: bool = """ future = AcquisitionFuture(event=event) if use_free_thread: - for thread in self._threads: + for thread in self._thread_managers: if thread.is_free(): thread.submit_event(event) break self._start_new_thread() - self._threads[-1].submit_event(event) + self._thread_managers[-1].submit_event(event) else: - self._threads[0].submit_event(event, prioritize=prioritize) + self._thread_managers[0].submit_event(event, prioritize=prioritize) return future @@ -116,9 +128,9 @@ def shutdown(self): """ Stop all threads managed by this executor and wait for them to finish """ - for thread in self._threads: + for thread in self._thread_managers: thread.shutdown() - for thread in self._threads: + for thread in self._thread_managers: thread.join() @@ -145,6 +157,9 @@ def __init__(self): self._addition_condition = threading.Condition() self._thread.start() + def is_managed_thread(self, thread): + return self._thread == thread + def join(self): self._thread.join() diff --git a/pycromanager/execution_engine/implementations/data_storage_implementations.py b/pycromanager/execution_engine/implementations/data_storage_implementations.py index bf63407e..89acd041 100644 --- a/pycromanager/execution_engine/implementations/data_storage_implementations.py +++ b/pycromanager/execution_engine/implementations/data_storage_implementations.py @@ -2,25 +2,18 @@ Adapters for NDTiff and NDRam storage classes """ from typing import Union, Dict -from pycromanager.acquisition.execution_engine.apis.data_storage import DataStorageAPI -from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates +from 
pycromanager.execution_engine.apis.data_storage import DataStorageAPI +from pycromanager.execution_engine.data_coords import DataCoordinates from ndstorage import NDRAMDataset, NDTiffDataset import numpy as np from pydantic.types import JsonValue -class NDStorage(DataStorageAPI): +class _NDRAMOrTiffStorage(DataStorageAPI): """ Wrapper class for NDTiffDataset and NDRAMDataset to implement the DataStorageAPI protocol """ - def __init__(self, directory: str = None, name: str = None, summary_metadata: JsonValue = None): - if directory is None: - self._storage = NDRAMDataset() - else: - self._storage = NDTiffDataset(dataset_path=directory, name=name, writable=True) - if summary_metadata is None: - summary_metadata = {} - self._storage.initialize(summary_metadata) + _storage: Union[NDTiffDataset, NDRAMDataset] def __contains__(self, data_coordinates: Union[DataCoordinates, Dict[str, Union[int, str]]]) -> bool: """Check if item is in the container.""" @@ -70,3 +63,22 @@ def close(self): self._storage.close() +class NDTiffStorage(_NDRAMOrTiffStorage): + """ + Adapter for NDTiffDataset to implement the DataStorageAPI protocol + """ + def __init__(self, directory: str, name: str = None, summary_metadata: JsonValue = None): + self._storage = NDTiffDataset(dataset_path=directory, name=name, writable=True) + if summary_metadata is None: + summary_metadata = {} + self._storage.initialize(summary_metadata) + +class NDRAMStorage(_NDRAMOrTiffStorage): + """ + Adapter for NDRAMDataset to implement the DataStorageAPI protocol + """ + def __init__(self, summary_metadata: JsonValue = None): + self._storage = NDRAMDataset() + if summary_metadata is None: + summary_metadata = {} + self._storage.initialize(summary_metadata) \ No newline at end of file diff --git a/pycromanager/execution_engine/implementations/event_implementations.py b/pycromanager/execution_engine/implementations/event_implementations.py index 828d6426..3a4ca3eb 100644 --- 
a/pycromanager/execution_engine/implementations/event_implementations.py +++ b/pycromanager/execution_engine/implementations/event_implementations.py @@ -3,9 +3,9 @@ """ from typing import Iterable import itertools -from pycromanager.acquisition.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing -from pycromanager.acquisition.execution_engine.base_classes.devices import Camera -from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates +from pycromanager.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing +from pycromanager.execution_engine.base_classes.device_types import Camera +from pycromanager.execution_engine.data_coords import DataCoordinates class ReadoutImages(AcquisitionEvent, DataProducing): diff --git a/pycromanager/execution_engine/implementations/mm_device_implementations.py b/pycromanager/execution_engine/implementations/mm_device_implementations.py index 223e8030..c1568857 100644 --- a/pycromanager/execution_engine/implementations/mm_device_implementations.py +++ b/pycromanager/execution_engine/implementations/mm_device_implementations.py @@ -2,7 +2,7 @@ Implementation of Micro-Manager devices.py in terms of the AcqEng bottom API """ -from pycromanager.acquisition.execution_engine.base_classes.devices import Camera +from pycromanager.execution_engine.base_classes.device_types import Camera from pycromanager.core import Core import numpy as np import pymmcore @@ -18,6 +18,7 @@ def __init__(self, device_name=None): :param device_name: Name of the camera device in Micro-Manager. If None, and there is only one camera, that camera will be used. If None and there are multiple cameras, an error will be raised """ + super().__init__() self._core = Core() camera_names = self._core.get_loaded_devices_of_type(2) # 2 means camera... 
if not camera_names: diff --git a/pycromanager/execution_engine/internal/device.py b/pycromanager/execution_engine/internal/device.py index dc563978..bd679e61 100644 --- a/pycromanager/execution_engine/internal/device.py +++ b/pycromanager/execution_engine/internal/device.py @@ -4,10 +4,12 @@ from abc import ABC, ABCMeta from functools import wraps from typing import Any, Dict +from weakref import WeakSet +import inspect -from pycromanager.acquisition.execution_engine.base_classes.acq_events import AcquisitionEvent -from pycromanager.acquisition.execution_engine.executor import ExecutionEngine - +from pycromanager.execution_engine.base_classes.acq_events import AcquisitionEvent +from pycromanager.execution_engine.executor import ExecutionEngine +import threading class MethodCallAcquisitionEvent(AcquisitionEvent): method_name: str @@ -19,12 +21,12 @@ def execute(self): method = getattr(self.instance, self.method_name) return method(*self.args, **self.kwargs) -class AttrAccessAcquisitionEvent(AcquisitionEvent): +class AttrGetAcquisitionEvent(AcquisitionEvent): attr_name: str instance: Any def execute(self): - return getattr(self.instance, self.attr_name) + return object.__getattribute__(self.instance, self.attr_name) class AttrSetAcquisitionEvent(AcquisitionEvent): attr_name: str @@ -32,8 +34,7 @@ class AttrSetAcquisitionEvent(AcquisitionEvent): instance: Any def execute(self): - setattr(self.instance, self.attr_name, self.value) - + object.__setattr__(self.instance, self.attr_name, self.value) class DeviceMetaclass(ABCMeta): """ @@ -50,8 +51,10 @@ def wrap_for_executor(attr_name, attr_value): @wraps(attr_value) def wrapper(self: 'Device', *args: Any, **kwargs: Any) -> Any: + if ExecutionEngine.on_any_executor_thread(): + return attr_value(self, *args, **kwargs) event = MethodCallAcquisitionEvent(method_name=attr_name, args=args, kwargs=kwargs, instance=self) - return self._executor.submit(event).await_execution() + return 
ExecutionEngine.get_instance().submit(event).await_execution() wrapper._wrapped_for_executor = True return wrapper @@ -68,33 +71,68 @@ def __new__(mcs, name: str, bases: tuple, attrs: dict) -> Any: else: new_attrs[attr_name] = attr_value - def __getattr__(self: 'Device', name: str) -> Any: - if name.startswith('__'): - return super().__getattribute__(name) - event = AttrAccessAcquisitionEvent( - attr_name=name, - instance=self - ) - return self._executor.submit(event).await_execution() + def __getattribute__(self: 'Device', name: str) -> Any: + if (ExecutionEngine.on_any_executor_thread() or name in ['_device_threads', '_is_internal_thread'] + or self._is_internal_thread()): + # we're already on the executor thread, so we can just return the attribute + # TODO (maybe) if submit_to_free_thread is added, could allow submitting to a differente executor thread + return object.__getattribute__(self, name) + # TODO: it could make sense to except certain calls from being rerouted to the executor...TBD + # elif name.startswith('_'): + # return object.__getattribute__(self, name) + else: + # if getattr(sys, 'gettrace', None) and name == 'shape': + # return None + event = AttrGetAcquisitionEvent(attr_name=name, instance=self) + return ExecutionEngine.get_instance().submit(event).await_execution() def __setattr__(self: 'Device', name: str, value: Any) -> None: - if name.startswith('_'): - object.__setattr__(self, name, value) + if (ExecutionEngine.on_any_executor_thread() or name in ['_device_threads', '_is_internal_thread'] + or self._is_internal_thread()): + object.__setattr__(self, name, value) # we're already on the executor thread, so just set it + # TODO: it could make sense to except certain calls from being rerouted to the executor...TBD + # elif name.startswith('_'): + # object.__setattr__(self, name, value) else: - event = AttrSetAcquisitionEvent( - attr_name=name, - value=value, - instance=self - ) - self._executor.submit(event).await_execution() - - 
new_attrs['__getattr__'] = __getattr__ + event = AttrSetAcquisitionEvent(attr_name=name, value=value, instance=self) + ExecutionEngine.get_instance().submit(event).await_execution() + + new_attrs['__getattribute__'] = __getattribute__ new_attrs['__setattr__'] = __setattr__ return super().__new__(mcs, name, bases, new_attrs) class Device(ABC, metaclass=DeviceMetaclass): + + _device_threads: WeakSet[threading.Thread] + def __init__(self): - self._executor: ExecutionEngine = ExecutionEngine.get_instance() + self._device_threads = WeakSet() + + def _is_internal_thread(self): + """ + Device calls get routed through the executor by default, but they are also allowed to have their + own internal threads outside the executor, and rereouting these to the executor could cause deadlocks and + confusing behavior. This method is used to determine if the current thread is one of these internal threads, + and if so, the device will not reroute the call to the executor + """ + current_thread = threading.current_thread() + + if current_thread in self._device_threads: + return True + + # If not in set, perform the check + frame = inspect.currentframe() + try: + while frame: + if frame.f_locals.get('self') is self: + # Add the thread to the set + self._device_threads.add(current_thread) + return True + frame = frame.f_back + finally: + del frame + + return False diff --git a/pycromanager/execution_engine/test/integration_test.py b/pycromanager/execution_engine/test/integration_test.py index 4348f5c8..6768de84 100644 --- a/pycromanager/execution_engine/test/integration_test.py +++ b/pycromanager/execution_engine/test/integration_test.py @@ -1,9 +1,9 @@ from pycromanager import start_headless -from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates -from pycromanager.acquisition.execution_engine.implementations.mm_device_implementations import MicroManagerCamera +from pycromanager.execution_engine.data_coords import DataCoordinates +from 
pycromanager.execution_engine.implementations.mm_device_implementations import MicroManagerCamera import os -from pycromanager.acquisition.execution_engine.executor import ExecutionEngine -from pycromanager.acquisition.execution_engine.base_classes.acq_events import StartCapture, ReadoutImages, DataHandler +from pycromanager.execution_engine.executor import ExecutionEngine +from pycromanager.execution_engine.base_classes.acq_events import StartCapture, ReadoutImages, DataHandler mm_install_dir = '/Users/henrypinkard/Micro-Manager' diff --git a/pycromanager/execution_engine/test/sandbox_device.py b/pycromanager/execution_engine/test/sandbox_device.py deleted file mode 100644 index 00292da9..00000000 --- a/pycromanager/execution_engine/test/sandbox_device.py +++ /dev/null @@ -1,57 +0,0 @@ -from pycromanager import start_headless -from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates -from pycromanager.acquisition.execution_engine.implementations.mm_device_implementations import MicroManagerCamera -import os - -mm_install_dir = '/Users/henrypinkard/Micro-Manager' -config_file = os.path.join(mm_install_dir, 'MMConfig_demo.cfg') -start_headless(mm_install_dir, config_file, - buffer_size_mb=1024, max_memory_mb=1024, # set these low for github actions - python_backend=True, - debug=False) - - -camera = MicroManagerCamera() - - -from pycromanager.acquisition.execution_engine.executor import ExecutionEngine -executor = ExecutionEngine() - - -from pycromanager.acquisition.execution_engine.base_classes.acq_events import StartCapture, ReadoutImages, DataHandler - -num_images = 100 -data_output_queue = DataHandler() - -start_capture_event = StartCapture(num_images=num_images, camera=camera) -readout_images_event = ReadoutImages(num_images=num_images, camera=camera, - image_coordinate_iterator=[DataCoordinates(time=t) for t in range(num_images)], - output_queue=data_output_queue) - -executor.submit_event(start_capture_event) 
-executor.submit_event(readout_images_event, use_free_thread=True) - -image_count = 0 -while True: - coordinates, image, metadata = data_output_queue.get() - image_count += 1 - print(f"Got image {image_count} ", f'pixel mean {image.mean()}' ) - - - -# -# events = [] -# coord_list = [ImageCoordinates(time=t) for t in range(10)] -# for coord in coord_list: -# events.append(ReadoutImages(num_images=1, camera=camera, image_coordinate_iterator=coord_list)) -# -# with Acquisition(show_display=False, debug=True) as acq: -# acq.acquire(events) - - - -# -# with Acquisition(show_display=False, debug=True) as acq: -# # copy list of events to avoid popping from original -# acq.acquire(multi_d_acquisition_events(num_time_points=10)) - diff --git a/pycromanager/execution_engine/test/sbox.py b/pycromanager/execution_engine/test/sbox.py new file mode 100644 index 00000000..d05d7a25 --- /dev/null +++ b/pycromanager/execution_engine/test/sbox.py @@ -0,0 +1,49 @@ +from pycromanager import start_headless, stop_headless +from pycromanager.execution_engine.data_coords import DataCoordinates +from pycromanager.execution_engine.implementations.mm_device_implementations import MicroManagerCamera +import os +from pycromanager.execution_engine.executor import ExecutionEngine +from pycromanager.execution_engine.base_classes.acq_events import DataHandler +from pycromanager.execution_engine.implementations.data_storage_implementations import NDRAMStorage +from pycromanager.execution_engine.implementations.event_implementations import StartCapture, ReadoutImages +from pycromanager import Core + +mm_install_dir = '/Users/henrypinkard/Micro-Manager' +config_file = os.path.join(mm_install_dir, 'MMConfig_demo.cfg') +start_headless(mm_install_dir, config_file, + buffer_size_mb=1024, max_memory_mb=1024, # set these low for github actions + python_backend=True, + debug=False) + + +executor = ExecutionEngine() + + + +camera = MicroManagerCamera() + +num_images = 100 +data_handler = 
DataHandler(storage=NDRAMStorage()) + +start_capture_event = StartCapture(num_images=num_images, camera=camera) +readout_images_event = ReadoutImages(num_images=num_images, camera=camera, + image_coordinate_iterator=[DataCoordinates(time=t) for t in range(num_images)], + data_handler=data_handler) +executor.submit(start_capture_event) +future = executor.submit(readout_images_event) + +future.await_execution() + +data_handler.finish() + +executor.shutdown() +stop_headless() + + + +# # print all threads that are still a +# import threading +# +# for thread in threading.enumerate(): +# print(thread) +# pass \ No newline at end of file diff --git a/pycromanager/execution_engine/test/unit_tests/test_device_metaclass.py b/pycromanager/execution_engine/test/unit_tests/test_device_metaclass.py index b7651457..ad37b2db 100644 --- a/pycromanager/execution_engine/test/unit_tests/test_device_metaclass.py +++ b/pycromanager/execution_engine/test/unit_tests/test_device_metaclass.py @@ -3,7 +3,7 @@ from typing import Any # Assuming these are imported from your actual implementation -from pycromanager.execution_engine.internal.device import (Device, AttrAccessAcquisitionEvent, +from pycromanager.execution_engine.internal.device import (Device, AttrGetAcquisitionEvent, AttrSetAcquisitionEvent, MethodCallAcquisitionEvent) @@ -90,7 +90,7 @@ def test_public_attribute_get(test_device, mock_executor): _ = test_device.public_attr # get the list of attribute access events - attr_access_events = [call[0][0] for call in mock_executor.execute.call_args_list if isinstance(call[0][0], AttrAccessAcquisitionEvent)] + attr_access_events = [call[0][0] for call in mock_executor.execute.call_args_list if isinstance(call[0][0], AttrGetAcquisitionEvent)] # filter to only AttrAccessAcquisitionEvents with the correct attribute name attr_access_events = [event for event in attr_access_events if event.attr_name == 'public_attr'] # check for only one AttrAccessAcquisitionEvent in the calls (other types of 
events are ok) From 92f767b5772ecd3daaab3ca3e63ee45a1aef7bdf Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Wed, 3 Jul 2024 17:12:04 +0200 Subject: [PATCH 14/20] various bug fixes and incremental improvements --- pycromanager/execution_engine/acq_future.py | 24 ++- .../base_classes/acq_events.py | 8 +- .../base_classes/device_types.py | 2 + pycromanager/execution_engine/executor.py | 62 ++++---- .../implementations/event_implementations.py | 34 +++-- .../mm_device_implementations.py | 9 +- .../test/integration_tests/camera_tests.py | 111 +++++++------- .../continuous_capture_test.py | 137 ++++++++++++++++++ .../test/unit_tests/test_executor.py | 72 +++++++-- 9 files changed, 340 insertions(+), 119 deletions(-) create mode 100644 pycromanager/execution_engine/test/integration_tests/continuous_capture_test.py diff --git a/pycromanager/execution_engine/acq_future.py b/pycromanager/execution_engine/acq_future.py index 59060d3f..bb09d848 100644 --- a/pycromanager/execution_engine/acq_future.py +++ b/pycromanager/execution_engine/acq_future.py @@ -53,23 +53,31 @@ def await_execution(self) -> Any: raise self._exception return self._return_value - def stop(self): + def stop(self, await_completion: bool = False): """ (Only for AcquistionEvents that also inherit from Stoppable) - Request the acquisition event to stop its execution. This will return immediately, - but set a flag that the event should stop at the next opportunity. It is up to the implementation of the - event to check this flag and stop its execution. + Request the acquisition event to stop its execution. Stop means the event should initiate a graceful shutdown. + The details of what this means are up to the implementation of the event. 
+ + Args: + await_completion: Whether to block until the event has completed its execution """ self._event._stop() + if await_completion: + self.await_execution() - def abort(self): + def abort(self, await_completion: bool = False): """ (Only for AcquistionEvents that also inherit from Abortable) - Request the acquisition event to abort its execution. This will return immediately, - but set a flag that the event should abort at the next opportunity. It is up to the implementation of the - event to check this flag and abort its execution. + Request the acquisition event to abort its execution. Abort means the event should immediately stop its execution + The details of what this means are up to the implementation of the event. + + Args: + await_completion: Whether to block until the event has completed its execution """ self._event._abort() + if await_completion: + self.await_execution() def await_data(self, coordinates: Optional[Union[DataCoordinates, Dict[str, Union[int, str]], diff --git a/pycromanager/execution_engine/base_classes/acq_events.py b/pycromanager/execution_engine/base_classes/acq_events.py index ebec483c..bd44b79f 100644 --- a/pycromanager/execution_engine/base_classes/acq_events.py +++ b/pycromanager/execution_engine/base_classes/acq_events.py @@ -56,8 +56,14 @@ def _post_execution(self, return_value: Optional[Any] = None, exception: Optiona future = self._future_weakref() if future is not None: future._notify_execution_complete(return_value, exception) + self._finished = True + + def is_finished(self): + return self._finished class Stoppable: + # TODO: this should be on the future, if youre not going to merge them into one + # becuase the event can be reused """ Acquistition events that can be stopped should inherit from this class. They are responsible for checking if is_stop_requested() returns True and stopping their execution if it does. 
When stopping, an orderly shutdown @@ -68,7 +74,7 @@ class Stoppable: def _stop(self): """ - This is handled by the Future + This is called by the acquisitionFuture object """ self._stop_requested = True diff --git a/pycromanager/execution_engine/base_classes/device_types.py b/pycromanager/execution_engine/base_classes/device_types.py index fc172c7a..aaa45a4e 100644 --- a/pycromanager/execution_engine/base_classes/device_types.py +++ b/pycromanager/execution_engine/base_classes/device_types.py @@ -48,6 +48,7 @@ def arm(self, frame_count=None) -> None: def start(self) -> None: ... + # TODO: is it possible to make this return the number of images captured? @abstractmethod def stop(self) -> None: ... @@ -56,6 +57,7 @@ def stop(self) -> None: def is_stopped(self) -> bool: ... + # TODO: perhaps this should be a seperate buffer class @abstractmethod def pop_image(self, timeout=None) -> (np.ndarray, dict): """ diff --git a/pycromanager/execution_engine/executor.py b/pycromanager/execution_engine/executor.py index f9c0eb95..de510c77 100644 --- a/pycromanager/execution_engine/executor.py +++ b/pycromanager/execution_engine/executor.py @@ -18,6 +18,7 @@ class ExecutionEngine: _instance = None + _debug = False def __init__(self, num_threads=1): self._thread_managers: list[_ExecutionThreadManager] = [] @@ -32,20 +33,25 @@ def get_instance(cls): return cls._instance @classmethod - def on_main_executor_thread(self): + def on_main_executor_thread(cls): """ Check if the current thread is an executor thread """ return threading.current_thread() is ExecutionEngine.get_instance()._thread_managers[0] @classmethod - def on_any_executor_thread(self): + def on_any_executor_thread(cls): + if ExecutionEngine.get_instance() is None: + raise RuntimeError("ExecutionEngine has not been initialized") return any([m.is_managed_thread(threading.current_thread()) for m in ExecutionEngine.get_instance()._thread_managers]) def _start_new_thread(self): 
self._thread_managers.append(_ExecutionThreadManager()) + def set_debug_mode(self, debug): + ExecutionEngine._debug = debug + def submit(self, event_or_events: Union[AcquisitionEvent, Iterable[AcquisitionEvent]], transpile: bool = True, prioritize: bool = False, use_free_thread: bool = False, data_handler: DataHandler = None) -> Union[AcquisitionFuture, Iterable[AcquisitionFuture]]: @@ -113,12 +119,15 @@ def _submit_single_event(self, event: AcquisitionEvent, use_free_thread: bool = """ future = AcquisitionFuture(event=event) if use_free_thread: + need_new_thread = True for thread in self._thread_managers: if thread.is_free(): thread.submit_event(event) + need_new_thread = False break - self._start_new_thread() - self._thread_managers[-1].submit_event(event) + if need_new_thread: + self._start_new_thread() + self._thread_managers[-1].submit_event(event) else: self._thread_managers[0].submit_event(event, prioritize=prioritize) @@ -189,32 +198,32 @@ def _run_thread(self): self._event_executing = True # Event execution loop - while True: + exception = None + return_val = None + for attempt_number in range(event.num_retries_on_exception + 1): + if self._terminate_event.is_set(): + return # Executor has been terminated try: - if event._finished: - raise RuntimeError("Event was already executed") + if ExecutionEngine._debug: + print("Executing event", event.__class__.__name__, threading.current_thread()) + if event.is_finished(): + raise RuntimeError("Event ", event, " was already executed") return_val = event.execute() - event._finished = True - stopped = isinstance(event, Stoppable) and event.is_stop_requested() - aborted = isinstance(event, Abortable) and event.is_abort_requested() - event._post_execution(return_value=return_val, stopped=stopped, aborted=aborted) # notify futures - with self._addition_condition: - self._event_executing = False + if ExecutionEngine._debug: + print("Finished executing", event.__class__.__name__, threading.current_thread()) break except 
Exception as e: - if num_retries > 0: - if self._terminate_event.is_set(): - return - num_retries -= 1 - warnings.warn(f"Exception during event execution, retrying {num_retries} more times") - traceback.print_exc() - else: - traceback.print_exc() - event._post_execution(exception=e) # notify futures - with self._addition_condition: - self._event_executing = False - event._finished = True - raise e # re-raise the exception to stop the thread + warnings.warn(f"Exception during event execution, retrying {num_retries} more times") + traceback.print_exc() + exception = e + + stopped = isinstance(event, Stoppable) and event.is_stop_requested() + aborted = isinstance(event, Abortable) and event.is_abort_requested() + event._post_execution(return_value=return_val, stopped=stopped, aborted=aborted, exception=exception) + with self._addition_condition: + self._event_executing = False + if exception: + raise exception event = None def is_free(self): @@ -239,7 +248,6 @@ def submit_event(self, event, prioritize=False): self._deque.append(event) self._addition_condition.notify_all() - def terminate(self): """ Stop the thread immediately, without waiting for the current event to finish diff --git a/pycromanager/execution_engine/implementations/event_implementations.py b/pycromanager/execution_engine/implementations/event_implementations.py index 3a4ca3eb..a6f7e712 100644 --- a/pycromanager/execution_engine/implementations/event_implementations.py +++ b/pycromanager/execution_engine/implementations/event_implementations.py @@ -3,12 +3,21 @@ """ from typing import Iterable import itertools -from pycromanager.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing +from pycromanager.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing, Stoppable from pycromanager.execution_engine.base_classes.device_types import Camera from pycromanager.execution_engine.data_coords import DataCoordinates +import time +class Sleep(AcquisitionEvent): 
+ """ + Sleep for a specified amount of time + """ + time_s: int + + def execute(self): + time.sleep(self.time_s) -class ReadoutImages(AcquisitionEvent, DataProducing): +class ReadoutImages(AcquisitionEvent, DataProducing, Stoppable): """ Readout one or more images (and associated metadata) from a camera @@ -16,27 +25,32 @@ class ReadoutImages(AcquisitionEvent, DataProducing): num_images (int): The number of images to read out. If None, the readout will continue until the image_coordinate_iterator is exhausted or the camera is stopped and no more images are available. camera (Camera): The camera object to read images from. + stop_on_empty (bool): If True, the readout will stop when the camera is stopped when there is not an + image available to read image_coordinate_iterator (Iterable[DataCoordinates]): An iterator or list of ImageCoordinates objects, which specify the coordinates of the images that will be read out, should be able to provide at least num_images elements. """ num_images: int = None - camera: Camera + camera: Camera # TODO: should this change to a buffer object? 
+ stop_on_empty: bool = False def execute(self): + # TODO a more efficient way to do this is with callbacks from the camera + # but this is not currently implemented, at least for Micro-Manager cameras image_counter = itertools.count() if self.num_images is None else range(self.num_images) for image_number, image_coordinates in zip(image_counter, self.image_coordinate_iterator): while True: - # TODO: read from state to check for cancel condition - # this can be made more efficient in the future with a execution_engine image buffer that provides callbacks - # on a execution_engine image recieved so that polling can be avoided + # check if event.stop has been called + if self.is_stop_requested(): + return image, metadata = self.camera.pop_image(timeout=0.01) # only block for 10 ms so stop event can be checked - if image is not None: + if image is None and self.stop_on_empty: + return + elif image is not None: self.put_data(image_coordinates, image, metadata) break - # check stopping condition - if self.camera.is_stopped(): - break + class StartCapture(AcquisitionEvent): diff --git a/pycromanager/execution_engine/implementations/mm_device_implementations.py b/pycromanager/execution_engine/implementations/mm_device_implementations.py index c1568857..f7ac42ef 100644 --- a/pycromanager/execution_engine/implementations/mm_device_implementations.py +++ b/pycromanager/execution_engine/implementations/mm_device_implementations.py @@ -55,7 +55,7 @@ def arm(self, frame_count=None) -> None: pass else: self._core.prepare_sequence_acquisition(self.device_name) - self._frame_count = 1 + self._frame_count = frame_count def start(self) -> None: if self._frame_count == 1: @@ -78,14 +78,17 @@ def stop(self) -> None: self._core.stop_sequence_acquisition(self.device_name) def is_stopped(self) -> bool: - return self._core.is_sequence_running(self.device_name) and not self._snap_active + return not self._core.is_sequence_running(self.device_name) and not self._snap_active def 
pop_image(self, timeout=None) -> (np.ndarray, dict): if self._frame_count != 1: md = pymmcore.Metadata() start_time = time.time() while True: - pix = self._core.pop_next_image_md(0, 0, md) + try: + pix = self._core.pop_next_image_md(0, 0, md) + except IndexError as e: + pix = None if pix is not None: break # sleep for the shortest possible time, only to allow the thread to be interrupted and prevent diff --git a/pycromanager/execution_engine/test/integration_tests/camera_tests.py b/pycromanager/execution_engine/test/integration_tests/camera_tests.py index 28530a9b..5280a570 100644 --- a/pycromanager/execution_engine/test/integration_tests/camera_tests.py +++ b/pycromanager/execution_engine/test/integration_tests/camera_tests.py @@ -1,14 +1,14 @@ import time from pycromanager import start_headless -from pycromanager.acquisition.execution_engine.data_coords import DataCoordinates -from pycromanager.acquisition.execution_engine.implementations.mm_device_implementations import MicroManagerCamera +from pycromanager.execution_engine.data_coords import DataCoordinates +from pycromanager.execution_engine.implementations.mm_device_implementations import MicroManagerCamera import os -from pycromanager.acquisition.execution_engine.executor import ExecutionEngine -from pycromanager.acquisition.execution_engine.implementations.event_implementations import StartCapture, ReadoutImages, \ +from pycromanager.execution_engine.executor import ExecutionEngine +from pycromanager.execution_engine.implementations.event_implementations import StartCapture, ReadoutImages, \ StartContinuousCapture, StopCapture -from pycromanager.acquisition.execution_engine.data_handler import DataHandler -from pycromanager.acquisition.execution_engine.implementations.data_storage_implementations import NDStorage +from pycromanager.execution_engine.data_handler import DataHandler +from pycromanager.execution_engine.implementations.data_storage_implementations import NDRAMStorage import itertools @@ -20,75 
+20,68 @@ python_backend=True, debug=False) -camera = MicroManagerCamera() executor = ExecutionEngine() +camera = MicroManagerCamera() -### Finite sequence -num_images = 100 -storage = NDStorage() -data_handler = DataHandler(storage=storage) - -start_capture_event = StartCapture(num_images=num_images, camera=camera) -readout_images_event = ReadoutImages(num_images=num_images, camera=camera, - image_coordinate_iterator=[DataCoordinates(time=t) for t in range(num_images)], - data_handler=data_handler) - -executor.submit([start_capture_event, readout_images_event]) - -image_count = 0 -# TODO: monitor this with notifications - -while not {'time': num_images - 1} in storage: - time.sleep(1) - -print('Finished first one') - - -#### Live mode -storage = NDStorage() -data_handler = DataHandler(storage=storage) -start_capture_event = StartContinuousCapture(camera=camera) -readout_images_event = ReadoutImages(num_images=num_images, camera=camera, - # TODO change this to infinite - image_coordinate_iterator=(DataCoordinates(time=t) for t in itertools.count()), - data_handler=data_handler) -stop_capture = StopCapture(camera=camera) -executor.submit([start_capture_event, readout_images_event]) -time.sleep(2) -# Readout images is continuously running on one thread, so need to do this on another thread -executor.submit(stop_capture, use_free_thread=True) +# ### Finite sequence +# num_images = 100 +# storage = NDRAMStorage() +# data_handler = DataHandler(storage=storage) +# +# start_capture_event = StartCapture(num_images=num_images, camera=camera) +# readout_images_event = ReadoutImages(num_images=num_images, camera=camera, +# image_coordinate_iterator=[DataCoordinates(time=t) for t in range(num_images)], +# data_handler=data_handler) +# +# executor.submit([start_capture_event, readout_images_event]) +# +# image_count = 0 +# # TODO: monitor this with notifications +# +# while not {'time': num_images - 1} in storage: +# time.sleep(1) +# +# data_handler.finish() +# print('Finished 
first one') -image_count = 0 -# TODO: monitor this with notifications -while not {'time': num_images - 1} in storage: - time.sleep(1) -print('Finished second one') -num_images = 1 -storage = NDStorage() -data_handler = DataHandler(storage=storage) +# num_images = 1 +# storage = NDRAMStorage() +# data_handler = DataHandler(storage=storage) +# +# start_capture_event = StartCapture(num_images=num_images, camera=camera) +# readout_images_event = ReadoutImages(num_images=num_images, camera=camera, +# image_coordinate_iterator=[DataCoordinates(time=t) for t in range(num_images)], +# data_handler=data_handler) +# +# executor.submit([start_capture_event, readout_images_event]) +# +# image_count = 0 +# # TODO: monitor this with notifications +# +# while not {'time': num_images - 1} in storage: +# time.sleep(1) +# +# data_handler.finish() +# print('Finished single image') -start_capture_event = StartCapture(num_images=num_images, camera=camera) -readout_images_event = ReadoutImages(num_images=num_images, camera=camera, - image_coordinate_iterator=[DataCoordinates(time=t) for t in range(num_images)], - data_handler=data_handler) -executor.submit([start_capture_event, readout_images_event]) -image_count = 0 -# TODO: monitor this with notifications -while not {'time': num_images - 1} in storage: - time.sleep(1) +executor.shutdown() -print('Finished single image') \ No newline at end of file +# # pritn all active threads +# import threading +# +# for thread in threading.enumerate(): +# print(thread) \ No newline at end of file diff --git a/pycromanager/execution_engine/test/integration_tests/continuous_capture_test.py b/pycromanager/execution_engine/test/integration_tests/continuous_capture_test.py new file mode 100644 index 00000000..f1b77669 --- /dev/null +++ b/pycromanager/execution_engine/test/integration_tests/continuous_capture_test.py @@ -0,0 +1,137 @@ +import time + +from pycromanager import start_headless +from pycromanager.execution_engine.data_coords import 
DataCoordinates +from pycromanager.execution_engine.implementations.mm_device_implementations import MicroManagerCamera +import os +from pycromanager.execution_engine.executor import ExecutionEngine +from pycromanager.execution_engine.implementations.event_implementations import StartCapture, ReadoutImages, \ + StartContinuousCapture, StopCapture, Sleep +from pycromanager.execution_engine.data_handler import DataHandler +from pycromanager.execution_engine.implementations.data_storage_implementations import NDRAMStorage +import itertools + + +# TODO: make this a pytest startup fixture +mm_install_dir = '/Users/henrypinkard/Micro-Manager' +config_file = os.path.join(mm_install_dir, 'MMConfig_demo.cfg') +start_headless(mm_install_dir, config_file, + buffer_size_mb=1024, max_memory_mb=1024, # set these low for github actions + python_backend=True, + debug=False) + +executor = ExecutionEngine() + +camera = MicroManagerCamera() +executor.set_debug_mode(True) + + +#### Version 1: submit start--readout--stop events in the same thread and manually stop readout from main thread +print('version 1') +storage = NDRAMStorage() +data_handler = DataHandler(storage=storage) + + + +start_capture_event = StartContinuousCapture(camera=camera) +readout_images_event = ReadoutImages(camera=camera, + image_coordinate_iterator=(DataCoordinates(time=t) for t in itertools.count()), + data_handler=data_handler) +stop_capture_event = StopCapture(camera=camera) + +_, readout_future, _ = executor.submit([start_capture_event, readout_images_event, stop_capture_event]) +time.sleep(2) +readout_future.stop(await_completion=True) + + +# make sure 10 images were collected +while not {'time': 10} in storage: + time.sleep(1) +data_handler.finish() + + + + + +### Version 2: submit start--sleep--stop--readout events all in a single thread +# TODO: maybe need some synchronization here becuase the camera could stop before any images are ready.. 
+print('version 2') +storage = NDRAMStorage() +data_handler = DataHandler(storage=storage) + + +start_capture_event = StartContinuousCapture(camera=camera) +readout_images_event = ReadoutImages(camera=camera, + image_coordinate_iterator=(DataCoordinates(time=t) for t in itertools.count()), + data_handler=data_handler, stop_on_empty=True) +stop_capture_event = StopCapture(camera=camera) +sleep_event = Sleep(time_s=2) + +_, _, _, _ = executor.submit([start_capture_event, sleep_event, stop_capture_event, readout_images_event]) + + +# make sure 10 images were collected +while not {'time': 10} in storage: + time.sleep(1) +data_handler.finish() + + + + + + +### Version 3: readout images in parallel with capture +print('version 3') +storage = NDRAMStorage() +data_handler = DataHandler(storage=storage) + + +start_capture_event = StartContinuousCapture(camera=camera) +readout_images_event = ReadoutImages(camera=camera, num_images=10, + image_coordinate_iterator=(DataCoordinates(time=t) for t in itertools.count()), + data_handler=data_handler) +stop_capture_event = StopCapture(camera=camera) +sleep_event = Sleep(time_s=2) + +_, _, _ = executor.submit([start_capture_event, sleep_event, stop_capture_event]) +executor.submit(readout_images_event, use_free_thread=True) + + +# make sure 10 images were collected +while not {'time': 9} in storage: + time.sleep(1) +data_handler.finish() + + + + +# Version 4: directly make API calls on camera and maybe interleave with readout +print('version 4') +storage = NDRAMStorage() +data_handler = DataHandler(storage=storage) + + +readout_images_event = ReadoutImages(camera=camera, num_images=10, + image_coordinate_iterator=(DataCoordinates(time=t) for t in itertools.count()), + data_handler=data_handler) +camera.arm(100) +camera.start() + +executor.submit(readout_images_event) + +# make sure 10 images were collected +while not {'time': 9} in storage: + time.sleep(1) +data_handler.finish() + + + + + +executor.shutdown() + +# pritn all active 
threads +import threading + +for thread in threading.enumerate(): + print(thread) \ No newline at end of file diff --git a/pycromanager/execution_engine/test/unit_tests/test_executor.py b/pycromanager/execution_engine/test/unit_tests/test_executor.py index 6f9c2d15..8e0ecfc6 100644 --- a/pycromanager/execution_engine/test/unit_tests/test_executor.py +++ b/pycromanager/execution_engine/test/unit_tests/test_executor.py @@ -1,8 +1,8 @@ import pytest from unittest.mock import MagicMock -from pycromanager.acquisition.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing -from pycromanager.acquisition.execution_engine.executor import ExecutionEngine -from pycromanager.acquisition.execution_engine.acq_future import AcquisitionFuture +from pycromanager.execution_engine.base_classes.acq_events import AcquisitionEvent, DataProducing +from pycromanager.execution_engine.executor import ExecutionEngine +from pycromanager.execution_engine.acq_future import AcquisitionFuture import threading import time @@ -10,15 +10,17 @@ def create_sync_event(start_event, finish_event): event = MagicMock(spec=AcquisitionEvent) event.num_retries_on_exception = 0 - event._uuid = None + event._finished = False event.executed = False event.executed_time = None + event.execute_count = 0 def execute(): start_event.set() # Signal that the execution has started finish_event.wait() # Wait for the signal to finish event.executed = True event.executed_time = time.time() + event.execute_count += 1 event.execute.side_effect = execute event._post_execution = MagicMock() @@ -31,11 +33,15 @@ def acquisition_event_executor(): def test_submit_single_event(acquisition_event_executor): + """ + Test submitting a single event to the ExecutionEngine. + Verifies that the event is executed and returns an AcquisitionFuture. 
+ """ start_event = threading.Event() finish_event = threading.Event() event = create_sync_event(start_event, finish_event) - future = acquisition_event_executor.submit_event(event) + future = acquisition_event_executor.submit(event) start_event.wait() # Wait for the event to start executing finish_event.set() # Signal the event to finish acquisition_event_executor.shutdown() @@ -45,6 +51,10 @@ def test_submit_single_event(acquisition_event_executor): def test_submit_multiple_events(acquisition_event_executor): + """ + Test submitting multiple events to the ExecutionEngine. + Verifies that all events are executed and return AcquisitionFutures. + """ start_event1 = threading.Event() finish_event1 = threading.Event() event1 = create_sync_event(start_event1, finish_event1) @@ -53,8 +63,8 @@ def test_submit_multiple_events(acquisition_event_executor): finish_event2 = threading.Event() event2 = create_sync_event(start_event2, finish_event2) - future1 = acquisition_event_executor.submit_event(event1) - future2 = acquisition_event_executor.submit_event(event2) + future1 = acquisition_event_executor.submit(event1) + future2 = acquisition_event_executor.submit(event2) start_event1.wait() # Wait for the first event to start executing finish_event1.set() # Signal the first event to finish @@ -69,6 +79,10 @@ def test_submit_multiple_events(acquisition_event_executor): def test_event_prioritization(acquisition_event_executor): + """ + Test event prioritization in the ExecutionEngine. + Verifies that prioritized events are executed before non-prioritized events. 
+ """ start_event1 = threading.Event() finish_event1 = threading.Event() event1 = create_sync_event(start_event1, finish_event1) @@ -81,11 +95,11 @@ def test_event_prioritization(acquisition_event_executor): finish_event3 = threading.Event() event3 = create_sync_event(start_event3, finish_event3) - acquisition_event_executor.submit_event(event1) + acquisition_event_executor.submit(event1) start_event1.wait() # Wait for the first event to start executing - acquisition_event_executor.submit_event(event2) - acquisition_event_executor.submit_event(event3, prioritize=True) + acquisition_event_executor.submit(event2) + acquisition_event_executor.submit(event3, prioritize=True) finish_event1.set() finish_event2.set() @@ -101,6 +115,10 @@ def test_event_prioritization(acquisition_event_executor): def test_use_free_thread_parallel_execution(acquisition_event_executor): + """ + Test parallel execution using free threads in the ExecutionEngine. + Verifies that events submitted with use_free_thread=True can execute in parallel. + """ start_event1 = threading.Event() finish_event1 = threading.Event() event1 = create_sync_event(start_event1, finish_event1) @@ -127,4 +145,36 @@ def test_use_free_thread_parallel_execution(acquisition_event_executor): acquisition_event_executor.shutdown() assert event1.executed - assert event2.executed \ No newline at end of file + assert event2.executed + + +def test_single_execution_with_free_thread(acquisition_event_executor): + """ + Test that each event is executed only once, even when using use_free_thread=True. + Verifies that events are not executed multiple times regardless of submission method. 
+ """ + start_event1 = threading.Event() + finish_event1 = threading.Event() + event1 = create_sync_event(start_event1, finish_event1) + + start_event2 = threading.Event() + finish_event2 = threading.Event() + event2 = create_sync_event(start_event2, finish_event2) + + acquisition_event_executor.submit(event1) + acquisition_event_executor.submit(event2, use_free_thread=True) + + # Wait for both events to start executing + assert start_event1.wait(timeout=5) + assert start_event2.wait(timeout=5) + + # Signal both events to finish + finish_event1.set() + finish_event2.set() + + acquisition_event_executor.shutdown() + + assert event1.executed + assert event2.executed + assert event1.execute_count == 1 + assert event2.execute_count == 1 \ No newline at end of file From 762aab644939d9a8e9b5e38d20ea818b0d563629 Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Tue, 9 Jul 2024 12:06:08 +0200 Subject: [PATCH 15/20] progress in an intermediate state --- .github/ISSUE_TEMPLATE/bug_report.md | 4 +- .github/workflows/build_and_test.yml | 2 +- .../application_notebooks/PSF_viewer.py | 6 +- pycromanager/acq_future.py | 6 +- pycromanager/acquisition/RAMStorage_java.py | 6 +- .../acquisition/acq_eng_py/internal/engine.py | 12 +- .../acq_eng_py/main/acq_eng_metadata.py | 4 +- .../acq_eng_py/main/acquisition_event.py | 4 +- .../acquisition/acquisition_superclass.py | 56 +-- .../acquisition/java_backend_acquisitions.py | 34 +- .../python_backend_acquisitions.py | 6 +- .../__init__.py | 0 .../micromanager}/__init__.py | 0 .../micromanager/mm_device_implementations.py | 288 ++++++++++++++ .../__init__.py | 0 .../camera_events.py} | 19 +- .../event_implementations/misc_events.py | 38 ++ .../event_implementations/multi_d_events.py | 198 ++++++++++ .../positioner_events.py | 46 +++ .../mm_device_implementations.py | 110 ------ .../__init__.py | 0 .../integration_tests/camera_tests.py | 84 ++++ .../continuous_capture_alternatives.py} | 19 +- 
.../integration_tests/multi_d_testing.py | 91 +++++ .../sandbox_test_micromanager_device.py | 28 ++ .../{test => integration_tests}/sbox.py | 13 +- .../execution_engine/internal/device.py | 138 ------- .../{test => kernel}/__init__.py | 0 .../acq_event_base.py} | 36 +- .../{ => kernel}/acq_future.py | 16 +- .../{ => kernel}/data_coords.py | 71 ++-- .../{ => kernel}/data_handler.py | 26 +- .../data_storage_api.py} | 14 +- .../execution_engine/kernel/device.py | 169 ++++++++ .../device_types_base.py} | 45 ++- .../execution_engine/{ => kernel}/executor.py | 85 ++-- .../test}/__init__.py | 0 .../test}/test_acquisition_futures.py | 26 +- .../test}/test_data_coords.py | 43 ++- .../test}/test_data_handler.py | 18 +- .../kernel/test/test_executor.py | 363 ++++++++++++++++++ .../NDTiffandRAM.py} | 20 +- .../__init__.py | 0 .../storage_implementations/test/__init__.py | 0 .../test/test_NDTiff_and_RAM.py} | 0 .../execution_engine/test/integration_test.py | 61 --- .../test/integration_tests/camera_tests.py | 87 ----- .../test/unit_tests/test_device_metaclass.py | 131 ------- .../test/unit_tests/test_executor.py | 180 --------- pycromanager/headless.py | 4 +- pycromanager/test/conftest.py | 4 +- pycromanager/test/test_acquisition.py | 4 +- pycromanager/test/test_callback_functions.py | 2 +- pycromanager/test/test_notifications.py | 4 +- scripts/camera_triggering/genIexamples.py | 4 +- scripts/custom_axis_acq.py | 2 +- scripts/external_camera_trigger.py | 2 +- scripts/generate_ndtiff_test.py | 2 +- scripts/headless_demo.py | 2 +- scripts/image_processor.py | 2 +- scripts/image_processor_divert.py | 2 +- scripts/image_processor_multiple.py | 2 +- scripts/magellan_surfaces.py | 2 +- scripts/multi_d_acq.py | 2 +- scripts/speed_test.py | 2 +- scripts/string_axes.py | 4 +- 66 files changed, 1680 insertions(+), 969 deletions(-) rename pycromanager/execution_engine/{apis => device_implementations}/__init__.py (100%) rename pycromanager/execution_engine/{base_classes => 
device_implementations/micromanager}/__init__.py (100%) create mode 100644 pycromanager/execution_engine/device_implementations/micromanager/mm_device_implementations.py rename pycromanager/execution_engine/{implementations => event_implementations}/__init__.py (100%) rename pycromanager/execution_engine/{implementations/event_implementations.py => event_implementations/camera_events.py} (84%) create mode 100644 pycromanager/execution_engine/event_implementations/misc_events.py create mode 100644 pycromanager/execution_engine/event_implementations/multi_d_events.py create mode 100644 pycromanager/execution_engine/event_implementations/positioner_events.py delete mode 100644 pycromanager/execution_engine/implementations/mm_device_implementations.py rename pycromanager/execution_engine/{internal => integration_tests}/__init__.py (100%) create mode 100644 pycromanager/execution_engine/integration_tests/camera_tests.py rename pycromanager/execution_engine/{test/integration_tests/continuous_capture_test.py => integration_tests/continuous_capture_alternatives.py} (79%) create mode 100644 pycromanager/execution_engine/integration_tests/multi_d_testing.py create mode 100644 pycromanager/execution_engine/integration_tests/sandbox_test_micromanager_device.py rename pycromanager/execution_engine/{test => integration_tests}/sbox.py (67%) delete mode 100644 pycromanager/execution_engine/internal/device.py rename pycromanager/execution_engine/{test => kernel}/__init__.py (100%) rename pycromanager/execution_engine/{base_classes/acq_events.py => kernel/acq_event_base.py} (77%) rename pycromanager/execution_engine/{ => kernel}/acq_future.py (94%) rename pycromanager/execution_engine/{ => kernel}/data_coords.py (81%) rename pycromanager/execution_engine/{ => kernel}/data_handler.py (91%) rename pycromanager/execution_engine/{apis/data_storage.py => kernel/data_storage_api.py} (90%) create mode 100644 pycromanager/execution_engine/kernel/device.py rename 
pycromanager/execution_engine/{base_classes/device_types.py => kernel/device_types_base.py} (57%) rename pycromanager/execution_engine/{ => kernel}/executor.py (77%) rename pycromanager/execution_engine/{test/integration_tests => kernel/test}/__init__.py (100%) rename pycromanager/execution_engine/{test/unit_tests => kernel/test}/test_acquisition_futures.py (84%) rename pycromanager/execution_engine/{test/unit_tests => kernel/test}/test_data_coords.py (78%) rename pycromanager/execution_engine/{test/unit_tests => kernel/test}/test_data_handler.py (87%) create mode 100644 pycromanager/execution_engine/kernel/test/test_executor.py rename pycromanager/execution_engine/{implementations/data_storage_implementations.py => storage_implementations/NDTiffandRAM.py} (76%) rename pycromanager/execution_engine/{test/unit_tests => storage_implementations}/__init__.py (100%) create mode 100644 pycromanager/execution_engine/storage_implementations/test/__init__.py rename pycromanager/execution_engine/{test/unit_tests/test_data_storage.py => storage_implementations/test/test_NDTiff_and_RAM.py} (100%) delete mode 100644 pycromanager/execution_engine/test/integration_test.py delete mode 100644 pycromanager/execution_engine/test/integration_tests/camera_tests.py delete mode 100644 pycromanager/execution_engine/test/unit_tests/test_device_metaclass.py delete mode 100644 pycromanager/execution_engine/test/unit_tests/test_executor.py diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 31fbb0a2..bca952d9 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -19,7 +19,7 @@ labels: bug configuration, it is likely unrelated to pycro-manager --> - - ### Problem diff --git a/.github/workflows/build_and_deploy.yml b/.github/workflows/build_and_deploy.yml index 5f07edd5..38197d4b 100644 --- a/.github/workflows/build_and_deploy.yml +++ b/.github/workflows/build_and_deploy.yml @@ -1,9 +1,9 @@ # If changes to java 
versions -# Deploy execution_engine version of pycromanager java to maven, +# Deploy new version of pycromanager java to maven, # and then update micro-manager ivy file and make PR # If changes to python version -# await for PR to merge into execution_engine MM version -# then publish execution_engine version to pypi +# await for PR to merge into new MM version +# then publish new version to pypi name: Build and deploy Java and Python components of Pycro-Manager @@ -20,7 +20,7 @@ concurrency: PM_version_update jobs: - # Use a filter to determine whether to deploy execution_engine java version + # Use a filter to determine whether to deploy new java version check-java-version: if: ${{ github.repository == 'micro-manager/pycro-manager' }} runs-on: ubuntu-latest @@ -123,7 +123,7 @@ jobs: repository: micro-manager/pycro-manager ref: main - - name: Wait for execution_engine version to be available and update ivy.xml + - name: Wait for new version to be available and update ivy.xml run: | cd pycro-manager git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com" @@ -194,7 +194,7 @@ jobs: git push origin --delete dependency_update_from_pycromanager - # After java deps have updated in MM, time to check if a execution_engine python version is needed + # After java deps have updated in MM, time to check if a new python version is needed check-python-version: if: ${{ github.repository == 'micro-manager/pycro-manager' }} runs-on: ubuntu-latest @@ -213,12 +213,12 @@ jobs: pypi-deploy: - # Once any changes to java have gone into micro-manager, a execution_engine version of PM can be deployed to PyPi + # Once any changes to java have gone into micro-manager, a new version of PM can be deployed to PyPi needs: [check-java-version, mm-update, maven-deploy, check-python-version] - name: Deploy execution_engine version to PyPi if needed + name: Deploy new version to PyPi if needed # Run if - # java update is complete without errors and execution_engine 
version is merged into MM main (or no java update) + # java update is complete without errors and new version is merged into MM main (or no java update) # and python version changed # weird syntax needed, see: https://github.com/actions/runner/issues/491#issuecomment-850884422 if: ${{ github.repository == 'micro-manager/pycro-manager' && always() && needs.check-python-version.outputs.changed == 'true' && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled')}} diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index d5905918..8bc18164 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -1,4 +1,4 @@ -name: Build and integration_tests +name: Build and test on: pull_request: diff --git a/build_automation/update_PycroManagerJava.py b/build_automation/update_PycroManagerJava.py index 34e253ca..aa8efaab 100644 --- a/build_automation/update_PycroManagerJava.py +++ b/build_automation/update_PycroManagerJava.py @@ -39,7 +39,7 @@ def read_versions(root): for lib_name in main_branch_versions.keys(): old_version = main_branch_versions[lib_name] new_version = updated_versions[lib_name] - print('\t', lib_name, '\t\told: ', old_version, '\texecution_engine: ', new_version) + print('\t', lib_name, '\t\told: ', old_version, '\tnew: ', new_version) if new_version > old_version: if new_version.minor > old_version.minor: minor_version_increased = True diff --git a/docs/source/application_notebooks/PSF_viewer.py b/docs/source/application_notebooks/PSF_viewer.py index cfe70607..84d4dac0 100644 --- a/docs/source/application_notebooks/PSF_viewer.py +++ b/docs/source/application_notebooks/PSF_viewer.py @@ -163,8 +163,8 @@ def grab_image(image, metadata): def acquire_data(z_range): - """ micro-manager data acquisition. Creates acquisition event_implementations for z-stack. 
- This example: use custom event_implementations, not multi_d_acquisition because the + """ micro-manager data acquisition. Creates acquisition events for z-stack. + This example: use custom events, not multi_d_acquisition because the z-stage is not run from micro-manager but controlled via external DAQ.""" with JavaBackendAcquisition(directory=None, name=None, show_display=True, @@ -177,7 +177,7 @@ def acquire_data(z_range): def acquire_multid(z_range): - """ micro-manager data acquisition. Creates acquisition event_implementations for z-stack. + """ micro-manager data acquisition. Creates acquisition events for z-stack. This example: use multi_d_acquisition because the z-stage is run from micro-manager. Unless hardware triggering is set up in micro-manager, this will be fairly slow: diff --git a/misc/PropertyMap.py b/misc/PropertyMap.py index 8444dc14..cda27221 100644 --- a/misc/PropertyMap.py +++ b/misc/PropertyMap.py @@ -82,7 +82,7 @@ def encode(self) -> dict: @staticmethod def hook(d: dict): - """Check if a dictionary represents an instance of this class and return a execution_engine instance. If this dict does not match + """Check if a dictionary represents an instance of this class and return a new instance. If this dict does not match the correct pattern then just return the original dict.""" if "type" in d and d["type"] in Property.pTypes.values(): if "scalar" in d: @@ -101,7 +101,7 @@ def encode(self) -> dict: @staticmethod def hook(d: dict): - """Check if a dictionary represents an instance of this class and return a execution_engine instance. If this dict does not match + """Check if a dictionary represents an instance of this class and return a new instance. 
If this dict does not match the correct pattern then just return the original dict.""" if "type" in d and d["type"] in Property.pTypes.values(): if "array" in d: @@ -239,7 +239,7 @@ def __getitem__(self, idx: typing.Union[slice, int]) -> PropertyMap: if __name__ == "__main__": - """Test that opens a position list file, saves it to a execution_engine file and then checks that both versions + """Test that opens a position list file, saves it to a new file and then checks that both versions are still identical""" path1 = r"PositionList.pos" path2 = r"PositionListOut.pos" diff --git a/misc/examples/positionTransformation.py b/misc/examples/positionTransformation.py index cd588b01..66ab8c31 100644 --- a/misc/examples/positionTransformation.py +++ b/misc/examples/positionTransformation.py @@ -1,8 +1,8 @@ from misc.positions import PositionList -"""This example demonstrates how to generate execution_engine imaging positions from a set of positions after the sample has been picked up and likely shifted or rotated. +"""This example demonstrates how to generate new imaging positions from a set of positions after the sample has been picked up and likely shifted or rotated. This method relies on measuring a set of reference positions (at least 3) before and after moving the dish. You can then use these positions to generate an -affine transform. This affine transform can then be applied to your original cell positions in order to generate a execution_engine set of positions for the same cells. +affine transform. This affine transform can then be applied to your original cell positions in order to generate a new set of positions for the same cells. In the case of a standard cell culture dish it is best to use the corners of the glass coverslip as your reference locations. """ preTreatRefPositions = PositionList.load( @@ -19,10 +19,10 @@ ) # Load the positions of the cells we are measuring before the dish was removed. 
postTreatCellPositions = preTreatCellPositions.applyAffineTransform( transformMatrix -) # Transform the cell positions to the execution_engine expected locations. +) # Transform the cell positions to the new expected locations. postTreatCellPositions.save( r"experimentPath\transformedPositions.pos" -) # Save the execution_engine positions to a file that can be loaded by Micro-Manager. +) # Save the new positions to a file that can be loaded by Micro-Manager. preTreatRefPositions.plot() postTreatRefPositions.plot() diff --git a/misc/positions.py b/misc/positions.py index f3ccaebe..cc7b947a 100644 --- a/misc/positions.py +++ b/misc/positions.py @@ -221,7 +221,7 @@ def renameXYStage(self, label: str): """Change the name of the xy stage. Args: - label: The execution_engine name for the xy Stage + label: The new name for the xy Stage """ self.defaultXYStage = label self.getXYPosition().renameStage(label) @@ -230,7 +230,7 @@ def copy(self) -> MultiStagePosition: """Creates a copy fo the object Returns: - A execution_engine `MultiStagePosition` object. + A new `MultiStagePosition` object. """ return copy.deepcopy(self) @@ -340,7 +340,7 @@ def renameStage(self, label) -> PositionList: """Change the name of the xy stage. Args: - label: The execution_engine name for the xy Stage + label: The new name for the xy Stage Returns: A reference to this object @@ -523,7 +523,7 @@ def hover(event): def generateList(data: np.ndarray) -> PositionList: - """Example function to create a brand execution_engine position list in python. + """Example function to create a brand new position list in python. Args: data: An Nx2 array of xy coordinates. 
These coordinates will be converted to a PositionList which can be diff --git a/pycromanager/acq_future.py b/pycromanager/acq_future.py index 382ed7f9..94ff746d 100644 --- a/pycromanager/acq_future.py +++ b/pycromanager/acq_future.py @@ -41,7 +41,7 @@ def _add_notifications(self, axes_or_axes_list): def _notify(self, notification): """ - Called by the kernel notification dispatcher in order so that it can check off that the notification was + Called by the internal notification dispatcher in order so that it can check off that the notification was received. Want to store this, rather than just waiting around for it, in case the await methods are called after the notification has already been sent. """ @@ -66,10 +66,10 @@ def _notify(self, notification): def _monitor_axes(self, axes_or_axes_list): """ - In the case where the acquisition future is constructed for a Generator, the event_implementations to be monitored + In the case where the acquisition future is constructed for a Generator, the events to be monitored are not known until the generator is run. If user code awaits for an event and that event has already passed, the future must be able to check if the event has already passed and return immediately. - So this function is called by the generator as event_implementations are created to add them to the list of event_implementations to + So this function is called by the generator as events are created to add them to the list of events to keep track of. 
:param axes_or_axes_list: the axes of the event diff --git a/pycromanager/acquisition/acq_eng_py/internal/engine.py b/pycromanager/acquisition/acq_eng_py/internal/engine.py index 6c45a293..6018d783 100644 --- a/pycromanager/acquisition/acq_eng_py/internal/engine.py +++ b/pycromanager/acquisition/acq_eng_py/internal/engine.py @@ -1,16 +1,14 @@ import traceback +from concurrent.futures import Future from concurrent.futures import ThreadPoolExecutor import time import datetime -# from pycromanager.acquisition.execution_engine.acq_events import AcquisitionEvent -# TODO -AcquisitionEvent = None - +from pycromanager.acquisition.acq_eng_py.main.acquisition_event import AcquisitionEvent +from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata from pycromanager.acquisition.acq_eng_py.internal.hardware_sequences import HardwareSequences import pymmcore from pycromanager.acquisition.acq_eng_py.main.acq_notification import AcqNotification -# from pycromanager.acquisition.python_backend_acquisitions import PythonBackendAcquisition HARDWARE_ERROR_RETRIES = 6 DELAY_BETWEEN_RETRIES_MS = 5 @@ -35,7 +33,7 @@ def shutdown(self): @staticmethod def get_core(): - return Engine.singleton._core + return Engine.singleton.core @staticmethod def get_instance(): @@ -43,22 +41,54 @@ def get_instance(): def finish_acquisition(self, acq): def finish_acquisition_inner(): + if acq.is_debug_mode(): + Engine.get_core().logMessage("recieved acquisition finished signal") self.sequenced_events.clear() - self.execute_acquisition_event(acq, None) + if acq.is_debug_mode(): + Engine.get_core().logMessage("creating acquisition finished event") + self.execute_acquisition_event(AcquisitionEvent.create_acquisition_finished_event(acq)) acq.block_until_events_finished() return self.event_generator_executor.submit(finish_acquisition_inner) - def submit_event_iterator(self, acquisition, event_generator): + def submit_event_iterator(self, event_iterator): + def 
submit_event_iterator_inner(): + acq = None + while True: + try: + event = next(event_iterator, None) + except StopIteration: + traceback.print_exc() + break + if event is None: + break # iterator exhausted + acq = event.acquisition_ + if acq.is_debug_mode(): + Engine.get_core().logMessage("got event: " + event.to_string()) + for h in event.acquisition_.get_event_generation_hooks(): + event = h.run(event) + if event is None: + return + while event.acquisition_.is_paused(): + time.sleep(0.005) + try: + if acq.is_abort_requested(): + if acq.is_debug_mode(): + Engine.get_core().logMessage("acquisition aborted") + return + image_acquired_future = self.process_acquisition_event(event) + image_acquired_future.result() + + except Exception as ex: + traceback.print_exc() + acq.abort(ex) + raise ex - for event in event_generator: - image_acquired_future = self.acq_executor.submit(lambda: self.execute_acquisition_event(acquisition, event)) + last_image_future = self.process_acquisition_event(AcquisitionEvent.create_acquisition_sequence_end_event(acq)) + last_image_future.result() - # TODO: before, this used to use the event generator thread to do any transpiling (i.e. checking for sequenceing) - # in order to (theoretically) improve speed. Now we're just returning the image acquired future directly. 
- # Probably doesn't matter becuase this is suppoed to be async anyway - # return self.event_generator_executor.submit(submit_event_iterator_inner) + return self.event_generator_executor.submit(submit_event_iterator_inner) def check_for_default_devices(self, event: AcquisitionEvent): @@ -69,65 +99,134 @@ def check_for_default_devices(self, event: AcquisitionEvent): if event.get_x_position() is not None and (xy_stage is None or xy_stage == ""): raise Exception("Event requires an x position, but no Core-XYStage device is set") - # def process_acquisition_event(self, acquisition: PythonBackendAcquisition, - # event: AcquisitionEvent) -> Future: - - # TODO - # def process_acquisition_event_inner(): - # try: - # self.check_for_default_devices(event) - # if event.acquisition_.is_debug_mode(): - # self.core.logMessage("Processing event: " + str(event)) - # self.core.logMessage("checking for sequencing") - # if not self.sequenced_events and not event.is_acquisition_sequence_end_event(): - # self.sequenced_events.append(event) - # elif self.is_sequencable(self.sequenced_events, event, len(self.sequenced_events) + 1): - # # merge event into the sequence - # self.sequenced_events.append(event) - # else: - # # all event_implementations - # sequence_event = self.merge_sequence_event(self.sequenced_events) - # self.sequenced_events.clear() - # # Add in the start of the execution_engine sequence - # if not event.is_acquisition_sequence_end_event(): - # self.sequenced_events.append(event) - # if event.acquisition_.is_debug_mode(): - # self.core.logMessage("executing acquisition event") - # try: - # self.execute_acquisition_event(sequence_event) - # except HardwareControlException as e: - # raise e - # except Exception as e: - # traceback.print_exc() - # if self.core.is_sequence_running(): - # self.core.stop_sequence_acquisition() - # raise e - # - # - # return self.acq_executor.submit(process_acquisition_event_inner) - - def execute_acquisition_event(self, acquisition,event: 
AcquisitionEvent): + def process_acquisition_event(self, event: AcquisitionEvent) -> Future: + def process_acquisition_event_inner(): + try: + self.check_for_default_devices(event) + if event.acquisition_.is_debug_mode(): + self.core.logMessage("Processing event: " + str(event)) + self.core.logMessage("checking for sequencing") + if not self.sequenced_events and not event.is_acquisition_sequence_end_event(): + self.sequenced_events.append(event) + elif self.is_sequencable(self.sequenced_events, event, len(self.sequenced_events) + 1): + # merge event into the sequence + self.sequenced_events.append(event) + else: + # all events + sequence_event = self.merge_sequence_event(self.sequenced_events) + self.sequenced_events.clear() + # Add in the start of the new sequence + if not event.is_acquisition_sequence_end_event(): + self.sequenced_events.append(event) + if event.acquisition_.is_debug_mode(): + self.core.logMessage("executing acquisition event") + try: + self.execute_acquisition_event(sequence_event) + except HardwareControlException as e: + raise e + except Exception as e: + traceback.print_exc() + if self.core.is_sequence_running(): + self.core.stop_sequence_acquisition() + raise e + + + return self.acq_executor.submit(process_acquisition_event_inner) + + def execute_acquisition_event(self, event: AcquisitionEvent): # check if we should pause until the minimum start time of the event has occured - # while event.get_minimum_start_time_absolute() is not None and \ - # time.time() * 1000 < event.get_minimum_start_time_absolute(): - # wait_time = event.get_minimum_start_time_absolute() - time.time() * 1000 - # event.acquisition_.block_unless_aborted(wait_time) - - if event is not None: - # execute the event - for instruction in event.device_instructions: - instruction.execute() - else: + while event.get_minimum_start_time_absolute() is not None and \ + time.time() * 1000 < event.get_minimum_start_time_absolute(): + wait_time = event.get_minimum_start_time_absolute() 
- time.time() * 1000 + event.acquisition_.block_unless_aborted(wait_time) + + if event.is_acquisition_finished_event(): # signal to finish saving thread and mark acquisition as finished - if acquisition._are_events_finished(): + if event.acquisition_.are_events_finished(): return # Duplicate finishing event, possibly from x-ing out viewer - acquisition._add_to_output(None) - acquisition._post_notification(AcqNotification.create_acq_events_finished_notification()) + # send message acquisition finished message so things shut down properly + for h in event.acquisition_.get_event_generation_hooks(): + h.run(event) + h.close() + for h in event.acquisition_.get_before_hardware_hooks(): + h.run(event) + h.close() + for h in event.acquisition_.get_after_hardware_hooks(): + h.run(event) + h.close() + for h in event.acquisition_.get_after_camera_hooks(): + h.run(event) + h.close() + for h in event.acquisition_.get_after_exposure_hooks(): + h.run(event) + h.close() + event.acquisition_.add_to_output(self.core.TaggedImage(None, None)) + event.acquisition_.post_notification(AcqNotification.create_acq_events_finished_notification()) + + else: + event.acquisition_.post_notification(AcqNotification( + AcqNotification.Hardware, event.axisPositions_, AcqNotification.Hardware.PRE_HARDWARE)) + for h in event.acquisition_.get_before_hardware_hooks(): + event = h.run(event) + if event is None: + return # The hook cancelled this event + self.abort_if_requested(event, None) + hardware_sequences_in_progress = HardwareSequences() + try: + self.prepare_hardware(event, hardware_sequences_in_progress) + except HardwareControlException as e: + self.stop_hardware_sequences(hardware_sequences_in_progress) + raise e + + event.acquisition_.post_notification(AcqNotification( + AcqNotification.Hardware, event.axisPositions_, AcqNotification.Hardware.PRE_Z_DRIVE)) + for h in event.acquisition_.get_before_z_hooks(): + event = h.run(event) + if event is None: + return # The hook cancelled this event + 
self.abort_if_requested(event, None) + + try: + self.start_z_drive(event, hardware_sequences_in_progress) + except HardwareControlException as e: + self.stop_hardware_sequences(hardware_sequences_in_progress) + raise e + + event.acquisition_.post_notification(AcqNotification( + AcqNotification.Hardware, event.axisPositions_, AcqNotification.Hardware.POST_HARDWARE)) + for h in event.acquisition_.get_after_hardware_hooks(): + event = h.run(event) + if event is None: + return # The hook cancelled this event + self.abort_if_requested(event, hardware_sequences_in_progress) + # Hardware hook may have modified wait time, so check again if we should + # pause until the minimum start time of the event has occurred. + while event.get_minimum_start_time_absolute() is not None and \ + time.time() * 1000 < event.get_minimum_start_time_absolute(): + try: + self.abort_if_requested(event, hardware_sequences_in_progress) + wait_time = event.get_minimum_start_time_absolute() - time.time() * 1000 + event.acquisition_.block_unless_aborted(wait_time) + except Exception: + # Abort while waiting for next time point + return + + if event.should_acquire_image(): + if event.acquisition_.is_debug_mode(): + self.core.logMessage("acquiring image(s)") + try: + self.acquire_images(event, hardware_sequences_in_progress) + except TimeoutError: + # Don't abort on a timeout + # TODO: this could probably be an option added to the acquisition in the future + print("Timeout while acquiring images") + + # if the acquisition was aborted, make sure everything shuts down properly + self.abort_if_requested(event, hardware_sequences_in_progress) - def acquire_images(self, acquisition , - event: AcquisitionEvent, hardware_sequences_in_progress: HardwareSequences) -> None: + def acquire_images(self, event: AcquisitionEvent, hardware_sequences_in_progress: HardwareSequences) -> None: """ Acquire 1 or more images in a sequence, add some metadata, then put them into an output queue. 
@@ -135,25 +234,176 @@ def acquire_images(self, acquisition , If the event is a sequence and a sequence acquisition is started in the core, It should be completed by the time this method returns. """ - - acquisition.post_notification(AcqNotification( - AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.PRE_SEQUENCE_STARTED)) - - # add standard metadata - # TODO - # AcqEngMetadata.add_image_metadata(self.core, ti.tags, corresponding_event, - # current_time_ms - corresponding_event.acquisition_.get_start_time_ms(), - # exposure) - # add user metadata specified in the event - # acquisition.add_tags_to_tagged_image(ti.tags, corresponding_event.get_tags()) - - - - # acquisition._add_to_output(ti) - - # TODO stop sequences - # TODO: exceptiopn handling - # TODO: shutdown + camera_image_counts = event.get_camera_image_counts(self.core.get_camera_device()) + if event.get_sequence() is not None and len(event.get_sequence()) > 1: + # start sequences on one or more cameras + for camera_device_name, image_count in camera_image_counts.items(): + event.acquisition_.post_notification(AcqNotification( + AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.PRE_SEQUENCE_STARTED)) + self.core.start_sequence_acquisition( + camera_device_name, camera_image_counts[camera_device_name], 0, True) + else: + # snap one image with no sequencing + event.acquisition_.post_notification(AcqNotification( + AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.PRE_SNAP)) + if event.get_camera_device_name() is not None: + current_camera = self.core.get_camera_device() + width = self.core.get_image_width() + height = self.core.get_image_height() + self.core.set_camera_device(event.get_camera_device_name()) + self.core.snap_image() + self.core.set_camera_device(current_camera) + else: + # Unlike MMCoreJ, pymmcore does not automatically add this metadata when snapping, so need to do it manually + width = self.core.get_image_width() + height = 
self.core.get_image_height() + self.core.snap_image() + event.acquisition_.post_notification(AcqNotification( + AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.POST_SNAP)) + for h in event.acquisition_.get_after_exposure_hooks(): + h.run(event) + + # get elapsed time + current_time_ms = time.time() * 1000 + if event.acquisition_.get_start_time_ms() == -1: + # first image, initialize + event.acquisition_.set_start_time_ms(current_time_ms) + + # need to assign events to images as they come out, assuming they might be in arbitrary order, + # but that each camera itself is ordered + multi_cam_adapter_camera_event_lists = None + if event.get_sequence() is not None: + multi_cam_adapter_camera_event_lists = {} + for cam_index in range(self.core.get_number_of_camera_channels()): + multi_cam_adapter_camera_event_lists[cam_index] = [] + for e in event.get_sequence(): + multi_cam_adapter_camera_event_lists[cam_index].append(e) + + # Run a hook after the camera sequence acquisition has started. This can be used for + # external triggering of the camera (when it is in sequence mode). + # note: SnapImage will block until exposure finishes. + # If it is desired that AfterCameraHooks trigger cameras + # in Snap mode, one possibility is that those hooks (or SnapImage) should run + # in a separate thread, started after snapImage is started. But there is no + # guarantee that the camera will be ready to accept a trigger at that point. + for h in event.acquisition_.get_after_camera_hooks(): + h.run(event) + + if event.acquisition_.is_debug_mode(): + self.core.log_message("images acquired, copying from core") + start_copy_time = time.time() + # Loop through and collect all acquired images. 
There will be + # (# of images in sequence) x (# of camera channels) of them + timeout = False + for i in range(0, 1 if event.get_sequence() is None else len(event.get_sequence())): + if timeout: + # Cancel the rest of the sequence + self.stop_hardware_sequences(hardware_sequences_in_progress) + break + try: + exposure = self.core.get_exposure() if event.get_exposure() is None else event.get_exposure() + except Exception as ex: + raise Exception("Couldnt get exposure form core") + num_cam_channels = self.core.get_number_of_camera_channels() + + need_to_run_after_exposure_hooks = len(event.acquisition_.get_after_exposure_hooks()) > 0 + for cam_index in range(num_cam_channels): + ti = None + camera_name = None + while ti is None: + if event.acquisition_.is_abort_requested(): + return + try: + if event.get_sequence() is not None and len(event.get_sequence()) > 1: + if self.core.is_buffer_overflowed(): + raise Exception("Sequence buffer overflow") + try: + ti = self.core.pop_next_tagged_image() + camera_name = ti.tags["Camera"] + except Exception as e: + # continue waiting + if not self.core.is_sequence_running() and self.core.get_remaining_image_count() == 0: + raise Exception("Expected images did not arrive in circular buffer") + # check if timeout has been exceeded. This is used in the case of a + # camera waiting for a trigger that never comes. 
+ if event.get_sequence()[i].get_timeout_ms() is not None: + if time.time() - start_copy_time > event.get_sequence()[i].get_timeout_ms(): + timeout = True + self.core.stop_sequence_acquisition() + while self.core.is_sequence_running(): + time.sleep(0.001) + break + else: + try: + # TODO: probably there should be a timeout here too, but I'm + # not sure the snap_image system supports it (as opposed to sequences) + # This is a little different from the java version due to differences in metadata + # handling in the SWIG wrapper + camera_name = self.core.get_camera_device() + ti = self.core.get_tagged_image(cam_index, camera_name, height, width) + except Exception as e: + # continue waiting + pass + except Exception as ex: + # Sequence buffer overflow + e = HardwareControlException(str(ex)) + event.acquisition_.abort(e) + raise e + if need_to_run_after_exposure_hooks: + for camera_device_name in camera_image_counts.keys(): + if self.core.is_sequence_running(camera_device_name): + # all of the sequences are not yet done, so this will need to be handled + # on another iteration of the loop + break + event.acquisition_.post_notification(AcqNotification( + AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.POST_EXPOSURE)) + for h in event.acquisition_.get_after_exposure_hooks(): + h.run(event) + need_to_run_after_exposure_hooks = False + + if timeout: + break + # Doesn't seem to be a version in the API in which you don't have to do this + actual_cam_index = cam_index + if "Multi Camera-CameraChannelIndex" in ti.tags.keys() : + actual_cam_index = ti.tags["Multi Camera-CameraChannelIndex"] + if num_cam_channels == 1: + # probably a mistake in the core.... + actual_cam_index = 0 # Override index because not using multi cam mode right now + + corresponding_event = event + if event.get_sequence() is not None: + # Find the event that corresponds to the camera that captured this image. 
+ # This assumes that the images from a single camera are in order + # in the sequence, though different camera images may be interleaved + if event.get_sequence()[0].get_camera_device_name() is not None: + # camera is specified in the acquisition event. Find the first event that matches + # this camera name. + the_camera_name = camera_name + corresponding_event = next(filter(lambda + e: e.get_camera_device_name() is not None and e.get_camera_device_name() == the_camera_name, + multi_cam_adapter_camera_event_lists.get(actual_cam_index))) + multi_cam_adapter_camera_event_lists.get(actual_cam_index).remove(corresponding_event) + else: + # multi camera adapter or just using the default camera + corresponding_event = multi_cam_adapter_camera_event_lists.get(actual_cam_index).pop(0) + # add standard metadata + AcqEngMetadata.add_image_metadata(self.core, ti.tags, corresponding_event, + current_time_ms - corresponding_event.acquisition_.get_start_time_ms(), + exposure) + # add user metadata specified in the event + corresponding_event.acquisition_.add_tags_to_tagged_image(ti.tags, corresponding_event.get_tags()) + corresponding_event.acquisition_.add_to_image_metadata(ti.tags) + corresponding_event.acquisition_.add_to_output(ti) + + self.stop_hardware_sequences(hardware_sequences_in_progress) + + if event.get_sequence() is not None: + event.acquisition_.post_notification(AcqNotification( + AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.POST_SEQUENCE_STOPPED)) + + if timeout: + raise TimeoutError("Timeout waiting for images to arrive in circular buffer") def abort_if_requested(self, event: AcquisitionEvent, hardware_sequences_in_progress: HardwareSequences) -> None: if event.acquisition_.is_abort_requested(): @@ -190,7 +440,7 @@ def move_xy_stage(event): if event.is_xy_sequenced(): self.core.start_xy_stage_sequence(xy_stage) else: - # Could be sequenced over other device_implementations.py, in that case get xy position from first in sequence + # 
Could be sequenced over other devices, in that case get xy position from first in sequence prev_x_position = None if self.last_event is None else None if self.last_event.get_sequence() is None else \ self.last_event.get_sequence()[0].get_x_position() x_position = event.get_sequence()[ @@ -377,11 +627,11 @@ def change_additional_properties(event): # Compare to last event to see what needs to change if self.last_event is not None and self.last_event.acquisition_ != event.acquisition_: - self.last_event = None # Update all hardware if switching to a execution_engine acquisition + self.last_event = None # Update all hardware if switching to a new acquisition - # Other stage device_implementations.py - loop_hardware_command_retries(lambda: move_other_stage_devices(event), "Moving other stage device_implementations.py") + # Other stage devices + loop_hardware_command_retries(lambda: move_other_stage_devices(event), "Moving other stage devices") # XY Stage loop_hardware_command_retries(lambda: move_xy_stage(event), "Moving XY stage") # Channels @@ -496,7 +746,7 @@ def is_sequencable(self, previous_events, next_event, new_seq_length): return False # arbitrary z stages - # TODO implement sequences along arbitrary other stage device_implementations.py + # TODO implement sequences along arbitrary other stage devices for stage_device in previous_event.get_stage_device_names(): return False @@ -522,7 +772,7 @@ def is_sequencable(self, previous_events, next_event, new_seq_length): new_seq_length > self.core.get_exposure_sequence_max_length(self.core.get_camera_device()): return False - # If there is a nonzero delay between event_implementations, then its not sequencable + # If there is a nonzero delay between events, then its not sequencable if previous_event.get_t_index() is not None and next_event.get_t_index() is not None and \ previous_event.get_t_index() != next_event.get_t_index(): if previous_event.get_minimum_start_time_absolute() is not None and \ diff --git 
a/pycromanager/acquisition/acq_eng_py/main/AcqEngPy_Acquisition.py b/pycromanager/acquisition/acq_eng_py/main/AcqEngPy_Acquisition.py new file mode 100644 index 00000000..6bc6fa90 --- /dev/null +++ b/pycromanager/acquisition/acq_eng_py/main/AcqEngPy_Acquisition.py @@ -0,0 +1,267 @@ +import json +import queue +import traceback +import threading + +from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata +from pycromanager.acquisition.acq_eng_py.internal.engine import Engine +from pycromanager.acquisition.acq_eng_py.main.acq_notification import AcqNotification +from pycromanager.acquisition.acq_eng_py.internal.notification_handler import NotificationHandler + + +class Acquisition(): + + EVENT_GENERATION_HOOK = 0 + # This hook runs before changes to the hardware (corresponding to the instructions in the + # event) are made + BEFORE_HARDWARE_HOOK = 1 + # This hook runs after all changes to the hardware except dor setting th Z drive have been + # made. This is useful for things such as autofocus. + BEFORE_Z_DRIVE = 2 + # This hook runs after changes to the hardware took place, but before camera exposure + # (either a snap or a sequence) is started + AFTER_HARDWARE_HOOK = 3 + # Hook runs after the camera sequence acquisition has started. This can be used for + # external triggering of the camera + AFTER_CAMERA_HOOK = 4 + # Hook runs after the camera exposure ended (when possible, before readout of the camera + # and availability of the images in memory). 
+ AFTER_EXPOSURE_HOOK = 5 + + IMAGE_QUEUE_SIZE = 30 + + def __init__(self, sink, summary_metadata_processor=None, initialize=True): + self.xy_stage_ = None + self.events_finished_ = threading.Event() + self.abort_requested_ = threading.Event() + self.start_time_ms_ = -1 + self.paused_ = False + self.event_generation_hooks_ = [] + self.before_hardware_hooks_ = [] + self.before_z_hooks_ = [] + self.after_hardware_hooks_ = [] + self.after_camera_hooks_ = [] + self.after_exposure_hooks_ = [] + self.image_processors_ = [] + self.first_dequeue_ = queue.Queue(maxsize=self.IMAGE_QUEUE_SIZE) + self.processor_output_queues_ = {} + self.debug_mode_ = False + self.abort_exception_ = None + self.image_metadata_processor_ = None + self.notification_handler_ = NotificationHandler() + self.started_ = False + self.core_ = Engine.get_core() + self.summary_metadata_processor_ = summary_metadata_processor + self.data_sink_ = sink + if initialize: + self.initialize() + + def post_notification(self, notification): + self.notification_handler_.post_notification(notification) + + def add_acq_notification_listener(self, post_notification_fn): + self.notification_handler_.add_listener(post_notification_fn) + + def get_data_sink(self): + return self.data_sink_ + + def set_debug_mode(self, debug): + self.debug_mode_ = debug + + def is_debug_mode(self): + return self.debug_mode_ + + def is_abort_requested(self): + return self.abort_requested_.is_set() + + def abort(self, e=None): + if e: + self.abort_exception_ = e + if self.abort_requested_.is_set(): + return + self.abort_requested_.set() + if self.is_paused(): + self.set_paused(False) + Engine.get_instance().finish_acquisition(self) + + def check_for_exceptions(self): + if self.abort_exception_: + raise self.abort_exception_ + + def add_to_summary_metadata(self, summary_metadata): + if self.summary_metadata_processor_: + self.summary_metadata_processor_(summary_metadata) + + def add_to_image_metadata(self, tags): + if 
self.image_metadata_processor_: + self.image_metadata_processor_(tags) + + def add_tags_to_tagged_image(self, tags, more_tags): + if not more_tags: + return + more_tags_object = json.loads(json.dumps(more_tags)) + tags['AcqEngMetadata.TAGS'] = more_tags_object + + def submit_event_iterator(self, evt): + if not self.started_: + self.start() + return Engine.get_instance().submit_event_iterator(evt) + + def start_saving_thread(self): + def saving_thread(acq): + try: + while True: + if acq.debug_mode_: + acq.core_.log_message(f"Image queue size: {len(acq.first_dequeue_)}") + if not acq.image_processors_: + if acq.debug_mode_: + acq.core_.log_message("waiting for image to save") + img = acq.first_dequeue_.get() + if acq.debug_mode_: + acq.core_.log_message("got image to save") + acq.save_image(img) + if img.tags is None and img.pix is None: + break + else: + img = acq.processor_output_queues_[acq.image_processors_[-1]].get() + if acq.data_sink_: + if acq.debug_mode_: + acq.core_.log_message("Saving image") + if img.tags is None and img.pix is None: + break + acq.save_image(img) + if acq.debug_mode_: + acq.core_.log_message("Finished saving image") + except Exception as ex: + traceback.print_exc() + acq.abort(ex) + finally: + acq.save_image(acq.core_.TaggedImage(None, None)) + + threading.Thread(target=saving_thread, args=(self,)).start() + + def add_image_processor(self, p): + if self.started_: + raise RuntimeError("Cannot add processor after acquisition started") + self.image_processors_.append(p) + self.processor_output_queues_[p] = queue.Queue(maxsize=self.IMAGE_QUEUE_SIZE) + if len(self.image_processors_) == 1: + p.set_acq_and_queues(self, self.first_dequeue_, self.processor_output_queues_[p]) + else: + p.set_acq_and_queues(self, self.processor_output_queues_[self.image_processors_[-2]], + self.processor_output_queues_[self.image_processors_[-1]]) + + def add_hook(self, h, type_): + if self.started_: + raise RuntimeError("Cannot add hook after acquisition started") 
+ if type_ == self.EVENT_GENERATION_HOOK: + self.event_generation_hooks_.append(h) + elif type_ == self.BEFORE_HARDWARE_HOOK: + self.before_hardware_hooks_.append(h) + elif type_ == self.BEFORE_Z_HOOK: + self.before_z_hooks_.append(h) + elif type_ == self.AFTER_HARDWARE_HOOK: + self.after_hardware_hooks_.append(h) + elif type_ == self.AFTER_CAMERA_HOOK: + self.after_camera_hooks_.append(h) + elif type_ == self.AFTER_EXPOSURE_HOOK: + self.after_exposure_hooks_.append(h) + + def initialize(self): + summary_metadata = AcqEngMetadata.make_summary_metadata(self.core_, self) + self.add_to_summary_metadata(summary_metadata) + if self.data_sink_: + self.data_sink_.initialize(summary_metadata) + + def start(self): + if self.data_sink_: + self.start_saving_thread() + self.post_notification(AcqNotification.create_acq_started_notification()) + self.started_ = True + + def save_image(self, image): + if image.tags is None and image.pix is None: + self.data_sink_.finish() + self.post_notification(AcqNotification.create_data_sink_finished_notification()) + else: + pixels, metadata = image.pix, image.tags + axes = AcqEngMetadata.get_axes(metadata) + self.data_sink_.put_image(axes, pixels, metadata) + self.post_notification(AcqNotification.create_image_saved_notification(axes)) + + def get_start_time_ms(self): + return self.start_time_ms_ + + def set_start_time_ms(self, time): + self.start_time_ms_ = time + + def is_paused(self): + return self.paused_ + + def is_started(self): + return self.started_ + + def set_paused(self, pause): + self.paused_ = pause + + def get_summary_metadata(self): + return self.summary_metadata_ + + # perhaps not needed in python like it is in java + # def anything_acquired(self): + # return not self.data_sink_ or self.data_sink_.anything_acquired() + + def add_image_metadata_processor(self, processor): + if not self.image_metadata_processor_: + self.image_metadata_processor_ = processor + else: + raise RuntimeError("Multiple metadata processors not 
supported") + + def get_event_generation_hooks(self): + return self.event_generation_hooks_ + + def get_before_hardware_hooks(self): + return self.before_hardware_hooks_ + + def get_before_z_hooks(self): + return self.before_z_hooks_ + + def get_after_hardware_hooks(self): + return self.after_hardware_hooks_ + + def get_after_camera_hooks(self): + return self.after_camera_hooks_ + + def get_after_exposure_hooks(self): + return self.after_exposure_hooks_ + + def add_to_output(self, ti): + try: + if ti.tags is None and ti.pix is None: + self.events_finished_.set() + self.first_dequeue_.put(ti) + except Exception as ex: + raise RuntimeError(ex) + + def finish(self): + Engine.get_instance().finish_acquisition(self) + + def are_events_finished(self): + return self.events_finished_.is_set() + + def block_until_events_finished(self, timeout=None): + """Blocks until all events have been processed.""" + self.events_finished_.wait(timeout) + + def block_unless_aborted(self, timeout_ms=None): + """Blocks until acquisition is aborted.""" + self.abort_requested_.wait(timeout_ms / 1000) + + + def get_image_transfer_queue_size(self): + return self.IMAGE_QUEUE_SIZE + + def get_image_transfer_queue_count(self): + return len(self.first_dequeue_) + + diff --git a/pycromanager/acquisition/acq_eng_py/main/acq_eng_metadata.py b/pycromanager/acquisition/acq_eng_py/main/acq_eng_metadata.py index 99243972..aef8807e 100644 --- a/pycromanager/acquisition/acq_eng_py/main/acq_eng_metadata.py +++ b/pycromanager/acquisition/acq_eng_py/main/acq_eng_metadata.py @@ -114,7 +114,7 @@ def make_summary_metadata(core, acq): AcqEngMetadata.set_pixel_type_from_byte_depth(summary, byte_depth) AcqEngMetadata.set_pixel_size_um(summary, core.get_pixel_size_um()) - # Info about core device_implementations.py + # Info about core devices try: AcqEngMetadata.set_core_xy(summary, core.get_xy_stage_device()) AcqEngMetadata.set_core_focus(summary, core.get_focus_device()) @@ -125,7 +125,7 @@ def 
make_summary_metadata(core, acq): AcqEngMetadata.set_core_slm(summary, core.get_slm_device()) AcqEngMetadata.set_core_shutter(summary, core.get_shutter_device()) except Exception as e: - raise RuntimeError("couldn't get info from core about device_implementations.py") + raise RuntimeError("couldn't get info from core about devices") # TODO restore # # Affine transform diff --git a/pycromanager/acquisition/acq_eng_py/main/acquisition_event.py b/pycromanager/acquisition/acq_eng_py/main/acquisition_event.py index 73660b12..22d7601d 100644 --- a/pycromanager/acquisition/acq_eng_py/main/acquisition_event.py +++ b/pycromanager/acquisition/acq_eng_py/main/acquisition_event.py @@ -1,451 +1,451 @@ -# from collections import namedtuple -# import json -# from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata -# -# class AcquisitionEvent: -# class SpecialFlag: -# ACQUISITION_FINISHED = "AcqusitionFinished" -# ACQUISITION_SEQUENCE_END = "AcqusitionSequenceEnd" -# -# def __init__(self, acq, sequence=None): -# self.acquisition_ = acq -# self.axisPositions_ = {} -# self.camera_ = None -# self.timeout_ms_ = None -# self.configGroup_ = None -# self.configPreset_ = None -# self.exposure_ = None -# self.miniumumStartTime_ms_ = None -# self.zPosition_ = None -# self.xPosition_ = None -# self.yPosition_ = None -# self.stageCoordinates_ = {} -# self.stageDeviceNamesToAxisNames_ = {} -# self.tags_ = {} -# self.acquireImage_ = None -# self.slmImage_ = None -# self.properties_ = set() -# self.sequence_ = None -# self.xySequenced_ = False -# self.zSequenced_ = False -# self.exposureSequenced_ = False -# self.configGroupSequenced_ = False -# self.specialFlag_ = None -# -# if sequence: -# self.acquisition_ = sequence[0].acquisition_ -# self.miniumumStartTime_ms_ = sequence[0].miniumumStartTime_ms_ -# self.sequence_ = list(sequence) -# zPosSet = set() -# xPosSet = set() -# yPosSet = set() -# exposureSet = set() -# configSet = set() -# for event in self.sequence_: 
-# if event.zPosition_ is not None: -# zPosSet.add(event.get_z_position()) -# if event.xPosition_ is not None: -# xPosSet.add(event.get_x_position()) -# if event.yPosition_ is not None: -# yPosSet.add(event.get_y_position()) -# if event.exposure_ is not None: -# exposureSet.add(event.get_exposure()) -# if event.configPreset_ is not None: -# configSet.add(event.get_config_preset()) -# self.exposureSequenced_ = len(exposureSet) > 1 -# self.configGroupSequenced_ = len(configSet) > 1 -# self.xySequenced_ = len(xPosSet) > 1 and len(yPosSet) > 1 -# self.zSequenced_ = len(zPosSet) > 1 -# if sequence[0].exposure_ and not self.exposureSequenced_: -# self.exposure_ = sequence[0].exposure_ -# -# -# def copy(self): -# e = AcquisitionEvent(self.acquisition_) -# e.axisPositions_ = self.axisPositions_.copy() -# e.configPreset_ = self.configPreset_ -# e.configGroup_ = self.configGroup_ -# e.stageCoordinates_ = self.stageCoordinates_.copy() -# e.stageDeviceNamesToAxisNames_ = self.stageDeviceNamesToAxisNames_.copy() -# e.xPosition_ = self.xPosition_ -# e.yPosition_ = self.yPosition_ -# e.zPosition_ = self.zPosition_ -# e.miniumumStartTime_ms_ = self.miniumumStartTime_ms_ -# e.slmImage_ = self.slmImage_ -# e.acquireImage_ = self.acquireImage_ -# e.properties_ = set(self.properties_) -# e.camera_ = self.camera_ -# e.timeout_ms_ = self.timeout_ms_ -# e.setTags(self.tags_) # Assuming setTags is a method in the class -# return e -# -# @staticmethod -# def event_to_json(e): -# data = {} -# -# if e.is_acquisition_finished_event(): -# data["special"] = "acquisition-end" -# return data -# elif e.is_acquisition_sequence_end_event(): -# data["special"] = "sequence-end" -# return data -# -# if e.miniumumStartTime_ms_: -# data["min_start_time"] = e.miniumumStartTime_ms_ / 1000 -# -# if e.has_config_group(): -# data["config_group"] = [e.configGroup_, e.configPreset_] -# -# if e.exposure_ is not None: -# data["exposure"] = e.exposure_ -# -# if e.slmImage_: -# data["slm_pattern"] = e.slmImage_ -# 
-# if e.timeout_ms_ is not None: -# data["timeout_ms"] = e.timeout_ms_ -# -# axes = {axis: e.axisPositions_[axis] for axis in e.axisPositions_} -# if axes: -# data["axes"] = axes -# -# stage_positions = [[stageDevice, e.get_stage_single_axis_stage_position(stageDevice)] -# for stageDevice in e.get_stage_device_names()] -# if stage_positions: -# data["stage_positions"] = stage_positions -# -# if e.zPosition_ is not None: -# data["z"] = e.zPosition_ -# -# if e.xPosition_ is not None: -# data["x"] = e.xPosition_ -# -# if e.yPosition_ is not None: -# data["y"] = e.yPosition_ -# -# if e.camera_: -# data["camera"] = e.camera_ -# -# if e.get_tags() and e.get_tags(): # Assuming getTags is a method in the class -# data["tags"] = {key: value for key, value in e.getTags().items()} -# -# props = [[t.dev, t.prop, t.val] for t in e.properties_] -# if props: -# data["properties"] = props -# -# return data -# -# @staticmethod -# def event_from_json(data, acq): -# if "special" in data: -# if data["special"] == "acquisition-end": -# return AcquisitionEvent.create_acquisition_finished_event(acq) -# elif data["special"] == "sequence-end": -# return AcquisitionEvent.create_acquisition_sequence_end_event(acq) -# -# event = AcquisitionEvent(acq) -# -# if "axes" in data: -# for axisLabel, value in data["axes"].items(): -# event.axisPositions_[axisLabel] = value -# -# if "min_start_time" in data: -# event.miniumumStartTime_ms_ = int(data["min_start_time"] * 1000) -# -# if "timeout_ms" in data: -# event.timeout_ms_ = float(data["timeout_ms"]) -# -# if "config_group" in data: -# event.configGroup_ = data["config_group"][0] -# event.configPreset_ = data["config_group"][1] -# -# if "exposure" in data: -# event.exposure_ = float(data["exposure"]) -# -# # if "timeout_ms" in data: -# # event.slmImage_ = float(data["timeout_ms"]) -# -# if "stage_positions" in data: -# for stagePos in data["stage_positions"]: -# event.set_stage_coordinate(stagePos[0], stagePos[1]) -# -# if "z" in data: -# 
event.zPosition_ = float(data["z"]) -# -# if "stage" in data: -# deviceName = data["stage"]["device_name"] -# position = data["stage"]["position"] -# event.axisPositions_[deviceName] = float(position) -# if "axis_name" in data["stage"]: -# axisName = data["stage"]["axis_name"] -# event.stageDeviceNamesToAxisNames_[deviceName] = axisName -# -# # # Assuming XYTiledAcquisition is a class and AcqEngMetadata is a class or module with constants -# # if isinstance(event.acquisition_, XYTiledAcquisition): -# # posIndex = event.acquisition_.getPixelStageTranslator().getPositionIndices( -# # [int(event.axisPositions_[AcqEngMetadata.AXES_GRID_ROW])], -# # [int(event.axisPositions_[AcqEngMetadata.AXES_GRID_COL])])[0] -# # xyPos = event.acquisition_.getPixelStageTranslator().getXYPosition(posIndex).getCenter() -# # event.xPosition_ = xyPos.x -# # event.yPosition_ = xyPos.y -# -# if "x" in data: -# event.xPosition_ = float(data["x"]) -# -# if "y" in data: -# event.yPosition_ = float(data["y"]) -# -# if "slm_pattern" in data: -# event.slmImage_ = data["slm_pattern"] -# -# if "camera" in data: -# event.camera_ = data["camera"] -# -# if "tags" in data: -# tags = {key: value for key, value in data["tags"].items()} -# event.setTags(tags) -# -# if "properties" in data: -# for trip in data["properties"]: -# t = ThreeTuple(trip[0], trip[1], trip[2]) -# event.properties_.add(t) -# -# return event -# -# def to_json(self): -# if self.sequence_: -# event_implementations = [self.event_to_json(e) for e in self.sequence_] -# return event_implementations -# else: -# return self.event_to_json(self) -# -# @staticmethod -# def from_json(data, acq): -# if not isinstance(data, list): -# return AcquisitionEvent.event_from_json(data, acq) -# else: -# sequence = [AcquisitionEvent.event_from_json(event, acq) for event in data] -# return AcquisitionEvent(acq, sequence=sequence) -# -# def get_camera_device_name(self): -# return self.camera_ -# -# def set_camera_device_name(self, camera): -# self.camera_ = 
camera -# -# def get_additional_properties(self): -# return [(t.dev, t.prop, t.val) for t in self.properties_] -# -# def should_acquire_image(self): -# if self.sequence_: -# return True -# return self.configPreset_ is not None or self.axisPositions_ is not None -# -# def has_config_group(self): -# return self.configPreset_ is not None and self.configGroup_ is not None -# -# def get_config_preset(self): -# return self.configPreset_ -# -# def get_config_group(self): -# return self.configGroup_ -# -# def set_config_preset(self, config): -# self.configPreset_ = config -# -# def set_config_group(self, group): -# self.configGroup_ = group -# -# def get_exposure(self): -# return self.exposure_ -# -# def set_exposure(self, exposure): -# self.exposure_ = exposure -# -# def set_property(self, device, property, value): -# self.properties_.add(ThreeTuple(device, property, value)) -# -# def set_minimum_start_time(self, l): -# self.miniumumStartTime_ms_ = l -# -# def get_defined_axes(self): -# return set(self.axisPositions_.keys()) -# -# def set_axis_position(self, label, position): -# if position is None: -# raise Exception("Cannot set axis position to null") -# self.axisPositions_[label] = position -# -# def set_stage_coordinate(self, deviceName, v, axisName=None): -# self.stageCoordinates_[deviceName] = v -# self.stageDeviceNamesToAxisNames_[deviceName] = deviceName if axisName is None else axisName -# -# def get_stage_single_axis_stage_position(self, deviceName): -# return self.stageCoordinates_.get(deviceName) -# -# def get_axis_positions(self): -# return self.axisPositions_ -# -# def get_axis_position(self, label): -# return self.axisPositions_.get(label) -# -# def get_timeout_ms(self): -# return self.timeout_ms_ -# -# def set_time_index(self, index): -# self.set_axis_position(AcqEngMetadata.TIME_AXIS, index) -# -# def set_channel_name(self, name): -# self.set_axis_position(AcqEngMetadata.CHANNEL_AXIS, name) -# -# def get_slm_image(self): -# return self.slmImage_ -# -# def 
set_z(self, index, position): -# if index is not None: -# self.set_axis_position(AcqEngMetadata.Z_AXIS, index) -# self.zPosition_ = position -# -# def get_t_index(self): -# return self.get_axis_position(AcqEngMetadata.TIME_AXIS) -# -# def get_z_index(self): -# return self.get_axis_position(AcqEngMetadata.Z_AXIS) -# -# def get_device_axis_name(self, deviceName): -# if deviceName not in self.stageDeviceNamesToAxisNames_: -# raise Exception(f"No axis name for device {deviceName}. call setStageCoordinate first") -# return self.stageDeviceNamesToAxisNames_[deviceName] -# -# def get_stage_device_names(self): -# return set(self.stageDeviceNamesToAxisNames_.keys()) -# -# @staticmethod -# def create_acquisition_finished_event(acq): -# evt = AcquisitionEvent(acq) -# evt.specialFlag_ = AcquisitionEvent.SpecialFlag.ACQUISITION_FINISHED -# return evt -# -# def is_acquisition_finished_event(self): -# return self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_FINISHED -# -# @staticmethod -# def create_acquisition_sequence_end_event(acq): -# evt = AcquisitionEvent(acq) -# evt.specialFlag_ = AcquisitionEvent.SpecialFlag.ACQUISITION_SEQUENCE_END -# return evt -# -# def is_acquisition_sequence_end_event(self): -# return self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_SEQUENCE_END -# -# def get_z_position(self): -# return self.zPosition_ -# -# def get_minimum_start_time_absolute(self): -# if self.miniumumStartTime_ms_ is None: -# return None -# return self.acquisition_.get_start_time_ms() + self.miniumumStartTime_ms_ -# -# def get_sequence(self): -# return self.sequence_ -# -# def is_exposure_sequenced(self): -# return self.exposureSequenced_ -# -# def is_config_group_sequenced(self): -# return self.configGroupSequenced_ -# -# def is_xy_sequenced(self): -# return self.xySequenced_ -# -# def is_z_sequenced(self): -# return self.zSequenced_ -# -# def get_x_position(self): -# return self.xPosition_ -# -# def get_camera_image_counts(self, 
default_camera_device_name): -# """ -# Get the number of images to be acquired on each camera in a sequence event. -# For a non-sequence event, the number of images is 1, and the camera is the core camera. -# This is passed in as an argument in order to avoid this class talking to the core directly. -# -# Args: -# default_camera_device_name (str): Default camera device name. -# -# Returns: -# defaultdict: Dictionary containing the camera device names as keys and image counts as values. -# """ -# # Figure out how many images on each camera and start sequence with appropriate number on each -# camera_image_counts = {} -# camera_device_names = set() -# if self.get_sequence() is None: -# camera_image_counts[default_camera_device_name] = 1 -# return camera_image_counts -# -# for event in self.get_sequence(): -# camera_device_names.add(event.get_camera_device_name() if event.get_camera_device_name() is not None else -# default_camera_device_name) -# if None in camera_device_names: -# camera_device_names.remove(None) -# camera_device_names.add(default_camera_device_name) -# -# for camera_device_name in camera_device_names: -# camera_image_counts[camera_device_name] = sum(1 for event in self.get_sequence() -# if event.get_camera_device_name() == camera_device_name) -# -# if len(camera_device_names) == 1 and camera_device_name == default_camera_device_name: -# camera_image_counts[camera_device_name] = len(self.get_sequence()) -# -# return camera_image_counts -# -# def get_y_position(self): -# return self.yPosition_ -# -# def get_position_name(self): -# axisPosition_ = self.get_axis_position(AcqEngMetadata.POSITION_AXIS) -# if isinstance(axisPosition_, str): -# return axisPosition_ -# return None -# -# def set_x(self, x): -# self.xPosition_ = x -# -# def set_y(self, y): -# self.yPosition_ = y -# -# def set_tags(self, tags): -# self.tags_.clear() -# if tags: -# self.tags_.update(tags) -# -# def get_tags(self): -# return dict(self.tags_) -# -# def __str__(self): -# if 
self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_FINISHED: -# return "Acq finished event" -# elif self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_SEQUENCE_END: -# return "Acq sequence end event" -# -# builder = [] -# for deviceName in self.stageDeviceNamesToAxisNames_.keys(): -# builder.append(f"\t{deviceName}: {self.get_stage_single_axis_stage_position(deviceName)}") -# -# if self.zPosition_ is not None: -# builder.append(f"z {self.zPosition_}") -# if self.xPosition_ is not None: -# builder.append(f"x {self.xPosition_}") -# if self.yPosition_ is not None: -# builder.append(f"y {self.yPosition_}") -# -# for axis in self.axisPositions_.keys(): -# builder.append(f"\t{axis}: {self.axisPositions_[axis]}") -# -# if self.camera_ is not None: -# builder.append(f"\t{self.camera_}: {self.camera_}") -# -# return ' '.join(builder) -# -# -# ThreeTuple = namedtuple('ThreeTuple', ['dev', 'prop', 'val']) +from collections import namedtuple +import json +from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata + +class AcquisitionEvent: + class SpecialFlag: + ACQUISITION_FINISHED = "AcqusitionFinished" + ACQUISITION_SEQUENCE_END = "AcqusitionSequenceEnd" + + def __init__(self, acq, sequence=None): + self.acquisition_ = acq + self.axisPositions_ = {} + self.camera_ = None + self.timeout_ms_ = None + self.configGroup_ = None + self.configPreset_ = None + self.exposure_ = None + self.miniumumStartTime_ms_ = None + self.zPosition_ = None + self.xPosition_ = None + self.yPosition_ = None + self.stageCoordinates_ = {} + self.stageDeviceNamesToAxisNames_ = {} + self.tags_ = {} + self.acquireImage_ = None + self.slmImage_ = None + self.properties_ = set() + self.sequence_ = None + self.xySequenced_ = False + self.zSequenced_ = False + self.exposureSequenced_ = False + self.configGroupSequenced_ = False + self.specialFlag_ = None + + if sequence: + self.acquisition_ = sequence[0].acquisition_ + self.miniumumStartTime_ms_ = 
sequence[0].miniumumStartTime_ms_ + self.sequence_ = list(sequence) + zPosSet = set() + xPosSet = set() + yPosSet = set() + exposureSet = set() + configSet = set() + for event in self.sequence_: + if event.zPosition_ is not None: + zPosSet.add(event.get_z_position()) + if event.xPosition_ is not None: + xPosSet.add(event.get_x_position()) + if event.yPosition_ is not None: + yPosSet.add(event.get_y_position()) + if event.exposure_ is not None: + exposureSet.add(event.get_exposure()) + if event.configPreset_ is not None: + configSet.add(event.get_config_preset()) + self.exposureSequenced_ = len(exposureSet) > 1 + self.configGroupSequenced_ = len(configSet) > 1 + self.xySequenced_ = len(xPosSet) > 1 and len(yPosSet) > 1 + self.zSequenced_ = len(zPosSet) > 1 + if sequence[0].exposure_ and not self.exposureSequenced_: + self.exposure_ = sequence[0].exposure_ + + + def copy(self): + e = AcquisitionEvent(self.acquisition_) + e.axisPositions_ = self.axisPositions_.copy() + e.configPreset_ = self.configPreset_ + e.configGroup_ = self.configGroup_ + e.stageCoordinates_ = self.stageCoordinates_.copy() + e.stageDeviceNamesToAxisNames_ = self.stageDeviceNamesToAxisNames_.copy() + e.xPosition_ = self.xPosition_ + e.yPosition_ = self.yPosition_ + e.zPosition_ = self.zPosition_ + e.miniumumStartTime_ms_ = self.miniumumStartTime_ms_ + e.slmImage_ = self.slmImage_ + e.acquireImage_ = self.acquireImage_ + e.properties_ = set(self.properties_) + e.camera_ = self.camera_ + e.timeout_ms_ = self.timeout_ms_ + e.setTags(self.tags_) # Assuming setTags is a method in the class + return e + + @staticmethod + def event_to_json(e): + data = {} + + if e.is_acquisition_finished_event(): + data["special"] = "acquisition-end" + return data + elif e.is_acquisition_sequence_end_event(): + data["special"] = "sequence-end" + return data + + if e.miniumumStartTime_ms_: + data["min_start_time"] = e.miniumumStartTime_ms_ / 1000 + + if e.has_config_group(): + data["config_group"] = [e.configGroup_, 
e.configPreset_] + + if e.exposure_ is not None: + data["exposure"] = e.exposure_ + + if e.slmImage_: + data["slm_pattern"] = e.slmImage_ + + if e.timeout_ms_ is not None: + data["timeout_ms"] = e.timeout_ms_ + + axes = {axis: e.axisPositions_[axis] for axis in e.axisPositions_} + if axes: + data["axes"] = axes + + stage_positions = [[stageDevice, e.get_stage_single_axis_stage_position(stageDevice)] + for stageDevice in e.get_stage_device_names()] + if stage_positions: + data["stage_positions"] = stage_positions + + if e.zPosition_ is not None: + data["z"] = e.zPosition_ + + if e.xPosition_ is not None: + data["x"] = e.xPosition_ + + if e.yPosition_ is not None: + data["y"] = e.yPosition_ + + if e.camera_: + data["camera"] = e.camera_ + + if e.get_tags() and e.get_tags(): # Assuming getTags is a method in the class + data["tags"] = {key: value for key, value in e.getTags().items()} + + props = [[t.dev, t.prop, t.val] for t in e.properties_] + if props: + data["properties"] = props + + return data + + @staticmethod + def event_from_json(data, acq): + if "special" in data: + if data["special"] == "acquisition-end": + return AcquisitionEvent.create_acquisition_finished_event(acq) + elif data["special"] == "sequence-end": + return AcquisitionEvent.create_acquisition_sequence_end_event(acq) + + event = AcquisitionEvent(acq) + + if "axes" in data: + for axisLabel, value in data["axes"].items(): + event.axisPositions_[axisLabel] = value + + if "min_start_time" in data: + event.miniumumStartTime_ms_ = int(data["min_start_time"] * 1000) + + if "timeout_ms" in data: + event.timeout_ms_ = float(data["timeout_ms"]) + + if "config_group" in data: + event.configGroup_ = data["config_group"][0] + event.configPreset_ = data["config_group"][1] + + if "exposure" in data: + event.exposure_ = float(data["exposure"]) + + # if "timeout_ms" in data: + # event.slmImage_ = float(data["timeout_ms"]) + + if "stage_positions" in data: + for stagePos in data["stage_positions"]: + 
event.set_stage_coordinate(stagePos[0], stagePos[1]) + + if "z" in data: + event.zPosition_ = float(data["z"]) + + if "stage" in data: + deviceName = data["stage"]["device_name"] + position = data["stage"]["position"] + event.axisPositions_[deviceName] = float(position) + if "axis_name" in data["stage"]: + axisName = data["stage"]["axis_name"] + event.stageDeviceNamesToAxisNames_[deviceName] = axisName + + # # Assuming XYTiledAcquisition is a class and AcqEngMetadata is a class or module with constants + # if isinstance(event.acquisition_, XYTiledAcquisition): + # posIndex = event.acquisition_.getPixelStageTranslator().getPositionIndices( + # [int(event.axisPositions_[AcqEngMetadata.AXES_GRID_ROW])], + # [int(event.axisPositions_[AcqEngMetadata.AXES_GRID_COL])])[0] + # xyPos = event.acquisition_.getPixelStageTranslator().getXYPosition(posIndex).getCenter() + # event.xPosition_ = xyPos.x + # event.yPosition_ = xyPos.y + + if "x" in data: + event.xPosition_ = float(data["x"]) + + if "y" in data: + event.yPosition_ = float(data["y"]) + + if "slm_pattern" in data: + event.slmImage_ = data["slm_pattern"] + + if "camera" in data: + event.camera_ = data["camera"] + + if "tags" in data: + tags = {key: value for key, value in data["tags"].items()} + event.setTags(tags) + + if "properties" in data: + for trip in data["properties"]: + t = ThreeTuple(trip[0], trip[1], trip[2]) + event.properties_.add(t) + + return event + + def to_json(self): + if self.sequence_: + events = [self.event_to_json(e) for e in self.sequence_] + return events + else: + return self.event_to_json(self) + + @staticmethod + def from_json(data, acq): + if not isinstance(data, list): + return AcquisitionEvent.event_from_json(data, acq) + else: + sequence = [AcquisitionEvent.event_from_json(event, acq) for event in data] + return AcquisitionEvent(acq, sequence=sequence) + + def get_camera_device_name(self): + return self.camera_ + + def set_camera_device_name(self, camera): + self.camera_ = camera + + def 
get_additional_properties(self): + return [(t.dev, t.prop, t.val) for t in self.properties_] + + def should_acquire_image(self): + if self.sequence_: + return True + return self.configPreset_ is not None or self.axisPositions_ is not None + + def has_config_group(self): + return self.configPreset_ is not None and self.configGroup_ is not None + + def get_config_preset(self): + return self.configPreset_ + + def get_config_group(self): + return self.configGroup_ + + def set_config_preset(self, config): + self.configPreset_ = config + + def set_config_group(self, group): + self.configGroup_ = group + + def get_exposure(self): + return self.exposure_ + + def set_exposure(self, exposure): + self.exposure_ = exposure + + def set_property(self, device, property, value): + self.properties_.add(ThreeTuple(device, property, value)) + + def set_minimum_start_time(self, l): + self.miniumumStartTime_ms_ = l + + def get_defined_axes(self): + return set(self.axisPositions_.keys()) + + def set_axis_position(self, label, position): + if position is None: + raise Exception("Cannot set axis position to null") + self.axisPositions_[label] = position + + def set_stage_coordinate(self, deviceName, v, axisName=None): + self.stageCoordinates_[deviceName] = v + self.stageDeviceNamesToAxisNames_[deviceName] = deviceName if axisName is None else axisName + + def get_stage_single_axis_stage_position(self, deviceName): + return self.stageCoordinates_.get(deviceName) + + def get_axis_positions(self): + return self.axisPositions_ + + def get_axis_position(self, label): + return self.axisPositions_.get(label) + + def get_timeout_ms(self): + return self.timeout_ms_ + + def set_time_index(self, index): + self.set_axis_position(AcqEngMetadata.TIME_AXIS, index) + + def set_channel_name(self, name): + self.set_axis_position(AcqEngMetadata.CHANNEL_AXIS, name) + + def get_slm_image(self): + return self.slmImage_ + + def set_z(self, index, position): + if index is not None: + 
self.set_axis_position(AcqEngMetadata.Z_AXIS, index) + self.zPosition_ = position + + def get_t_index(self): + return self.get_axis_position(AcqEngMetadata.TIME_AXIS) + + def get_z_index(self): + return self.get_axis_position(AcqEngMetadata.Z_AXIS) + + def get_device_axis_name(self, deviceName): + if deviceName not in self.stageDeviceNamesToAxisNames_: + raise Exception(f"No axis name for device {deviceName}. call setStageCoordinate first") + return self.stageDeviceNamesToAxisNames_[deviceName] + + def get_stage_device_names(self): + return set(self.stageDeviceNamesToAxisNames_.keys()) + + @staticmethod + def create_acquisition_finished_event(acq): + evt = AcquisitionEvent(acq) + evt.specialFlag_ = AcquisitionEvent.SpecialFlag.ACQUISITION_FINISHED + return evt + + def is_acquisition_finished_event(self): + return self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_FINISHED + + @staticmethod + def create_acquisition_sequence_end_event(acq): + evt = AcquisitionEvent(acq) + evt.specialFlag_ = AcquisitionEvent.SpecialFlag.ACQUISITION_SEQUENCE_END + return evt + + def is_acquisition_sequence_end_event(self): + return self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_SEQUENCE_END + + def get_z_position(self): + return self.zPosition_ + + def get_minimum_start_time_absolute(self): + if self.miniumumStartTime_ms_ is None: + return None + return self.acquisition_.get_start_time_ms() + self.miniumumStartTime_ms_ + + def get_sequence(self): + return self.sequence_ + + def is_exposure_sequenced(self): + return self.exposureSequenced_ + + def is_config_group_sequenced(self): + return self.configGroupSequenced_ + + def is_xy_sequenced(self): + return self.xySequenced_ + + def is_z_sequenced(self): + return self.zSequenced_ + + def get_x_position(self): + return self.xPosition_ + + def get_camera_image_counts(self, default_camera_device_name): + """ + Get the number of images to be acquired on each camera in a sequence event. 
+ For a non-sequence event, the number of images is 1, and the camera is the core camera. + This is passed in as an argument in order to avoid this class talking to the core directly. + + Args: + default_camera_device_name (str): Default camera device name. + + Returns: + defaultdict: Dictionary containing the camera device names as keys and image counts as values. + """ + # Figure out how many images on each camera and start sequence with appropriate number on each + camera_image_counts = {} + camera_device_names = set() + if self.get_sequence() is None: + camera_image_counts[default_camera_device_name] = 1 + return camera_image_counts + + for event in self.get_sequence(): + camera_device_names.add(event.get_camera_device_name() if event.get_camera_device_name() is not None else + default_camera_device_name) + if None in camera_device_names: + camera_device_names.remove(None) + camera_device_names.add(default_camera_device_name) + + for camera_device_name in camera_device_names: + camera_image_counts[camera_device_name] = sum(1 for event in self.get_sequence() + if event.get_camera_device_name() == camera_device_name) + + if len(camera_device_names) == 1 and camera_device_name == default_camera_device_name: + camera_image_counts[camera_device_name] = len(self.get_sequence()) + + return camera_image_counts + + def get_y_position(self): + return self.yPosition_ + + def get_position_name(self): + axisPosition_ = self.get_axis_position(AcqEngMetadata.POSITION_AXIS) + if isinstance(axisPosition_, str): + return axisPosition_ + return None + + def set_x(self, x): + self.xPosition_ = x + + def set_y(self, y): + self.yPosition_ = y + + def set_tags(self, tags): + self.tags_.clear() + if tags: + self.tags_.update(tags) + + def get_tags(self): + return dict(self.tags_) + + def __str__(self): + if self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_FINISHED: + return "Acq finished event" + elif self.specialFlag_ == 
AcquisitionEvent.SpecialFlag.ACQUISITION_SEQUENCE_END: + return "Acq sequence end event" + + builder = [] + for deviceName in self.stageDeviceNamesToAxisNames_.keys(): + builder.append(f"\t{deviceName}: {self.get_stage_single_axis_stage_position(deviceName)}") + + if self.zPosition_ is not None: + builder.append(f"z {self.zPosition_}") + if self.xPosition_ is not None: + builder.append(f"x {self.xPosition_}") + if self.yPosition_ is not None: + builder.append(f"y {self.yPosition_}") + + for axis in self.axisPositions_.keys(): + builder.append(f"\t{axis}: {self.axisPositions_[axis]}") + + if self.camera_ is not None: + builder.append(f"\t{self.camera_}: {self.camera_}") + + return ' '.join(builder) + + +ThreeTuple = namedtuple('ThreeTuple', ['dev', 'prop', 'val']) diff --git a/pycromanager/acquisition/acquisition_superclass.py b/pycromanager/acquisition/acquisition_superclass.py index e1962fc8..ccf00db9 100644 --- a/pycromanager/acquisition/acquisition_superclass.py +++ b/pycromanager/acquisition/acquisition_superclass.py @@ -5,38 +5,38 @@ import copy import types import numpy as np -from typing import List, Iterable +from typing import Union, List, Iterable import warnings from abc import ABCMeta, abstractmethod from docstring_inheritance import NumpyDocstringInheritanceMeta import queue import weakref from pycromanager.acq_future import AcqNotification, AcquisitionFuture +import os import threading from inspect import signature +from typing import Generator from types import GeneratorType +import time from queue import Queue from typing import Generator, Dict, Union -# from pycromanager.acquisition.execution_engine.acq_events import AcquisitionEvent -AcquisitionEvent = None class EventQueue(Queue): """ - A queue that can hold both event_implementations/lists of event_implementations and generators of event_implementations/lists of event_implementations. 
When a generator is + A queue that can hold both events/lists of events and generators of events/lists of events. When a generator is retrieved from the queue, it will be automatically expanded and its elements will be the output of queue.get """ def __init__(self, maxsize=0): super().__init__(maxsize) - self.current_generator: Union[Generator[AcquisitionEvent, None, None], None] = None + self.current_generator: Union[Generator[Dict, None, None], None] = None def clear(self): self.queue.clear() self.current_generator = None - def put(self, item: Union[AcquisitionEvent, List[AcquisitionEvent], - Generator[AcquisitionEvent, None, None], None], block=True, timeout=None): + def put(self, item: Union[Dict, Generator[Dict, None, None]], block=True, timeout=None): if isinstance(item, dict): super().put(item, block, timeout) elif isinstance(item, list): @@ -50,7 +50,7 @@ def put(self, item: Union[AcquisitionEvent, List[AcquisitionEvent], else: raise TypeError("Event must be a dictionary, list or generator") - def get(self, block=True, timeout=None) -> AcquisitionEvent: + def get(self, block=True, timeout=None) -> Dict: while True: if self.current_generator is None: item = super().get(block, timeout) @@ -101,21 +101,21 @@ def __init__( image processing function that will be called on each image that gets acquired. Can either take two arguments (image, metadata) where image is a numpy array and metadata is a dict containing the corresponding image metadata. Or a three argument version is accepted, which accepts (image, - metadata, queue), where queue is a Queue object that holds upcoming acquisition event_implementations. The function + metadata, queue), where queue is a Queue object that holds upcoming acquisition events. 
The function should return either an (image, metadata) tuple or a list of such tuples event_generation_hook_fn : Callable - hook function that will as soon as acquisition event_implementations are generated (before hardware sequencing optimization - in the acquisition engine. This is useful if one wants to modify acquisition event_implementations that they didn't generate + hook function that will as soon as acquisition events are generated (before hardware sequencing optimization + in the acquisition engine. This is useful if one wants to modify acquisition events that they didn't generate (e.g. those generated by a GUI application). Accepts either one argument (the current acquisition event) or two arguments (current event, event_queue) pre_hardware_hook_fn : Callable hook function that will be run just before the hardware is updated before acquiring - a execution_engine image. In the case of hardware sequencing, it will be run just before a sequence of instructions are + a new image. In the case of hardware sequencing, it will be run just before a sequence of instructions are dispatched to the hardware. Accepts either one argument (the current acquisition event) or two arguments (current event, event_queue) post_hardware_hook_fn : Callable hook function that will be run just before the hardware is updated before acquiring - a execution_engine image. In the case of hardware sequencing, it will be run just after a sequence of instructions are + a new image. In the case of hardware sequencing, it will be run just after a sequence of instructions are dispatched to the hardware, but before the camera sequence has been started. Accepts either one argument (the current acquisition event) or two arguments (current event, event_queue) post_camera_hook_fn : Callable @@ -131,7 +131,7 @@ def __init__( so as to not back up the processing of other notifications. 
image_saved_fn : Callable function that takes two arguments (the Axes of the image that just finished saving, and the Dataset) - or three arguments (Axes, Dataset and the event_queue) and gets called whenever a execution_engine image is written to + or three arguments (Axes, Dataset and the event_queue) and gets called whenever a new image is written to disk napari_viewer : napari.Viewer Provide a napari viewer to display acquired data in napari (https://napari.org/) rather than the built-in @@ -221,35 +221,35 @@ def get_dataset(self): def mark_finished(self): """ - Signal to acquisition that no more event_implementations will be added and it is time to initiate shutdown. + Signal to acquisition that no more events will be added and it is time to initiate shutdown. This is only needed if the context manager (i.e. "with Acquisition...") is not used. """ - # Some acquisition types (e.g. ExploreAcquisitions) generate their own event_implementations - # and don't send event_implementations over a port + # Some acquisition types (e.g. ExploreAcquisitions) generate their own events + # and don't send events over a port if self._event_queue is not None: - # this should shut down storage_implementations and viewer as appropriate + # this should shut down storage and viewer as appropriate self._event_queue.put(None) def acquire(self, event_or_events: dict or list or Generator) -> AcquisitionFuture: """ - Submit an event or a list of event_implementations for acquisition. A single event is a python dictionary - with a specific structure. The acquisition engine will determine if multiple event_implementations can + Submit an event or a list of events for acquisition. A single event is a python dictionary + with a specific structure. The acquisition engine will determine if multiple events can be merged into a hardware sequence and executed at once without computer-hardware communication in - between. 
This sequencing will only take place for event_implementations that are within a single call to acquire, + between. This sequencing will only take place for events that are within a single call to acquire, so if you want to ensure this doesn't happen, call acquire multiple times with each event in a list individually. Parameters ---------- event_or_events : list, dict, Generator - A single acquistion event (a dict), a list of acquisition event_implementations, or a generator that yields - acquisition event_implementations. + A single acquistion event (a dict), a list of acquisition events, or a generator that yields + acquisition events. """ try: - if self._are_events_finished(): + if self._acq.are_events_finished(): raise AcqAlreadyCompleteException( - 'Cannot submit more event_implementations because this acquisition is already finished') + 'Cannot submit more events because this acquisition is already finished') if event_or_events is None: # manual shutdown @@ -260,8 +260,8 @@ def acquire(self, event_or_events: dict or list or Generator) -> AcquisitionFutu acq_future = AcquisitionFuture(self) def notifying_generator(original_generator): - # store in a weakref so that if user code doesn't hang on to AcqFuture - # it doesn't needlessly track event_implementations + # store in a weakref so that if user code doesn't hange on to AcqFuture + # it doesn't needlessly track events acq_future_weakref = weakref.ref(acq_future) for event in original_generator: future = acq_future_weakref() @@ -269,7 +269,6 @@ def notifying_generator(original_generator): acq_future._monitor_axes(event['axes']) _validate_acq_events(event) yield event - event_or_events = notifying_generator(event_or_events) else: _validate_acq_events(event_or_events) @@ -288,7 +287,7 @@ def notifying_generator(original_generator): def abort(self, exception=None): """ - Cancel any pending event_implementations and shut down immediately + Cancel any pending events and shut down immediately Parameters ---------- 
@@ -299,23 +298,17 @@ def abort(self, exception=None): if exception is not None: self._exception = exception - # Clear any pending event_implementations on the python side, if applicable + # Clear any pending events on the python side, if applicable if self._event_queue is not None: self._event_queue.clear() - # Don't send any more event_implementations. The event sending thread should know shut itself down by + # Don't send any more events. The event sending thread should know shut itself down by # checking the status of the acquisition - self.abort() + self._acq.abort() - @abstractmethod - def _are_events_finished(self): - """ - Check if all event_implementations have been processed and executed - """ - pass def _add_storage_monitor_fn(self, image_saved_fn=None): """ - Add a callback function that gets called whenever a execution_engine image is writtern to disk (for acquisitions in + Add a callback function that gets called whenever a new image is writtern to disk (for acquisitions in progress only) Parameters @@ -350,7 +343,7 @@ def _storage_monitor_fn(): return t def _create_event_queue(self): - """Create thread safe queue for event_implementations so they can be passed from multiple processes""" + """Create thread safe queue for events so they can be passed from multiple processes""" self._event_queue = EventQueue() def _call_image_process_fn(self, image, metadata): @@ -383,8 +376,8 @@ def __exit__(self, exc_type, exc_val, exc_tb): def _validate_acq_events(events: dict or list): """ - Validate if supplied event_implementations are a dictionary or a list of dictionaries - that contain valid event_implementations. Throw an exception if not + Validate if supplied events are a dictionary or a list of dictionaries + that contain valid events. 
Throw an exception if not Parameters ---------- @@ -395,14 +388,14 @@ def _validate_acq_events(events: dict or list): _validate_acq_dict(events) elif isinstance(events, list): if len(events) == 0: - raise Exception('event_implementations list cannot be empty') + raise Exception('events list cannot be empty') for event in events: if isinstance(event, dict): _validate_acq_dict(event) else: - raise Exception('event_implementations must be a dictionary or a list of dictionaries') + raise Exception('events must be a dictionary or a list of dictionaries') else: - raise Exception('event_implementations must be a dictionary or a list of dictionaries') + raise Exception('events must be a dictionary or a list of dictionaries') def _validate_acq_dict(event: dict): """ @@ -429,20 +422,20 @@ def _validate_acq_dict(event: dict): def multi_d_acquisition_events( - num_time_points: int = None, - time_interval_s: Union[float, List[float]] = 0, - z_start: float = None, - z_end: float = None, - z_step: float = None, - channel_group: str = None, - channels: list = None, - channel_exposures_ms: list = None, - xy_positions: Iterable = None, - xyz_positions: Iterable = None, - position_labels: List[str] = None, - order: str = "tpcz", + num_time_points: int=None, + time_interval_s: Union[float, List[float]]=0, + z_start: float=None, + z_end: float=None, + z_step: float=None, + channel_group: str=None, + channels: list=None, + channel_exposures_ms: list=None, + xy_positions: Iterable=None, + xyz_positions: Iterable=None, + position_labels: List[str]=None, + order: str="tpcz", ): - """Convenience function for generating the event_implementations of a typical multi-dimensional acquisition (i.e. an + """Convenience function for generating the events of a typical multi-dimensional acquisition (i.e. 
an acquisition with some combination of multiple timepoints, channels, z-slices, or xy positions) Parameters @@ -450,8 +443,8 @@ def multi_d_acquisition_events( num_time_points : int How many time points if it is a timelapse (Default value = None) time_interval_s : float or list of floats - the minimum interval between consecutive time points in seconds. If set to 0, the - acquisition will go as fast as possible. If a list is provided, its length should + the minimum interval between consecutive time points in seconds. If set to 0, the + acquisition will go as fast as possible. If a list is provided, its length should be equal to 'num_time_points'. Elements in the list are assumed to be the intervals between consecutive timepoints in the timelapse. First element in the list indicates delay before capturing the first image (Default value = 0) @@ -486,7 +479,7 @@ def multi_d_acquisition_events( Returns ------- - event_implementations : dict + events : dict """ if xy_positions is not None and xyz_positions is not None: raise ValueError( @@ -507,7 +500,7 @@ def multi_d_acquisition_events( raise ValueError("xy_positions and position_labels must be of equal length") if xyz_positions is not None and len(xyz_positions) != len(position_labels): raise ValueError("xyz_positions and position_labels must be of equal length") - + # If any of z_start, z_step, z_end are provided, then they should all be provided # Here we can't use `all` as some of the values of z_start, z_step, z_end # may be zero and all((0,)) = False @@ -583,7 +576,7 @@ def generate_events(event, order): elif order[0] == "c" and channel_group is not None and channels is not None: for i in range(len(channels)): new_event = copy.deepcopy(event) - new_event["config_group"] = [channel_group, channels[i]] + new_event["config_group"] = [channel_group, channels[i]] new_event["axes"]["channel"] = channels[i] if channel_exposures_ms is not None: new_event["exposure"] = channel_exposures_ms[i] @@ -592,7 +585,7 @@ def 
generate_events(event, order): # this axis appears to be missing yield generate_events(event, order[1:]) - # collect all event_implementations into a single list + # collect all events into a single list base_event = {"axes": {}} events = [] @@ -615,4 +608,6 @@ def appender(next): events.append(next) appender(generate_events(base_event, order)) - return events \ No newline at end of file + return events + + diff --git a/pycromanager/acquisition/python_backend_acquisitions.py b/pycromanager/acquisition/python_backend_acquisitions.py index 09400a7d..070adc93 100644 --- a/pycromanager/acquisition/python_backend_acquisitions.py +++ b/pycromanager/acquisition/python_backend_acquisitions.py @@ -1,34 +1,25 @@ import warnings from docstring_inheritance import NumpyDocstringInheritanceMeta +from pycromanager.acquisition.acq_eng_py.main.AcqEngPy_Acquisition import Acquisition as pymmcore_Acquisition from pycromanager.acquisition.acquisition_superclass import _validate_acq_events, Acquisition -# from pycromanager.acquisition.execution_engine.acq_events import AcquisitionEvent -#TODO: -AcquisitionEvent = None -from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata -from pycromanager.acquisition.acq_eng_py.main.acq_notification import AcqNotification -from pycromanager.acquisition.acq_eng_py.internal.notification_handler import NotificationHandler -from pycromanager.acquisition.acq_eng_py.internal.engine import Engine +from pycromanager.acquisition.acq_eng_py.main.acquisition_event import AcquisitionEvent +from pycromanager.acq_future import AcqNotification import threading from inspect import signature import traceback -import queue from ndstorage.ndram_dataset import NDRAMDataset from ndstorage.ndtiff_dataset import NDTiffDataset -from pycromanager.acquisition.acq_eng_py.internal.hooks import EVENT_GENERATION_HOOK, \ - BEFORE_HARDWARE_HOOK, BEFORE_Z_DRIVE_HOOK, AFTER_HARDWARE_HOOK, AFTER_CAMERA_HOOK, AFTER_EXPOSURE_HOOK - - -IMAGE_QUEUE_SIZE = 30 
- - class PythonBackendAcquisition(Acquisition, metaclass=NumpyDocstringInheritanceMeta): """ - Pycro-Manager acquisition that uses a Python runtime backend. + Pycro-Manager acquisition that uses a Python runtime backend. Unlike the Java backend, + Python-backed acquisitions currently do not automatically write data to disk. Instead, by default, + they store data in RAM which can be queried with the Dataset class. If instead you want to + implement your own data storage, you can pass an image_process_fn which diverts the data to + a custom endpoint. """ - def __init__( self, directory: str=None, @@ -51,9 +42,6 @@ def __init__( dict(signature(PythonBackendAcquisition.__init__).parameters.items())[arg_name].default) for arg_name in arg_names } super().__init__(**named_args) - - self._engine = Engine.get_instance() - self._dataset = NDRAMDataset() if not directory else NDTiffDataset(directory, name=name, writable=True) self._finished = False self._notifications_finished = False @@ -63,52 +51,25 @@ def __init__( self._image_processor = ImageProcessor(self) if image_process_fn is not None else None - # create a thread that submits event_implementations - # event_implementations can be added to the queue through image processors, hooks, or the acquire method + # create a thread that submits events + # events can be added to the queue through image processors, hooks, or the acquire method def submit_events(): while True: event_or_events = self._event_queue.get() if event_or_events is None: - self._finish() - self._events_finished.wait() + self._acq.finish() + self._acq.block_until_events_finished() break _validate_acq_events(event_or_events) if isinstance(event_or_events, dict): event_or_events = [event_or_events] # convert to objects - event_or_events = [AcquisitionEvent.from_json(event, self) for event in event_or_events] - Engine.get_instance().submit_event_iterator(iter(event_or_events)) - + event_or_events = [AcquisitionEvent.from_json(event, self._acq) for event in 
event_or_events] + self._acq.submit_event_iterator(iter(event_or_events)) self._event_thread = threading.Thread(target=submit_events) self._event_thread.start() - self._events_finished = threading.Event() - self.abort_requested_ = threading.Event() - self.start_time_ms_ = -1 - self.paused_ = False - - self.event_generation_hooks_ = [] - self.before_hardware_hooks_ = [] - self.before_z_hooks_ = [] - self.after_hardware_hooks_ = [] - self.after_camera_hooks_ = [] - self.after_exposure_hooks_ = [] - self.image_processors_ = [] - - self.first_dequeue_ = queue.Queue(maxsize=IMAGE_QUEUE_SIZE) - self.processor_output_queues_ = {} - self.debug_mode_ = False - self.abort_exception_ = None - self.image_metadata_processor_ = None - self.notification_handler_ = NotificationHandler() - self.started_ = False - self.core_ = Engine.get_core() - self.data_sink_ = self._dataset - - summary_metadata = AcqEngMetadata.make_summary_metadata(self.core_, self) - - if self.data_sink_: - self.data_sink_.initialize(summary_metadata) + self._acq = pymmcore_Acquisition(self._dataset) # receive notifications from the acquisition engine. 
Unlike the java_backend analog # of this, the python backend does not have a separate thread for notifications because @@ -122,7 +83,7 @@ def post_notification(notification): if self._image_notification_queue.qsize() > self._image_notification_queue.maxsize * 0.9: warnings.warn(f"Acquisition image notification queue size: {self._image_notification_queue.qsize()}") - self._add_acq_notification_listener(NotificationListener(post_notification)) + self._acq.add_acq_notification_listener(NotificationListener(post_notification)) self._notification_dispatch_thread = self._start_notification_dispatcher(notification_callback_fn) @@ -153,10 +114,7 @@ def post_notification(notification): assert isinstance(napari_viewer, napari.Viewer), 'napari_viewer must be an instance of napari.Viewer' self._napari_viewer = napari_viewer start_napari_signalling(self._napari_viewer, self.get_dataset()) - - self._start_saving_thread() - self._post_notification(AcqNotification.create_acq_started_notification()) - self.started_ = True + self._acq.start() ######## Public API ########### @@ -164,13 +122,12 @@ def post_notification(notification): def await_completion(self): """Wait for acquisition to finish and resources to be cleaned up""" try: - while not self._are_events_finished() or ( - self._dataset is not None and not self._dataset.is_finished()): + while not self._acq.are_events_finished() or ( + self._acq.get_data_sink() is not None and not self._acq.get_data_sink().is_finished()): self._check_for_exceptions() - self._events_finished.wait(0.05) - if self._dataset is not None: - self._dataset.block_until_finished(0.05) - # time.sleep(0.05) # does this prevent things from getting stuck? 
+ self._acq.block_until_events_finished(0.05) + if self._acq.get_data_sink() is not None: + self._acq.get_data_sink().block_until_finished(0.05) self._check_for_exceptions() finally: self._event_thread.join() @@ -208,130 +165,10 @@ def _check_for_exceptions(self): def _are_acquisition_notifications_finished(self): """ - Called by the storage_implementations to check if all notifications have been processed + Called by the storage to check if all notifications have been processed """ return self._notifications_finished - - def _post_notification(self, notification): - self.notification_handler_.post_notification(notification) - - def _add_acq_notification_listener(self, post_notification_fn): - self.notification_handler_.add_listener(post_notification_fn) - - def _save_image(self, image): - if image is None: - self.data_sink_.finish() - self._post_notification(AcqNotification.create_data_sink_finished_notification()) - else: - pixels, metadata = image.pix, image.tags - axes = AcqEngMetadata.get_axes(metadata) - self.data_sink_.put_image(axes, pixels, metadata) - self._post_notification(AcqNotification.create_image_saved_notification(axes)) - - def _start_saving_thread(self): - def saving_thread(acq): - try: - while True: - if acq.debug_mode_: - acq.core_.log_message(f"Image queue size: {len(acq.first_dequeue_)}") - if not acq.image_processors_: - if acq.debug_mode_: - acq.core_.log_message("waiting for image to save") - img = acq.first_dequeue_.get() - if acq.debug_mode_: - acq.core_.log_message("got image to save") - acq._save_image(img) - if img is None: - break - else: - img = acq.processor_output_queues_[acq.image_processors_[-1]].get() - if acq.data_sink_: - if acq.debug_mode_: - acq.core_.log_message("Saving image") - if img.tags is None and img.pix is None: - break - acq._save_image(img) - if acq.debug_mode_: - acq.core_.log_message("Finished saving image") - except Exception as ex: - traceback.print_exc() - acq.abort(ex) - finally: - acq._save_image(None) - 
- threading.Thread(target=saving_thread, args=(self,)).start() - - - def _add_to_output(self, ti): - try: - if ti is None: - self._events_finished.set() - self.first_dequeue_.put(ti) - except Exception as ex: - raise RuntimeError(ex) - - def _finish(self): - Engine.get_instance().finish_acquisition(self) - - def _abort(self, ex): - if ex: - self.abort_exception_ = ex - if self.abort_requested_.is_set(): - return - self.abort_requested_.set() - if self.is_paused(): - self.set_paused(False) - Engine.get_instance().finish_acquisition(self) - - def _check_for_exceptions(self): - if self.abort_exception_: - raise self.abort_exception_ - - def _add_image_processor(self, p): - if self.started_: - raise RuntimeError("Cannot add processor after acquisition started") - self.image_processors_.append(p) - self.processor_output_queues_[p] = queue.Queue(maxsize=self.IMAGE_QUEUE_SIZE) - if len(self.image_processors_) == 1: - p.set_acq_and_queues(self, self.first_dequeue_, self.processor_output_queues_[p]) - else: - p.set_acq_and_queues(self, self.processor_output_queues_[self.image_processors_[-2]], - self.processor_output_queues_[self.image_processors_[-1]]) - - def _add_hook(self, h, type_): - if self.started_: - raise RuntimeError("Cannot add hook after acquisition started") - if type_ == EVENT_GENERATION_HOOK: - self.event_generation_hooks_.append(h) - elif type_ == BEFORE_HARDWARE_HOOK: - self.before_hardware_hooks_.append(h) - elif type_ == BEFORE_Z_DRIVE_HOOK: - self.before_z_hooks_.append(h) - elif type_ == AFTER_HARDWARE_HOOK: - self.after_hardware_hooks_.append(h) - elif type_ == AFTER_CAMERA_HOOK: - self.after_camera_hooks_.append(h) - elif type_ == AFTER_EXPOSURE_HOOK: - self.after_exposure_hooks_.append(h) - - def _get_hooks(self, type): - if type == EVENT_GENERATION_HOOK: - return self.event_generation_hooks_ - elif type == BEFORE_HARDWARE_HOOK: - return self.before_hardware_hooks_ - elif type == BEFORE_Z_DRIVE_HOOK: - return self.before_z_hooks_ - elif type == 
AFTER_HARDWARE_HOOK: - return self.after_hardware_hooks_ - elif type == AFTER_CAMERA_HOOK: - return self.after_camera_hooks_ - elif type == AFTER_EXPOSURE_HOOK: - return self.after_exposure_hooks_ - - def _are_events_finished(self): - return self._events_finished.is_set() - class ImageProcessor: """ This is the equivalent of RemoteImageProcessor in the Java version. @@ -354,7 +191,7 @@ def _process(self): while True: # wait for an image to arrive tagged_image = self.input_queue.get() - if tagged_image is None: + if tagged_image.tags is None and tagged_image.pix is None: # this is a signal to stop self.output_queue.put(tagged_image) break diff --git a/scripts/bridge_tests.py b/scripts/bridge_tests.py index 70ccf89b..3ec328d7 100644 --- a/scripts/bridge_tests.py +++ b/scripts/bridge_tests.py @@ -10,7 +10,7 @@ def other_thread(core): core = None -### Create an object and a child object on a execution_engine socket +### Create an object and a child object on a new socket core = ZMQRemoteMMCoreJ(debug=False) core.get_system_state_cache(new) diff --git a/scripts/camera_triggering/genIexamples.py b/scripts/camera_triggering/genIexamples.py index 93cf2b41..82556ac4 100644 --- a/scripts/camera_triggering/genIexamples.py +++ b/scripts/camera_triggering/genIexamples.py @@ -130,7 +130,7 @@ def live_mode_hardware_trigger(): core.set_exposure(camera_name, 500) core.arm_acquisition(camera_name) core.start_acquisition(camera_name) - # TODO: event_implementations + # TODO: events # Register(Camera.EventExposureEnd, CallbackDataObject, CallbackFunctionPtr) # EventSelector = ExposureEnd; # EventNotification = On; @@ -178,7 +178,7 @@ def multiple_bursts_hardware_trigger(): # TriggerMode = On; # TriggerActivation = RisingEdge; # TriggerSource = Line1; - # TODO event_implementations + # TODO events # Register(Camera.EventFrameBurstEnd,CallbackDataObject,CallbackFunctionPtr) # EventSelector = FrameBurstEnd; # EventNotification = On; diff --git a/scripts/custom_axis_acq.py 
b/scripts/custom_axis_acq.py index 2c1c3e6e..1b423aa5 100644 --- a/scripts/custom_axis_acq.py +++ b/scripts/custom_axis_acq.py @@ -8,7 +8,7 @@ for time in range(5): for index, z_um in enumerate(np.arange(start=0, stop=10, step=0.5)): evt = { - #'axes' is required. It is used by the image viewer and data storage_implementations to + #'axes' is required. It is used by the image viewer and data storage to # identify the acquired image "axes": {"l": index, "time": time}, # the 'z' field provides the z position in µm diff --git a/scripts/external_camera_trigger.py b/scripts/external_camera_trigger.py index 3d809539..36afb497 100644 --- a/scripts/external_camera_trigger.py +++ b/scripts/external_camera_trigger.py @@ -13,7 +13,7 @@ def external_trigger_fn(event): name="tcz_acq", post_camera_hook_fn=external_trigger_fn, ) as acq: - # Generate the event_implementations for a single z-stack + # Generate the events for a single z-stack events = multi_d_acquisition_events( num_time_points=10, time_interval_s=0, diff --git a/scripts/generate_ndtiff_test.py b/scripts/generate_ndtiff_test.py index cebe0bdc..69e2d7cf 100644 --- a/scripts/generate_ndtiff_test.py +++ b/scripts/generate_ndtiff_test.py @@ -26,7 +26,7 @@ with JavaBackendAcquisition(directory=save_dir, name="ndtiffv3.0_test", show_display=True, ) as acq: - # Generate the event_implementations for a single z-stack + # Generate the events for a single z-stack events = multi_d_acquisition_events( num_time_points=5, time_interval_s=0, diff --git a/scripts/headless_demo.py b/scripts/headless_demo.py index 1507274f..a727f552 100644 --- a/scripts/headless_demo.py +++ b/scripts/headless_demo.py @@ -28,7 +28,7 @@ def image_saved_fn(axes, dataset): with JavaBackendAcquisition(directory=save_dir, name="tcz_acq", show_display=True, image_saved_fn=image_saved_fn ) as acq: - # Generate the event_implementations for a single z-stack + # Generate the events for a single z-stack events = multi_d_acquisition_events( num_time_points=5, 
time_interval_s=0, diff --git a/scripts/image_processor.py b/scripts/image_processor.py index b10663f6..e0b112d9 100644 --- a/scripts/image_processor.py +++ b/scripts/image_processor.py @@ -10,7 +10,7 @@ def img_process_fn(image, metadata): with JavaBackendAcquisition( directory=r"C:\Users\henry\Desktop\datadump", name="tcz_acq", image_process_fn=img_process_fn ) as acq: - # Generate the event_implementations for a single z-stack + # Generate the events for a single z-stack events = multi_d_acquisition_events( num_time_points=10, time_interval_s=0, diff --git a/scripts/image_processor_divert.py b/scripts/image_processor_divert.py index 9347a0ff..f08dc556 100644 --- a/scripts/image_processor_divert.py +++ b/scripts/image_processor_divert.py @@ -7,7 +7,7 @@ def img_process_fn(image, metadata): pass # send them somewhere else, not default saving and display with JavaBackendAcquisition(image_process_fn=img_process_fn) as acq: - # Generate the event_implementations for a single z-stack + # Generate the events for a single z-stack events = multi_d_acquisition_events( num_time_points=10, time_interval_s=0, diff --git a/scripts/image_processor_multiple.py b/scripts/image_processor_multiple.py index 30ceb41d..115ded99 100644 --- a/scripts/image_processor_multiple.py +++ b/scripts/image_processor_multiple.py @@ -26,7 +26,7 @@ def img_process_fn(image, metadata): with JavaBackendAcquisition( directory="/Users/henrypinkard/megllandump", name="tcz_acq", image_process_fn=img_process_fn ) as acq: - # Generate the event_implementations for a single z-stack + # Generate the events for a single z-stack events = multi_d_acquisition_events( num_time_points=10, time_interval_s=0, diff --git a/scripts/lightsheet_deskew.py b/scripts/lightsheet_deskew.py index a4b81e15..5fc769bc 100644 --- a/scripts/lightsheet_deskew.py +++ b/scripts/lightsheet_deskew.py @@ -104,7 +104,7 @@ def precompute_recon_weightings(self, do_orthogonal_views=True, do_volume=True): for z_index_camera in 
np.arange(self.camera_shape[0]): for y_index_camera in np.arange(self.camera_shape[1]): - # where does each line of x pixels belong in the execution_engine image? + # where does each line of x pixels belong in the new image? if (z_index_camera, y_index_camera) not in self.recon_coord_LUT: print('ignoring: ', z_index_camera, y_index_camera) continue @@ -145,7 +145,7 @@ def make_projections(self, data, do_orthogonal_views=True, do_volume=True): # do the projection/reconstruction # iterate through each z slice of the image - # at each z slice, iterate through each x pixel and copy a line of y pixels to the execution_engine image + # at each z slice, iterate through each x pixel and copy a line of y pixels to the new image for z_index_camera in np.arange(0, self.camera_shape[0], 1): image_on_camera = data[z_index_camera] for y_index_camera in range(self.camera_shape[1]): @@ -153,7 +153,7 @@ def make_projections(self, data, do_orthogonal_views=True, do_volume=True): continue source_line_of_x_pixels = image_on_camera[y_index_camera] - # where does each line of x pixels belong in the execution_engine image? + # where does each line of x pixels belong in the new image? dest_coords = self.recon_coord_LUT[(z_index_camera, y_index_camera)] for dest_coord in dest_coords: recon_z, recon_y = dest_coord diff --git a/scripts/magellan_surfaces.py b/scripts/magellan_surfaces.py index 482e71d2..ecb41542 100644 --- a/scripts/magellan_surfaces.py +++ b/scripts/magellan_surfaces.py @@ -27,7 +27,7 @@ ### Part 3a run autofocus ##### - # TODO: maybe run an initial focus integration_tests to see how off the surface is + # TODO: maybe run an initial focus test to see how off the surface is # this function will run after the hardware has been updated (i.e. 
xy stage moved) but before each image is acquired diff --git a/scripts/multi_d_acq.py b/scripts/multi_d_acq.py index 39e82525..d5411af5 100644 --- a/scripts/multi_d_acq.py +++ b/scripts/multi_d_acq.py @@ -2,7 +2,7 @@ with JavaBackendAcquisition(directory=r"/Users/henrypinkard/tmp", name="tcz_acq", debug=False) as acq: - # Generate the event_implementations for a single z-stack + # Generate the events for a single z-stack events = multi_d_acquisition_events( num_time_points=8, time_interval_s=0, diff --git a/scripts/napari_frontend.py b/scripts/napari_frontend.py index 7e8e9fe6..b6618f41 100644 --- a/scripts/napari_frontend.py +++ b/scripts/napari_frontend.py @@ -22,7 +22,7 @@ def image_saved_callback(axes, d): """ - Callback function that will be used to signal to napari that a execution_engine image is ready + Callback function that will be used to signal to napari that a new image is ready """ global dataset global update_ready @@ -48,7 +48,7 @@ def run_acq(): def update_layer(image): """ - update the napari layer with the execution_engine image + update the napari layer with the new image """ if len(viewer.layers) == 0: viewer.add_image(image) @@ -61,14 +61,14 @@ def update_layer(image): @thread_worker(connect={'yielded': update_layer}) def update_images(): """ - Monitor for signals that Acqusition has a execution_engine image ready, and when that happens + Monitor for signals that Acqusition has a new image ready, and when that happens update napari appropriately """ global update_ready while True: if update_ready: update_ready = False - # A execution_engine image has arrived, but we only need to regenerate the dask array + # A new image has arrived, but we only need to regenerate the dask array # if its shape has changed shape = np.array([len(dataset.axes[name]) for name in dataset.axes.keys()]) if not hasattr(update_images, 'old_shape') or \ diff --git a/scripts/speed_test.py b/scripts/speed_test.py index 2942711f..65291e77 100644 --- a/scripts/speed_test.py 
+++ b/scripts/speed_test.py @@ -1,7 +1,7 @@ from pycromanager import JavaClass, ZMQRemoteMMCoreJ -tester = JavaClass('org.micromanager.acquisition.kernel.acqengjcompat.speedtest.SpeedTest') +tester = JavaClass('org.micromanager.acquisition.internal.acqengjcompat.speedtest.SpeedTest') pass dir = r'C:\Users\henry\Desktop\data' diff --git a/scripts/string_axes.py b/scripts/string_axes.py index 061a95fe..6cbe26b0 100644 --- a/scripts/string_axes.py +++ b/scripts/string_axes.py @@ -1,5 +1,5 @@ """ -integration_tests the ability to acquisitions to have String axes instead of int ones +test the ability to acquisitions to have String axes instead of int ones """ @@ -8,7 +8,7 @@ with JavaBackendAcquisition(directory="/Users/henrypinkard/tmp", name="NDTiff3.2_monochrome", debug=False) as acq: - # Generate the event_implementations for a single z-stack + # Generate the events for a single z-stack events = multi_d_acquisition_events_new( num_time_points=8, time_interval_s=0, From ce1b0280da11ac51381ab2171ccc44f1726be293 Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Tue, 9 Jul 2024 18:23:17 +0200 Subject: [PATCH 18/20] revert accidental changes --- pycromanager/acquisition/RAMStorage_java.py | 6 +-- .../acquisition/acq_eng_py/internal/hooks.py | 17 ------- pycromanager/headless.py | 47 +++++++++++++++---- pycromanager/mm_java_classes.py | 6 +-- pycromanager/napari_util.py | 6 +-- pycromanager/test/conftest.py | 4 +- pycromanager/test/test_callback_functions.py | 2 +- pycromanager/test/test_notifications.py | 4 +- 8 files changed, 53 insertions(+), 39 deletions(-) delete mode 100644 pycromanager/acquisition/acq_eng_py/internal/hooks.py diff --git a/pycromanager/acquisition/RAMStorage_java.py b/pycromanager/acquisition/RAMStorage_java.py index e568fcac..6f54cbec 100644 --- a/pycromanager/acquisition/RAMStorage_java.py +++ b/pycromanager/acquisition/RAMStorage_java.py @@ -3,10 +3,10 @@ class NDRAMDatasetJava(NDStorageBase): """ 
- A python class that wraps a Java-backend RAM data storage_implementations. + A python class that wraps a Java-backend RAM data storage. This class maintains an index of which images have been saved, but otherwise routes all calls to the Java - implementation of the RAM data storage_implementations. + implementation of the RAM data storage. """ def __init__(self, java_RAM_data_storage): @@ -22,7 +22,7 @@ def close(self): def add_available_axes(self, image_coordinates): """ - The Java RAM storage_implementations has received a execution_engine image with the given axes. Add these axes to the index. + The Java RAM storage has received a new image with the given axes. Add these axes to the index. """ self._index_keys.add(frozenset(image_coordinates.items())) # update information about the available images diff --git a/pycromanager/acquisition/acq_eng_py/internal/hooks.py b/pycromanager/acquisition/acq_eng_py/internal/hooks.py deleted file mode 100644 index e9ea5c54..00000000 --- a/pycromanager/acquisition/acq_eng_py/internal/hooks.py +++ /dev/null @@ -1,17 +0,0 @@ - -EVENT_GENERATION_HOOK = 0 -# This hook runs before changes to the hardware (corresponding to the instructions in the -# event) are made -BEFORE_HARDWARE_HOOK = 1 -# This hook runs after all changes to the hardware except dor setting th Z drive have been -# made. This is useful for things such as autofocus. -BEFORE_Z_DRIVE_HOOK = 2 -# This hook runs after changes to the hardware took place, but before camera exposure -# (either a snap or a sequence) is started -AFTER_HARDWARE_HOOK = 3 -# Hook runs after the camera sequence acquisition has started. This can be used for -# external triggering of the camera -AFTER_CAMERA_HOOK = 4 -# Hook runs after the camera exposure ended (when possible, before readout of the camera -# and availability of the images in memory). 
-AFTER_EXPOSURE_HOOK = 5 \ No newline at end of file diff --git a/pycromanager/headless.py b/pycromanager/headless.py index 2243851d..84b3658a 100644 --- a/pycromanager/headless.py +++ b/pycromanager/headless.py @@ -6,7 +6,7 @@ import types import os -# from pycromanager.acquisition.acq_eng_py.kernel.engine import Engine +from pycromanager.acquisition.acq_eng_py.internal.engine import Engine from pymmcore import CMMCore import pymmcore from pyjavaz import DEFAULT_BRIDGE_PORT, server_terminated @@ -36,7 +36,7 @@ def _create_pymmcore_instance(): 2. add convenience methods to match the MMCoreJ API: """ - # Create a execution_engine dictionary for the class attributes + # Create a new dictionary for the class attributes new_attributes = {} # Iterate through the original attributes @@ -49,9 +49,42 @@ def _create_pymmcore_instance(): new_attr_name = _camel_to_snake(attr_name) new_attributes[new_attr_name] = attr_value - # Create and return a execution_engine class that subclasses the original class and has the execution_engine attributes + # Create and return a new class that subclasses the original class and has the new attributes clz = type(CMMCore.__name__ + "SnakeCase", (CMMCore,), new_attributes) + instance = clz() + + def pop_next_tagged_image(self): + md = pymmcore.Metadata() + pix = self.pop_next_image_md(0, 0, md) + tags = {key: md.GetSingleTag(key).GetValue() for key in md.GetKeys()} + return TaggedImage(tags, pix) + + def get_tagged_image(core, cam_index, camera, height, width, binning=None, pixel_type=None, roi_x_start=None, + roi_y_start=None): + """ + Different signature than the Java version because of difference in metadata handling in the swig layers + """ + pix = core.get_image() + md = pymmcore.Metadata() + # most of the same tags from pop_next_tagged_image, which may not be the same as the MMCoreJ version of this function + tags = {'Camera': camera, 'Height': height, 'Width': width, 'PixelType': pixel_type, + 'CameraChannelIndex': cam_index} + # Could 
optionally add these for completeness but there might be a performance hit + if binning is not None: + tags['Binning'] = binning + if roi_x_start is not None: + tags['ROI-X-start'] = roi_x_start + if roi_y_start is not None: + tags['ROI-Y-start'] = roi_y_start + + return TaggedImage(tags, pix) + + instance.get_tagged_image = types.MethodType(get_tagged_image, instance) + instance.pop_next_tagged_image = types.MethodType(pop_next_tagged_image, instance) + + # attach TaggedImage class + instance.TaggedImage = TaggedImage return instance @@ -79,9 +112,8 @@ def stop_headless(debug=False): logger.debug('Stopping pymmcore instance') c.unloadAllDevices() if debug: - logger.debug('Unloaded all device_implementations.py') - # TODO: shutdown execution_engine engine - # Engine.get_instance().shutdown() + logger.debug('Unloaded all devices') + Engine.get_instance().shutdown() if debug: logger.debug('Engine shut down') _PYMMCORES.clear() @@ -137,8 +169,7 @@ def start_headless( mmc.load_system_configuration(config_file) mmc.set_circular_buffer_memory_footprint(buffer_size_mb) _PYMMCORES.append(mmc) # Store so it doesn't get garbage collected - # TODO: startup execution_engine engine - # Engine(mmc) + Engine(mmc) else: classpath = mm_app_path + '/plugins/Micro-Manager/*' if java_loc is None: diff --git a/pycromanager/mm_java_classes.py b/pycromanager/mm_java_classes.py index 55a1a7e3..ac5bf87a 100644 --- a/pycromanager/mm_java_classes.py +++ b/pycromanager/mm_java_classes.py @@ -74,7 +74,7 @@ def __new__( port: int The port of the Bridge used to create the object new_socket: bool - If True, will create execution_engine java object on a execution_engine port so that blocking calls will not interfere + If True, will create new java object on a new port so that blocking calls will not interfere with the bridges main port debug: print debug messages @@ -117,7 +117,7 @@ def __new__( port: int The port of the Bridge used to create the object new_socket: bool - If True, will create 
execution_engine java object on a execution_engine port so that blocking calls will not interfere + If True, will create new java object on a new port so that blocking calls will not interfere with the bridges main port debug: bool print debug messages @@ -142,7 +142,7 @@ def __new__( port: int The port of the Bridge used to create the object new_socket: bool - If True, will create execution_engine java object on a execution_engine port so that blocking calls will not interfere + If True, will create new java object on a new port so that blocking calls will not interfere with the bridges main port debug: bool print debug messages diff --git a/pycromanager/napari_util.py b/pycromanager/napari_util.py index 6fe4eb1b..03aba724 100644 --- a/pycromanager/napari_util.py +++ b/pycromanager/napari_util.py @@ -8,7 +8,7 @@ def start_napari_signalling(viewer, dataset): """ - Start up a threadworker, which will check for execution_engine images arrived in the dataset + Start up a threadworker, which will check for new images arrived in the dataset and then signal to napari to update or refresh as needed :param viewer: the napari Viewer :param dataset: the Datatset being acquired @@ -17,7 +17,7 @@ def start_napari_signalling(viewer, dataset): def update_layer(image): """ - update the napari layer with the execution_engine image + update the napari layer with the new image """ if image is not None: try: @@ -29,7 +29,7 @@ def update_layer(image): @thread_worker(connect={'yielded': update_layer}) def napari_signaller(): """ - Monitor for signals that Acquisition has a execution_engine image ready, and when that happens + Monitor for signals that Acquisition has a new image ready, and when that happens update napari appropriately """ # don't update faster than the display can handle diff --git a/pycromanager/test/conftest.py b/pycromanager/test/conftest.py index d2dc28ee..e59fd501 100644 --- a/pycromanager/test/conftest.py +++ b/pycromanager/test/conftest.py @@ -72,7 +72,7 @@ def 
install_mm(): # find pycro-manager/java path if os.path.isdir('java'): java_path = os.path.abspath('java') - # in case cwd is '/pycromanager/integration_tests' + # in case cwd is '/pycromanager/test' elif os.path.isdir('../../java'): java_path = os.path.abspath('../../java') else: @@ -86,7 +86,7 @@ def install_mm(): print(f'Removed {file_path}') # Copy the pycromanagerjava.jar file that was compiled by the github action - # into the nightly build so that it will integration_tests with the latest code + # into the nightly build so that it will test with the latest code compiled_jar_path = os.path.join(java_path, 'target', 'PycromanagerJava-*.jar') destination_path = os.path.join(mm_install_dir, 'plugins', 'Micro-Manager', 'PycromanagerJava.jar') diff --git a/pycromanager/test/test_callback_functions.py b/pycromanager/test/test_callback_functions.py index e39b3531..123a227b 100644 --- a/pycromanager/test/test_callback_functions.py +++ b/pycromanager/test/test_callback_functions.py @@ -90,7 +90,7 @@ def hook_fn(event): return None # cancel the event with Acquisition(show_display=False, pre_hardware_hook_fn=hook_fn) as acq: - # copy list of event_implementations to avoid popping from original + # copy list of events to avoid popping from original events_copy = [e for e in events] for test_event in events: acq.acquire(test_event) diff --git a/pycromanager/test/test_notifications.py b/pycromanager/test/test_notifications.py index 437c990d..0b725f19 100644 --- a/pycromanager/test/test_notifications.py +++ b/pycromanager/test/test_notifications.py @@ -8,9 +8,9 @@ # TODO: add tests for timing of blocking until different parts of the hardware sequence # def test_async_images_read(launch_mm_headless, setup_data_folder): # start = time.time() -# event_implementations = multi_d_acquisition_events(num_time_points=10, time_interval_s=0.5) +# events = multi_d_acquisition_events(num_time_points=10, time_interval_s=0.5) # with Acquisition(directory=setup_data_folder, 
show_display=False) as acq: -# future = acq.acquire(event_implementations) +# future = acq.acquire(events) # # future.await_execution({'time': 5}, AcqNotification.Hardware.POST_HARDWARE) From ff70ec86682ae2e2e885b5abf6bb777b69cef6ae Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Tue, 9 Jul 2024 18:23:51 +0200 Subject: [PATCH 19/20] revert accidental changes --- requirements.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index c8236a2b..6720e4c8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 +5,5 @@ ndstorage>=0.1.6 docstring-inheritance pymmcore sortedcontainers -wget pyjavaz==1.2.4 -wget +wget \ No newline at end of file From 1cef91586b0282a7e1cf8286cf0980d4666ac351 Mon Sep 17 00:00:00 2001 From: Henry Pinkard <7969470+henrypinkard@users.noreply.github.com> Date: Tue, 9 Jul 2024 18:24:33 +0200 Subject: [PATCH 20/20] revert accidental changes --- pycromanager/test/test_device.py | 7 ------- 1 file changed, 7 deletions(-) delete mode 100644 pycromanager/test/test_device.py diff --git a/pycromanager/test/test_device.py b/pycromanager/test/test_device.py deleted file mode 100644 index bd444443..00000000 --- a/pycromanager/test/test_device.py +++ /dev/null @@ -1,7 +0,0 @@ -# -# def test_micro_manager_camera_snap(): -# camera = MicroManagerCamera() -# -# camera.arm(1) -# camera.start() -# image, metadata = camera.pop_image() \ No newline at end of file