diff --git a/docs/source/application_notebooks/PSF_viewer.py b/docs/source/application_notebooks/PSF_viewer.py index a78ac529..84d4dac0 100644 --- a/docs/source/application_notebooks/PSF_viewer.py +++ b/docs/source/application_notebooks/PSF_viewer.py @@ -44,7 +44,7 @@ import napari from napari.qt import thread_worker from magicgui import magicgui -from pycromanager import Acquisition, multi_d_acquisition_events +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events # open napari in an extra window, only needed for jupyter notebooks #%gui qt @@ -166,9 +166,9 @@ def acquire_data(z_range): """ micro-manager data acquisition. Creates acquisition events for z-stack. This example: use custom events, not multi_d_acquisition because the z-stage is not run from micro-manager but controlled via external DAQ.""" - with Acquisition(directory=None, name=None, - show_display=True, - image_process_fn = grab_image) as acq: + with JavaBackendAcquisition(directory=None, name=None, + show_display=True, + image_process_fn = grab_image) as acq: events = [] for index, z_um in enumerate(np.linspace(z_range[0], z_range[1], z_range[2])): evt = {"axes": {"z_ext": index}, "z_ext": z_um} @@ -182,9 +182,9 @@ def acquire_multid(z_range): from micro-manager. Unless hardware triggering is set up in micro-manager, this will be fairly slow: micro-manager does not sweep the z-stage, but acquires plane by plane. 
""" - with Acquisition(directory=None, name=None, - show_display=False, - image_process_fn = grab_image) as acq: + with JavaBackendAcquisition(directory=None, name=None, + show_display=False, + image_process_fn = grab_image) as acq: events = multi_d_acquisition_events(z_start=z_range[0], z_end=z_range[1], z_step=(z_range[1]-z_range[0])/(z_range[2]-1)) acq.acquire(events) diff --git a/java/pom.xml b/java/pom.xml index 13030f7e..a5794e44 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -2,7 +2,7 @@ 4.0.0 org.micro-manager.pycro-manager PycroManagerJava - 0.43.1 + 0.44.0 jar Pycro-Manager Java The Java components of Pycro-Manager @@ -54,7 +54,7 @@ org.micro-manager.acqengj AcqEngJ - 0.32.2 + 0.33.0 org.micro-manager.ndviewer @@ -64,7 +64,7 @@ org.micro-manager.ndtiffstorage NDTiffStorage - 2.14.0 + 2.15.1 diff --git a/java/src/main/java/org/micromanager/explore/ExploreAcqUIAndStorage.java b/java/src/main/java/org/micromanager/explore/ExploreAcqUIAndStorage.java index 75e46df5..10b1ece7 100644 --- a/java/src/main/java/org/micromanager/explore/ExploreAcqUIAndStorage.java +++ b/java/src/main/java/org/micromanager/explore/ExploreAcqUIAndStorage.java @@ -358,7 +358,7 @@ protected ExploreMouseListener createMouseListener() { return new ExploreMouseListener(acq_, display_, logger_); } - public void putImage(final TaggedImage taggedImg) { + public Object putImage(final TaggedImage taggedImg) { String channelName = (String) AcqEngMetadata.getAxes(taggedImg.tags).get("channel"); boolean newChannel = !channelNames_.contains(channelName); @@ -399,6 +399,7 @@ public void run() { } }); } + return null; } diff --git a/java/src/main/java/org/micromanager/internal/zmq/ZMQServer.java b/java/src/main/java/org/micromanager/internal/zmq/ZMQServer.java index 3fe85b84..42866f42 100644 --- a/java/src/main/java/org/micromanager/internal/zmq/ZMQServer.java +++ b/java/src/main/java/org/micromanager/internal/zmq/ZMQServer.java @@ -42,7 +42,7 @@ public class ZMQServer extends ZMQSocketWrapper { 
private static Function classMapper_; private static ZMQServer mainServer_; static boolean debug_ = false; - private Consumer debugLogger_; + private static Consumer debugLogger_; //for testing // public static void main(String[] args) { @@ -76,6 +76,9 @@ public ZMQServer(Collection cls, Function classMappe public ZMQServer(Collection cls, Function classMapper, String[] excludePaths, Consumer debugLogger, int port) throws URISyntaxException, UnsupportedEncodingException { super(SocketType.REP, port); + mainServer_ = this; + debugLogger_ = debugLogger; + classMapper_ = classMapper; util_ = new ZMQUtil(cls, excludePaths); @@ -89,7 +92,6 @@ public ZMQServer(Collection cls, Function classMappe packages_.addAll(Stream.of(Package.getPackages()).map(p -> p.getName()).collect(Collectors.toList())); } } - debugLogger_ = debugLogger; } public static ZMQServer getMasterServer() { @@ -440,8 +442,8 @@ private JSONObject runMethod(Object obj, JSONObject message, boolean staticMetho protected JSONObject parseAndExecuteCommand(JSONObject request) throws Exception { JSONObject reply; switch (request.getString("command")) { - case "connect": {//Connect to master server - mainServer_ = this; + case "connect": { + // Connect to the server debug_ = request.getBoolean("debug"); //Called by master process reply = new JSONObject(); diff --git a/java/src/main/java/org/micromanager/lightsheet/StackResamplersImageProcessor.java b/java/src/main/java/org/micromanager/lightsheet/StackResamplersImageProcessor.java index b9d31327..f65c2c55 100644 --- a/java/src/main/java/org/micromanager/lightsheet/StackResamplersImageProcessor.java +++ b/java/src/main/java/org/micromanager/lightsheet/StackResamplersImageProcessor.java @@ -1,5 +1,6 @@ package org.micromanager.lightsheet; +import java.util.concurrent.BlockingQueue; import mmcorej.TaggedImage; import mmcorej.org.json.JSONException; import mmcorej.org.json.JSONObject; @@ -73,7 +74,7 @@ public StackResamplersImageProcessor(int mode, double theta, 
double cameraPixelS /** * For testing purposes only */ - LinkedBlockingDeque getOutputQueue() { + BlockingQueue getOutputQueue() { return sink_; } diff --git a/java/src/main/java/org/micromanager/remote/RemoteAcquisitionFactory.java b/java/src/main/java/org/micromanager/remote/RemoteAcquisitionFactory.java index ed5ee3df..9aa8483b 100644 --- a/java/src/main/java/org/micromanager/remote/RemoteAcquisitionFactory.java +++ b/java/src/main/java/org/micromanager/remote/RemoteAcquisitionFactory.java @@ -44,12 +44,10 @@ public RemoteAcquisition createAcquisition(String dir, String name, boolean show int savingQueueSize, boolean debug) { RemoteEventSource eventSource = new RemoteEventSource(); RemoteViewerStorageAdapter adapter = null; - if (name != null && dir != null) { - // Saving to disk - adapter = new RemoteViewerStorageAdapter(showViewer, dir, name, false, 0,0, - null, savingQueueSize); - } + adapter = new RemoteViewerStorageAdapter(showViewer, dir, name, false, 0,0, + null, savingQueueSize); + return new RemoteAcquisition(eventSource, adapter, debug); } diff --git a/java/src/main/java/org/micromanager/remote/RemoteImageProcessor.java b/java/src/main/java/org/micromanager/remote/RemoteImageProcessor.java index 141d3078..c825c59f 100644 --- a/java/src/main/java/org/micromanager/remote/RemoteImageProcessor.java +++ b/java/src/main/java/org/micromanager/remote/RemoteImageProcessor.java @@ -5,6 +5,7 @@ */ package org.micromanager.remote; +import java.util.concurrent.BlockingQueue; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingDeque; @@ -30,7 +31,7 @@ public class RemoteImageProcessor implements TaggedImageProcessor { private ExecutorService pushExecutor_, pullExecutor_; - volatile LinkedBlockingDeque source_, sink_; + volatile BlockingQueue source_, sink_; ZMQPushSocket pushSocket_; ZMQPullSocket pullSocket_; @@ -97,7 +98,7 @@ public void startPush() { while (true) { if (source_ != null) { 
try { - TaggedImage img = source_.takeFirst(); + TaggedImage img = source_.take(); pushSocket_.push(img); if (img.tags == null && img.pix == null) { // all images have been pushed @@ -124,7 +125,7 @@ public void startPull() { if (sink_ != null) { try { TaggedImage ti = pullSocket_.next(); - sink_.putLast(ti); + sink_.put(ti); if (ti.pix == null && ti.tags == null) { pullExecutor_.shutdown(); break; @@ -146,6 +147,12 @@ public void startPull() { @Override public void setAcqAndDequeues(AcquisitionAPI acq, LinkedBlockingDeque source, LinkedBlockingDeque sink) { + // This is deprecated, remove in a future version once its taken out of API + } + + @Override + public void setAcqAndQueues(AcquisitionAPI acq, BlockingQueue source, + BlockingQueue sink) { source_ = source; sink_ = sink; } diff --git a/java/src/main/java/org/micromanager/remote/RemoteNotificationHandler.java b/java/src/main/java/org/micromanager/remote/RemoteNotificationHandler.java index f2caddbc..8b1e4ad9 100644 --- a/java/src/main/java/org/micromanager/remote/RemoteNotificationHandler.java +++ b/java/src/main/java/org/micromanager/remote/RemoteNotificationHandler.java @@ -9,9 +9,7 @@ import org.micromanager.acqj.api.AcqNotificationListener; import org.micromanager.acqj.api.AcquisitionAPI; import org.micromanager.acqj.main.AcqNotification; -import org.micromanager.acqj.main.Acquisition; import org.micromanager.internal.zmq.ZMQPushSocket; -import org.micromanager.ndtiffstorage.IndexEntryData; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -55,6 +53,8 @@ public void run() { public void start() { //constantly poll the socket for more event sequences to submit executor_.submit(() -> { + boolean eventsFinished = false; + boolean dataSinkFinished = false; while (true) { AcqNotification e = null; try { @@ -66,8 +66,14 @@ public void start() { } pushSocket_.push(e); - if (e.isAcquisitionFinishedNotification()) { - return; + if (e.isAcquisitionEventsFinishedNotification()) { 
+ eventsFinished = true; + } + if (e.isDataSinkFinishedNotification()) { + dataSinkFinished = true; + } + if (eventsFinished && dataSinkFinished) { + break; } } }); @@ -80,7 +86,8 @@ public void postNotification(AcqNotification n) { /** * Called by the python side to signal that the final shutdown signal has been received - * and that the push socket can be closed + * and that the push socket can be closed. Because otherwise it wouldn't be possible + * to know when the ZMQ push socket has finished doing its thing */ public void notificationHandlingComplete() { executor_.submit(() -> { diff --git a/java/src/main/java/org/micromanager/remote/RemoteStorageMonitor.java b/java/src/main/java/org/micromanager/remote/RemoteStorageMonitor.java index e6890299..2dbcc2f1 100644 --- a/java/src/main/java/org/micromanager/remote/RemoteStorageMonitor.java +++ b/java/src/main/java/org/micromanager/remote/RemoteStorageMonitor.java @@ -19,6 +19,7 @@ * A class that broadcasts information about images that have finsihed saving to disk * @author henrypinkard */ +@Deprecated public class RemoteStorageMonitor implements ImageWrittenListener { private ZMQPushSocket pushSocket_; diff --git a/java/src/main/java/org/micromanager/remote/RemoteViewerStorageAdapter.java b/java/src/main/java/org/micromanager/remote/RemoteViewerStorageAdapter.java index 02f8b041..e90f1763 100644 --- a/java/src/main/java/org/micromanager/remote/RemoteViewerStorageAdapter.java +++ b/java/src/main/java/org/micromanager/remote/RemoteViewerStorageAdapter.java @@ -11,6 +11,8 @@ import org.micromanager.acqj.api.AcqEngJDataSink; import org.micromanager.acqj.main.Acquisition; import org.micromanager.acqj.internal.Engine; +import org.micromanager.ndtiffstorage.IndexEntryData; +import org.micromanager.ndtiffstorage.NDRAMStorage; import org.micromanager.ndtiffstorage.NDTiffStorage; import org.micromanager.ndtiffstorage.MultiresNDTiffAPI; import org.micromanager.ndtiffstorage.NDTiffAPI; @@ -34,8 +36,8 @@ class 
RemoteViewerStorageAdapter implements NDViewerDataSource, AcqEngJDataSink, private volatile NDViewerAPI viewer_; private volatile Acquisition acq_; - private volatile MultiresNDTiffAPI storage_; - private final boolean showViewer_, storeData_, xyTiled_; + private volatile NDTiffAPI storage_; + private final boolean showViewer_, xyTiled_; private final int tileOverlapX_, tileOverlapY_; private String dir_; private String name_; @@ -58,7 +60,6 @@ public RemoteViewerStorageAdapter(boolean showViewer, String dataStorageLocation int tileOverlapY, Integer maxResLevel, int savingQueueSize) { showViewer_ = showViewer; - storeData_ = dataStorageLocation != null; xyTiled_ = xyTiled; dir_ = dataStorageLocation; name_ = name; @@ -71,13 +72,19 @@ public RemoteViewerStorageAdapter(boolean showViewer, String dataStorageLocation public void initialize(Acquisition acq, JSONObject summaryMetadata) { acq_ = acq; - if (storeData_) { - if (xyTiled_) { - //tiled datasets have a fixed, acquisition-wide image size - AcqEngMetadata.setWidth(summaryMetadata, (int) Engine.getCore().getImageWidth()); - AcqEngMetadata.setHeight(summaryMetadata, (int) Engine.getCore().getImageHeight()); - } + if (xyTiled_) { + //tiled datasets have a fixed, acquisition-wide image size + AcqEngMetadata.setWidth(summaryMetadata, (int) Engine.getCore().getImageWidth()); + AcqEngMetadata.setHeight(summaryMetadata, (int) Engine.getCore().getImageHeight()); + } + + if (dir_ == null) { + storage_ = new NDRAMStorage(summaryMetadata); + if (name_ == null) { + name_ = "In RAM acquisition"; + } + } else { storage_ = new NDTiffStorage(dir_, name_, summaryMetadata, tileOverlapX_, tileOverlapY_, xyTiled_, maxResLevel_, savingQueueSize_, @@ -87,7 +94,6 @@ public void initialize(Acquisition acq, JSONObject summaryMetadata) { }) : null, true ); name_ = storage_.getUniqueAcqName(); - } if (showViewer_) { @@ -119,32 +125,30 @@ private void createDisplay(JSONObject summaryMetadata) { viewer_.setReadZMetadataFunction((JSONObject 
tags) -> AcqEngMetadata.getStageZIntended(tags)); } - public void putImage(final TaggedImage taggedImg) { + public Object putImage(final TaggedImage taggedImg) { HashMap axes = AcqEngMetadata.getAxes(taggedImg.tags); - final Future added; + final Future added; if (xyTiled_) { - added = storage_.putImageMultiRes(taggedImg.pix, taggedImg.tags, axes, + added = ((MultiresNDTiffAPI)storage_).putImageMultiRes(taggedImg.pix, taggedImg.tags, axes, AcqEngMetadata.isRGB(taggedImg.tags), AcqEngMetadata.getBitDepth(taggedImg.tags), AcqEngMetadata.getHeight(taggedImg.tags), AcqEngMetadata.getWidth(taggedImg.tags)); } else { - added = null; - storage_.putImage(taggedImg.pix, taggedImg.tags, axes, + added = storage_.putImage(taggedImg.pix, taggedImg.tags, axes, AcqEngMetadata.isRGB(taggedImg.tags), AcqEngMetadata.getBitDepth(taggedImg.tags), AcqEngMetadata.getHeight(taggedImg.tags), AcqEngMetadata.getWidth(taggedImg.tags)); } - if (showViewer_) { //put on different thread to not slow down acquisition displayCommunicationExecutor_.submit(new Runnable() { @Override public void run() { try { - if (added != null) { + if (xyTiled_) { // This is needed to make sure multi res data at higher // resolutions kept up to date I think because lower resolutions // aren't stored temporarily. 
This could potentially be @@ -165,6 +169,11 @@ public void run() { } }); } + try { + return added.get(); + } catch (Exception e) { + throw new RuntimeException(e); + } } ///////// Data source interface for Viewer ////////// @@ -177,9 +186,13 @@ public int[] getBounds() { public TaggedImage getImageForDisplay(HashMap axes, int resolutionindex, double xOffset, double yOffset, int imageWidth, int imageHeight) { - return storage_.getDisplayImage( - axes, resolutionindex, (int) xOffset, (int) yOffset, - imageWidth, imageHeight); + if (storage_ instanceof MultiresNDTiffAPI) { + return ((MultiresNDTiffAPI) storage_).getDisplayImage( + axes, resolutionindex, (int) xOffset, (int) yOffset, + imageWidth, imageHeight); + } else { + return storage_.getSubImage(axes, (int) xOffset, (int) yOffset, imageWidth, imageHeight); + } } @Override @@ -189,12 +202,17 @@ public Set> getImageKeys() { @Override public int getMaxResolutionIndex() { - return storage_.getNumResLevels() - 1; + if (storage_ instanceof MultiresNDTiffAPI) { + return ((MultiresNDTiffAPI) storage_).getNumResLevels() - 1; + } + return 0; } @Override public void increaseMaxResolutionLevel(int newMaxResolutionLevel) { - storage_.increaseMaxResolutionLevel(newMaxResolutionLevel); + if (storage_ instanceof MultiresNDTiffAPI) { + ((MultiresNDTiffAPI) storage_).increaseMaxResolutionLevel(newMaxResolutionLevel); + } } @Override diff --git a/pycromanager/__init__.py b/pycromanager/__init__.py index 4eeb6ed3..242f02aa 100644 --- a/pycromanager/__init__.py +++ b/pycromanager/__init__.py @@ -1,8 +1,10 @@ name = "pycromanager" -from pycromanager.acquisitions import Acquisition, MagellanAcquisition, XYTiledAcquisition, ExploreAcquisition -from pycromanager.acq_util import start_headless, multi_d_acquisition_events -from ndtiff import Dataset -from pycromanager.mm_java_classes import Studio, Magellan, Core +from pycromanager.acquisition.java_backend_acquisitions import JavaBackendAcquisition, MagellanAcquisition, XYTiledAcquisition, 
ExploreAcquisition +from pycromanager.acquisition.acquisition_superclass import multi_d_acquisition_events +from pycromanager.acquisition.acq_constructor import Acquisition +from pycromanager.headless import start_headless, stop_headless +from pycromanager.mm_java_classes import Studio, Magellan +from pycromanager.core import Core from pycromanager.zmq_bridge.wrappers import JavaObject, JavaClass, PullSocket, PushSocket from ._version import __version__, version_info diff --git a/pycromanager/_version.py b/pycromanager/_version.py index 03d6b15b..7feff9bf 100644 --- a/pycromanager/_version.py +++ b/pycromanager/_version.py @@ -1,2 +1,2 @@ -version_info = (0, 28, 2) +version_info = (0, 29, 0) __version__ = ".".join(map(str, version_info)) diff --git a/pycromanager/notifications.py b/pycromanager/acq_future.py similarity index 60% rename from pycromanager/notifications.py rename to pycromanager/acq_future.py index 4f5841e5..d4d0f581 100644 --- a/pycromanager/notifications.py +++ b/pycromanager/acq_future.py @@ -1,55 +1,5 @@ import threading - -class AcqNotification: - - class Global: - ACQ_STARTED = "acq_started" - ACQ_FINISHED = "acq_finished" - - class Hardware: - PRE_HARDWARE = "pre_hardware" - POST_HARDWARE = "post_hardware" - - class Camera: - PRE_SEQUENCE_STARTED = "pre_sequence_started" - PRE_SNAP = "pre_snap" - POST_EXPOSURE = "post_exposure" - - class Image: - IMAGE_SAVED = "image_saved" - - def __init__(self, type, axes, phase=None): - if type is None: - # then figure it out based on the phase - if phase in [AcqNotification.Camera.PRE_SNAP, AcqNotification.Camera.POST_EXPOSURE, - AcqNotification.Camera.PRE_SEQUENCE_STARTED]: - type = AcqNotification.Camera - elif phase in [AcqNotification.Hardware.PRE_HARDWARE, AcqNotification.Hardware.POST_HARDWARE]: - type = AcqNotification.Hardware - elif phase == AcqNotification.Image.IMAGE_SAVED: - type = AcqNotification.Image - else: - raise ValueError("Unknown phase") - self.type = type - self.phase = phase - 
self.axes = axes - - @staticmethod - def make_image_saved_notification(axes): - return AcqNotification(AcqNotification.Image, axes, AcqNotification.Image.IMAGE_SAVED) - - def to_json(self): - return { - 'type': self.type, - 'phase': self.phase, - 'axes': self.axes, - } - - @staticmethod - def from_json(json): - return AcqNotification(json['type'], json['axes'] if 'axes' in json else None, - json['phase'] if 'phase' in json else None) - +from pycromanager.acquisition.acq_eng_py.main.acq_notification import AcqNotification def _axes_to_key(axes_or_axes_list): """ Turn axes into a hashable key """ @@ -88,7 +38,7 @@ def _notify(self, notification): received. Want to store this, rather than just waiting around for it, in case the await methods are called after the notification has already been sent. """ - if notification.type == AcqNotification.Global.ACQ_FINISHED: + if notification.type == AcqNotification.Acquisition.ACQ_EVENTS_FINISHED: return # ignore for now... key = _axes_to_key(notification.axes) if key not in self._notification_recieved.keys(): diff --git a/pycromanager/acq_util.py b/pycromanager/acq_util.py deleted file mode 100644 index 3a352c3f..00000000 --- a/pycromanager/acq_util.py +++ /dev/null @@ -1,301 +0,0 @@ -import subprocess -import platform -import atexit -import threading - -from pycromanager.zmq_bridge._bridge import _Bridge -import copy -import types -import numpy as np -from typing import Union, List, Iterable - -SUBPROCESSES = [] - -def stop_headless(): - for p in SUBPROCESSES: - p.terminate() - p.wait() # wait for process to terminate - SUBPROCESSES.clear() - -# make sure any Java processes are cleaned up when Python exits -atexit.register(stop_headless) - -def start_headless( - mm_app_path: str, config_file: str='', java_loc: str=None, core_log_path: str='', - buffer_size_mb: int=1024, max_memory_mb: int=2000, - port: int=_Bridge.DEFAULT_PORT, debug=False): - """ - Start a Java process that contains the neccessary libraries for pycro-manager 
to run, - so that it can be run independently of the Micro-Manager GUI/application. This calls - will create and initialize MMCore with the configuration file provided. - - On windows plaforms, the Java Runtime Environment will be grabbed automatically - as it is installed along with the Micro-Manager application. - - On non-windows platforms, it may need to be installed/specified manually in order to ensure compatibility. - Installing Java 11 is the most likely version to work without issue - - Parameters - ---------- - mm_app_path : str - Path to top level folder of Micro-Manager installation (made with graphical installer) - config_file : str - Path to micro-manager config file, with which core will be initialized. If None then initialization - is left to the user. - java_loc: str - Path to the java version that it should be run with - core_log_path : str - Path to where core log files should be created - buffer_size_mb : int - Size of circular buffer in MB in MMCore - max_memory_mb : int - Maximum amount of memory to be allocated to JVM - port : int - Default port to use for ZMQServer - debug : bool - Print debug messages - """ - - classpath = mm_app_path + '/plugins/Micro-Manager/*' - if java_loc is None: - if platform.system() == "Windows": - # windows comes with its own JRE - java_loc = mm_app_path + "/jre/bin/javaw.exe" - else: - java_loc = "java" - # This starts Java process and instantiates essential objects (core, - # acquisition engine, ZMQServer) - process = subprocess.Popen( - [ - java_loc, - "-classpath", - classpath, - "-Dsun.java2d.dpiaware=false", - f"-Xmx{max_memory_mb}m", - - # This is used by MM desktop app but breaks things on MacOS...Don't think its neccessary - # "-XX:MaxDirectMemorySize=1000", - "org.micromanager.remote.HeadlessLauncher", - str(port), - config_file, - str(buffer_size_mb), - core_log_path, - ], cwd=mm_app_path, stdout=subprocess.PIPE - ) - SUBPROCESSES.append(process) - - started = False - output = True - # Some drivers 
output various status messages which need to be skipped over to look for the STARTED token. - while output and not started: - output = process.stdout.readline() - started = "STARTED" in output.decode('utf-8') - if not started: - raise Exception('Error starting headless mode') - if debug: - print('Headless mode started') - def logger(): - while True: - print(process.stdout.readline().decode('utf-8')) - threading.Thread(target=logger).start() - -def multi_d_acquisition_events( - num_time_points: int=None, - time_interval_s: Union[float, List[float]]=0, - z_start: float=None, - z_end: float=None, - z_step: float=None, - channel_group: str=None, - channels: list=None, - channel_exposures_ms: list=None, - xy_positions: Iterable=None, - xyz_positions: Iterable=None, - position_labels: List[str]=None, - order: str="tpcz", - keep_shutter_open_between_channels: bool=False, - keep_shutter_open_between_z_steps: bool=False, -): - """Convenience function for generating the events of a typical multi-dimensional acquisition (i.e. an - acquisition with some combination of multiple timepoints, channels, z-slices, or xy positions) - - Parameters - ---------- - num_time_points : int - How many time points if it is a timelapse (Default value = None) - time_interval_s : float or list of floats - the minimum interval between consecutive time points in seconds. If set to 0, the - acquisition will go as fast as possible. If a list is provided, its length should - be equal to 'num_time_points'. Elements in the list are assumed to be the intervals - between consecutive timepoints in the timelapse. First element in the list indicates - delay before capturing the first image (Default value = 0) - z_start : float - z-stack starting position, in µm. If xyz_positions is given z_start is relative - to the points' z position. (Default value = None) - z_end : float - z-stack ending position, in µm. If xyz_positions is given z_start is - relative to the points' z position. 
(Default value = None) - z_step : float - step size of z-stack, in µm (Default value = None) - channel_group : str - name of the channel group (which should correspond to a config group in micro-manager) (Default value = None) - channels : list of strings - list of channel names, which correspond to possible settings of the config group - (e.g. ['DAPI', 'FITC']) (Default value = None) - channel_exposures_ms : list of floats or ints - list of camera exposure times corresponding to each channel. The length of this list - should be the same as the the length of the list of channels (Default value = None) - xy_positions : iterable - An array of shape (N, 2) containing N (X, Y) stage coordinates. (Default value = None) - xyz_positions : iterable - An array of shape (N, 3) containing N (X, Y, Z) stage coordinates. (Default value = None). - If passed then z_start, z_end, and z_step will be relative to the z_position in xyz_positions. (Default value = None) - position_labels : iterable - An array of length N containing position labels for each of the XY stage positions. (Default value = None) - order : str - string that specifies the order of different dimensions. Must have some ordering of the letters - c, t, p, and z. For example, 'tcz' would run a timelapse where z stacks would be acquired at each channel in - series. 
'pt' would move to different xy stage positions and run a complete timelapse at each one before moving - to the next (Default value = 'tpcz') - keep_shutter_open_between_channels : bool - don't close the shutter in between channels (Default value = False) - keep_shutter_open_between_z_steps : bool - don't close the shutter during steps of a z stack (Default value = False) - - Returns - ------- - events : dict - """ - if xy_positions is not None and xyz_positions is not None: - raise ValueError( - "xyz_positions and xy_positions are incompatible arguments that cannot be passed together" - ) - order = order.lower() - if "p" in order and "z" in order and order.index("p") > order.index("z"): - raise ValueError( - "This function requires that the xy position come earlier in the order than z" - ) - if isinstance(time_interval_s, list): - if len(time_interval_s) != num_time_points: - raise ValueError( - "Length of time interval list should be equal to num_time_points" - ) - if position_labels is not None: - if xy_positions is not None and len(xy_positions) != len(position_labels): - raise ValueError("xy_positions and position_labels must be of equal length") - if xyz_positions is not None and len(xyz_positions) != len(position_labels): - raise ValueError("xyz_positions and position_labels must be of equal length") - - # If any of z_start, z_step, z_end are provided, then they should all be provided - # Here we can't use `all` as some of the values of z_start, z_step, z_end - # may be zero and all((0,)) = False - has_zsteps = False - if any([z_start, z_step, z_end]): - if not None in [z_start, z_step, z_end]: - has_zsteps = True - else: - raise ValueError('All of z_start, z_step, and z_end must be provided') - - z_positions = None - if xy_positions is not None: - xy_positions = np.asarray(xy_positions) - z_positions = None - elif xyz_positions is not None: - xyz_positions = np.asarray(xyz_positions) - xy_positions = xyz_positions[:, :2] - z_positions = xyz_positions[:, 
2][:, None] - - if has_zsteps: - z_rel = np.arange(z_start, z_end + z_step, z_step) - if z_positions is None: - z_positions = z_rel - if xy_positions is not None: - z_positions = np.broadcast_to( - z_positions, (xy_positions.shape[0], z_positions.shape[0]) - ) - else: - pos = [] - for z in z_positions: - pos.append(z + z_rel) - z_positions = np.asarray(pos) - - if position_labels is None and xy_positions is not None: - position_labels = list(range(len(xy_positions))) - - def generate_events(event, order): - if len(order) == 0: - yield event - return - elif order[0] == "t" and num_time_points is not None and num_time_points > 0: - time_indices = np.arange(num_time_points) - if isinstance(time_interval_s, list): - absolute_start_times = np.cumsum(time_interval_s) - for time_index in time_indices: - new_event = copy.deepcopy(event) - new_event["axes"]["time"] = time_index - if isinstance(time_interval_s, list): - new_event["min_start_time"] = absolute_start_times[time_index] - else: - if time_interval_s != 0: - new_event["min_start_time"] = time_index * time_interval_s - yield generate_events(new_event, order[1:]) - elif order[0] == "z" and z_positions is not None: - if "axes" in event and "position" in event["axes"]: - pos_idx = position_labels.index(event["axes"]["position"]) - zs = z_positions[pos_idx] - else: - zs = z_positions - - for z_index, z in enumerate(zs): - new_event = copy.deepcopy(event) - new_event["axes"]["z"] = z_index - new_event["z"] = z - if keep_shutter_open_between_z_steps: - new_event["keep_shutter_open"] = True - yield generate_events(new_event, order[1:]) - elif order[0] == "p" and xy_positions is not None: - for p_label, xy in zip(position_labels, xy_positions): - new_event = copy.deepcopy(event) - new_event["axes"]["position"] = p_label - new_event["x"] = xy[0] - new_event["y"] = xy[1] - yield generate_events(new_event, order[1:]) - elif order[0] == "c" and channel_group is not None and channels is not None: - for i in range(len(channels)): 
# A class for holding data in RAM

import threading

import numpy as np


class RAMDataStorage:
    """
    Holds acquired images and their metadata in RAM.

    Implements the methods needed to act as a DataSink for AcqEngPy. Each image
    is keyed by a frozenset of its (axis name, axis position) pairs.
    """

    def __init__(self):
        self.finished = False
        self.images = {}  # frozenset(axes.items()) -> pixel array
        self.image_metadata = {}  # frozenset(axes.items()) -> tags dict
        self.axes = {}  # axis name -> SortedSet of positions seen so far
        self.finished_event = threading.Event()

    def initialize(self, acq, summary_metadata: dict):
        """Called by the acquisition engine before any images arrive."""
        self.summary_metadata = summary_metadata

    def block_until_finished(self, timeout=None):
        """Block until finish() is called, or until the optional timeout (seconds) elapses."""
        self.finished_event.wait(timeout=timeout)

    def finish(self):
        """Mark the dataset complete and wake any thread waiting in block_until_finished()."""
        self.finished = True
        # BUG FIX: the event was never set, so block_until_finished() waited
        # forever (or until its timeout) even after the acquisition ended
        self.finished_event.set()

    def is_finished(self) -> bool:
        return self.finished

    def put_image(self, tagged_image):
        """Store one acquired image (pixels + tags), indexing it by its axes."""
        # Local imports so this module can be imported (and the read methods
        # used) without the acquisition engine or sortedcontainers installed
        from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata
        from sortedcontainers import SortedSet

        self.bytes_per_pixel = tagged_image.pix.dtype.itemsize
        self.dtype = tagged_image.pix.dtype
        tags = tagged_image.tags
        axes = AcqEngMetadata.get_axes(tags)
        key = frozenset(axes.items())
        self.images[key] = tagged_image.pix
        self.image_metadata[key] = tags
        for axis in axes.keys():
            if axis not in self.axes:
                self.axes[axis] = SortedSet()
            self.axes[axis].add(axes[axis])
        self._new_image_arrived = True

    def anything_acquired(self) -> bool:
        """Return True once at least one image has been stored."""
        return self.images != {}

    def has_image(self, channel: int or str, z: int, position: int,
                  time: int, row: int, column: int, **kwargs):
        """Return True if an image exists at the given axis positions."""
        axes = self._consolidate_axes(channel, z, position, time, row, column, **kwargs)
        key = frozenset(axes.items())
        return key in self.images.keys()

    def read_image(self, channel=None, z=None, time=None, position=None, row=None, column=None, **kwargs):
        """Return the pixel data of the image at the given axis positions."""
        axes = self._consolidate_axes(channel, z, position, time, row, column, **kwargs)
        key = frozenset(axes.items())
        # BUG FIX: previously checked the nonexistent self.index, which raised
        # AttributeError on every call
        if key not in self.images:
            raise Exception("image with keys {} not present in data set".format(key))
        return self.images[key]

    def read_metadata(self, channel=None, z=None, time=None, position=None, row=None, column=None, **kwargs):
        """Return the metadata (tags) of the image at the given axis positions."""
        axes = self._consolidate_axes(channel, z, position, time, row, column, **kwargs)
        key = frozenset(axes.items())
        # BUG FIX: previously checked the nonexistent self.index
        if key not in self.image_metadata:
            raise Exception("image with keys {} not present in data set".format(key))
        return self.image_metadata[key]

    def _consolidate_axes(self, channel: int or str, z: int, position: int,
                          time: int, row: int, column: int, **kwargs):
        """
        Pack axes into a single dict, dropping unspecified (None) axes and
        translating integer indices of string-valued axes into their values.
        """
        axis_positions = {'channel': channel, 'z': z, 'position': position,
                          'time': time, 'row': row, 'column': column, **kwargs}
        # ignore ones that are None
        axis_positions = {n: v for n, v in axis_positions.items() if v is not None}
        for axis_name in axis_positions.keys():
            # BUG FIX: this used self.axes_types / self._string_axes_values,
            # neither of which was ever defined, so every lookup raised
            # AttributeError. Instead, infer string-valued axes from the
            # positions already recorded and translate an int into the value
            # at that position in the sorted set of known values.
            if axis_name in self.axes and type(axis_positions[axis_name]) == int:
                known = self.axes[axis_name]
                if len(known) > 0 and isinstance(next(iter(known)), str):
                    axis_positions[axis_name] = list(known)[axis_positions[axis_name]]
        return axis_positions

    def has_new_image(self):
        """
        For datasets currently being acquired, check whether a new image has arrived since this function
        was last called, so that a viewer displaying the data can be updated.
        """
        if not hasattr(self, '_new_image_arrived'):
            return False  # pre-initialization
        new = self._new_image_arrived
        self._new_image_arrived = False
        return new

    def as_array(self, axes=None, **kwargs):
        """
        Read all image data as one big numpy array with the last two axes as y, x and preceding axes
        depending on data. If the data doesn't fully fill out the array (e.g. not every z-slice
        collected at every time point), zeros will be added automatically.

        Modeled on the same function in the NDTiff library, but uses numpy arrays instead of
        dask arrays because the data is already in RAM.

        Parameters
        ----------
        axes : list
            list of axes names over which to iterate and merge into a stacked array. The order of
            axes supplied in this list will be the order of the axes of the returned array.
            If None, all axes will be used.

        **kwargs :
            names and integer positions of axes on which to slice data
            (NOTE(review): accepted but not applied in this implementation — confirm intended)
        """
        if axes is None:
            axes = self.axes.keys()

        # template image for filling holes in the hyperstack with zeros
        empty_image = np.zeros_like(list(self.images.values())[0])
        indices = [np.array(list(self.axes[axis_name])) for axis_name in list(axes)]
        gridded = np.meshgrid(*indices, indexing='ij')
        result = np.stack(gridded, axis=-1)
        flattened = result.reshape((-1, result.shape[-1]))
        images = []
        for coord in flattened:
            images_key = {key: coord[i] for i, key in enumerate(axes)}
            key = frozenset(images_key.items())
            if key in self.images.keys():
                images.append(self.images[key])
            else:
                images.append(empty_image)
        # reshape to Num axes + image size dimensions
        cube = np.array(images).reshape(tuple(len(i) for i in indices) + empty_image.shape)
        return cube
It cant be the parent class of it, or else +# there will be a circular import +class Acquisition(PycromanagerAcquisitionBase): + def __new__(cls, + directory: str = None, + name: str = None, + image_process_fn: callable = None, + event_generation_hook_fn: callable = None, + pre_hardware_hook_fn: callable = None, + post_hardware_hook_fn: callable = None, + post_camera_hook_fn: callable = None, + notification_callback_fn: callable = None, + image_saved_fn: callable = None, + napari_viewer=None, + debug: int = False, + **kwargs): + # package up all the arguments + arg_names = [k for k in signature(Acquisition.__init__).parameters.keys() if k != 'self'] + l = locals() + named_args = {arg_name: (l[arg_name] if arg_name in l else + dict(signature(Acquisition.__init__).parameters.items())[arg_name].default) + for arg_name in arg_names } + + if _PYMMCORES: + # Python backend detected, so create a python backend acquisition + specific_arg_names = [k for k in signature(JavaBackendAcquisition.__init__).parameters.keys() if k != 'self'] + for name in specific_arg_names: + if name in kwargs: + named_args[name] = kwargs[name] + return PythonBackendAcquisition(**named_args) + else: + # add any kwargs are specific to java backend + specific_arg_names = [k for k in signature(JavaBackendAcquisition.__init__).parameters.keys() if k != 'self'] + for name in specific_arg_names: + if name in kwargs: + named_args[name] = kwargs[name] + return JavaBackendAcquisition(**named_args) diff --git a/pycromanager/acquisition/acq_eng_py/__init__.py b/pycromanager/acquisition/acq_eng_py/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pycromanager/acquisition/acq_eng_py/internal/__init__.py b/pycromanager/acquisition/acq_eng_py/internal/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pycromanager/acquisition/acq_eng_py/internal/engine.py b/pycromanager/acquisition/acq_eng_py/internal/engine.py new file mode 100644 index 00000000..875c6705 --- /dev/null 
# Python port of AcqEngJ's acquisition engine: executes AcquisitionEvents on
# dedicated worker threads, drives hardware through a pymmcore-compatible core
# object, and copies acquired images into the acquisition's output queue.

import traceback
from concurrent.futures import Future
from concurrent.futures import ThreadPoolExecutor
import time
import datetime

from pycromanager.acquisition.acq_eng_py.main.acquisition_event import AcquisitionEvent
from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata
from pycromanager.acquisition.acq_eng_py.internal.hardware_sequences import HardwareSequences
import pymmcore
from pycromanager.acquisition.acq_eng_py.main.acq_notification import AcqNotification

# Number of times to retry a failing hardware command before giving up
HARDWARE_ERROR_RETRIES = 6
DELAY_BETWEEN_RETRIES_MS = 5


class HardwareControlException(Exception):
    """Raised when a hardware command fails (after retries, where applicable)."""

    def __init__(self, message=""):
        super().__init__(message)


class Engine:
    """Singleton acquisition engine. The first instance constructed becomes the
    singleton accessible through get_instance()/get_core()."""

    def __init__(self, core):
        # 'core' is a pymmcore-compatible object. NOTE(review): most of this
        # module calls snake_case method names — confirm the core wrapper in
        # use provides snake_case aliases for pymmcore's camelCase API.
        if not hasattr(Engine, 'singleton'):
            Engine.singleton = self
        self.last_event = None
        self.core = core
        # Single-worker executors serialize hardware control and event generation
        self.acq_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix='Acquisition Engine Thread')
        self.event_generator_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix='Acq Eng event generator')
        # Buffer of events that may be merged into a single hardware-sequenced event
        self.sequenced_events = []

    def shutdown(self):
        """Stop both worker threads."""
        self.acq_executor.shutdown()
        self.event_generator_executor.shutdown()

    @staticmethod
    def get_core():
        return Engine.singleton.core

    @staticmethod
    def get_instance():
        return Engine.singleton

    def finish_acquisition(self, acq):
        """Discard pending sequenced events, send the acquisition-finished
        event, and block until all events have completed. Returns a Future."""
        def finish_acquisition_inner():
            if acq.is_debug_mode():
                self.core.log_message("received acquisition finished signal")
            self.sequenced_events.clear()
            if acq.is_debug_mode():
                self.core.log_message("creating acquisition finished event")
            self.execute_acquisition_event(AcquisitionEvent.create_acquisition_finished_event(acq))
            acq.block_until_events_finished()

        return self.event_generator_executor.submit(finish_acquisition_inner)

    def submit_event_iterator(self, event_iterator):
        """Consume an iterator of AcquisitionEvents on the event-generation
        thread, processing each in order. Returns a Future."""
        def submit_event_iterator_inner():
            acq = None
            while True:
                try:
                    event = next(event_iterator, None)
                except StopIteration:
                    # a generator that raised StopIteration internally
                    traceback.print_exc()
                    break
                if event is None:
                    break  # iterator exhausted
                acq = event.acquisition_
                if acq.is_debug_mode():
                    self.core.log_message("got event: " + event.to_string())
                for h in event.acquisition_.get_event_generation_hooks():
                    event = h.run(event)
                    if event is None:
                        return  # hook cancelled the event
                while event.acquisition_.is_paused():
                    time.sleep(0.005)
                try:
                    if acq.is_abort_requested():
                        if acq.is_debug_mode():
                            self.core.log_message("acquisition aborted")
                        return
                    image_acquired_future = self.process_acquisition_event(event)
                    image_acquired_future.result()
                except Exception as ex:
                    traceback.print_exc()
                    acq.abort(ex)
                    raise ex

            # Flush any events still buffered for sequencing.
            # BUG FIX: if the iterator produced no events there is no
            # acquisition to create a sequence-end event for.
            if acq is not None:
                last_image_future = self.process_acquisition_event(
                    AcquisitionEvent.create_acquisition_sequence_end_event(acq))
                last_image_future.result()

        return self.event_generator_executor.submit(submit_event_iterator_inner)

    def process_acquisition_event(self, event: AcquisitionEvent) -> Future:
        """Buffer the event for hardware sequencing; when the incoming event
        cannot extend the buffered sequence, merge and execute the buffer."""
        def process_acquisition_event_inner():
            try:
                if event.acquisition_.is_debug_mode():
                    self.core.log_message("Processing event: " + event.to_string())
                if event.acquisition_.is_debug_mode():
                    self.core.log_message("checking for sequencing")
                if not self.sequenced_events and not event.is_acquisition_sequence_end_event():
                    # start a new (potential) sequence
                    self.sequenced_events.append(event)
                elif self.is_sequencable(self.sequenced_events, event, len(self.sequenced_events) + 1):
                    # merge event into the sequence
                    self.sequenced_events.append(event)
                else:
                    # sequence cannot be extended: execute what has accumulated
                    sequence_event = self.merge_sequence_event(self.sequenced_events)
                    self.sequenced_events.clear()
                    # Add in the start of the new sequence
                    if not event.is_acquisition_sequence_end_event():
                        self.sequenced_events.append(event)
                    if event.acquisition_.is_debug_mode():
                        self.core.log_message("executing acquisition event")
                    try:
                        self.execute_acquisition_event(sequence_event)
                    except HardwareControlException as e:
                        raise e
            except Exception as e:
                traceback.print_exc()
                if self.core.is_sequence_running():
                    self.core.stop_sequence_acquisition()
                raise e

        return self.acq_executor.submit(process_acquisition_event_inner)

    def execute_acquisition_event(self, event: AcquisitionEvent):
        """Run one (possibly merged) event: wait for its start time, run hooks,
        prepare hardware, and acquire image(s)."""
        # check if we should pause until the minimum start time of the event has occurred
        while event.get_minimum_start_time_absolute() is not None and \
                time.time() * 1000 < event.get_minimum_start_time_absolute():
            wait_time = event.get_minimum_start_time_absolute() - time.time() * 1000
            event.acquisition_.block_unless_aborted(wait_time)

        if event.is_acquisition_finished_event():
            # signal to finish saving thread and mark acquisition as finished
            if event.acquisition_.are_events_finished():
                return  # Duplicate finishing event, possibly from x-ing out viewer

            # send acquisition finished message so things shut down properly
            for h in event.acquisition_.get_event_generation_hooks():
                h.run(event)
                h.close()
            for h in event.acquisition_.get_before_hardware_hooks():
                h.run(event)
                h.close()
            for h in event.acquisition_.get_after_hardware_hooks():
                h.run(event)
                h.close()
            for h in event.acquisition_.get_after_camera_hooks():
                h.run(event)
                h.close()
            for h in event.acquisition_.get_after_exposure_hooks():
                h.run(event)
                h.close()
            # a null TaggedImage acts as the sentinel that ends the output stream
            event.acquisition_.add_to_output(self.core.TaggedImage(None, None))
            event.acquisition_.post_notification(AcqNotification.create_acq_events_finished_notification())
        else:
            event.acquisition_.post_notification(AcqNotification(
                AcqNotification.Hardware, event.axisPositions_, AcqNotification.Hardware.PRE_HARDWARE))
            for h in event.acquisition_.get_before_hardware_hooks():
                event = h.run(event)
                if event is None:
                    return  # The hook cancelled this event
                self.abort_if_requested(event, None)
            hardware_sequences_in_progress = HardwareSequences()
            try:
                self.prepare_hardware(event, hardware_sequences_in_progress)
            except HardwareControlException as e:
                self.stop_hardware_sequences(hardware_sequences_in_progress)
                raise e
            event.acquisition_.post_notification(AcqNotification(
                AcqNotification.Hardware, event.axisPositions_, AcqNotification.Hardware.POST_HARDWARE))
            for h in event.acquisition_.get_after_hardware_hooks():
                event = h.run(event)
                if event is None:
                    return  # The hook cancelled this event
                self.abort_if_requested(event, hardware_sequences_in_progress)
            # Hardware hook may have modified wait time, so check again if we should
            # pause until the minimum start time of the event has occurred.
            while event.get_minimum_start_time_absolute() is not None and \
                    time.time() * 1000 < event.get_minimum_start_time_absolute():
                try:
                    self.abort_if_requested(event, hardware_sequences_in_progress)
                    wait_time = event.get_minimum_start_time_absolute() - time.time() * 1000
                    event.acquisition_.block_unless_aborted(wait_time)
                except Exception:
                    # Abort while waiting for next time point
                    return

            if event.should_acquire_image():
                if event.acquisition_.is_debug_mode():
                    self.core.log_message("acquiring image(s)")
                try:
                    self.acquire_images(event, hardware_sequences_in_progress)
                except TimeoutError:
                    # Don't abort on a timeout
                    # TODO: this could probably be an option added to the acquisition in the future
                    print("Timeout while acquiring images")

                # if the acquisition was aborted, make sure everything shuts down properly
                self.abort_if_requested(event, hardware_sequences_in_progress)

    def acquire_images(self, event: AcquisitionEvent, hardware_sequences_in_progress: HardwareSequences) -> None:
        """
        Acquire 1 or more images in a sequence, add some metadata, then
        put them into an output queue.

        If the event is a sequence and a sequence acquisition is started in the core,
        it should be completed by the time this method returns.
        """
        camera_image_counts = event.get_camera_image_counts(self.core.get_camera_device())
        if event.get_sequence() is not None and len(event.get_sequence()) > 1:
            # start sequences on one or more cameras
            for camera_device_name, image_count in camera_image_counts.items():
                event.acquisition_.post_notification(AcqNotification(
                    AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.PRE_SEQUENCE_STARTED))
                self.core.start_sequence_acquisition(
                    camera_device_name, camera_image_counts[camera_device_name], 0, True)
        else:
            # snap one image with no sequencing
            event.acquisition_.post_notification(AcqNotification(
                AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.PRE_SNAP))
            if event.get_camera_device_name() is not None:
                current_camera = self.core.get_camera_device()
                width = self.core.get_image_width()
                height = self.core.get_image_height()
                self.core.set_camera_device(event.get_camera_device_name())
                self.core.snap_image()
                self.core.set_camera_device(current_camera)
            else:
                # Unlike MMCoreJ, pymmcore does not automatically add this metadata when snapping,
                # so need to do it manually
                width = self.core.get_image_width()
                height = self.core.get_image_height()
                self.core.snap_image()
            event.acquisition_.post_notification(AcqNotification(
                AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.POST_EXPOSURE))
            for h in event.acquisition_.get_after_exposure_hooks():
                h.run(event)

        # get elapsed time
        current_time_ms = time.time() * 1000
        if event.acquisition_.get_start_time_ms() == -1:
            # first image, initialize
            event.acquisition_.set_start_time_ms(current_time_ms)

        # need to assign events to images as they come out, assuming they might be in arbitrary order,
        # but that each camera itself is ordered
        multi_cam_adapter_camera_event_lists = None
        if event.get_sequence() is not None:
            multi_cam_adapter_camera_event_lists = {}
            for cam_index in range(self.core.get_number_of_camera_channels()):
                multi_cam_adapter_camera_event_lists[cam_index] = []
                for e in event.get_sequence():
                    multi_cam_adapter_camera_event_lists[cam_index].append(e)

        # Run a hook after the camera sequence acquisition has started. This can be used for
        # external triggering of the camera (when it is in sequence mode).
        # note: SnapImage will block until exposure finishes.
        # If it is desired that AfterCameraHooks trigger cameras
        # in Snap mode, one possibility is that those hooks (or SnapImage) should run
        # in a separate thread, started after snapImage is started. But there is no
        # guarantee that the camera will be ready to accept a trigger at that point.
        for h in event.acquisition_.get_after_camera_hooks():
            h.run(event)

        if event.acquisition_.is_debug_mode():
            self.core.log_message("images acquired, copying from core")
        start_copy_time = time.time()
        # Loop through and collect all acquired images. There will be
        # (# of images in sequence) x (# of camera channels) of them
        timeout = False
        for i in range(0, 1 if event.get_sequence() is None else len(event.get_sequence())):
            if timeout:
                # Cancel the rest of the sequence
                self.stop_hardware_sequences(hardware_sequences_in_progress)
                break
            try:
                exposure = self.core.get_exposure() if event.get_exposure() is None else event.get_exposure()
            except Exception as ex:
                raise Exception("Couldn't get exposure from core") from ex
            num_cam_channels = self.core.get_number_of_camera_channels()

            need_to_run_after_exposure_hooks = len(event.acquisition_.get_after_exposure_hooks()) > 0
            for cam_index in range(num_cam_channels):
                ti = None
                camera_name = None
                while ti is None:
                    if event.acquisition_.is_abort_requested():
                        return
                    try:
                        if event.get_sequence() is not None and len(event.get_sequence()) > 1:
                            if self.core.is_buffer_overflowed():
                                raise Exception("Sequence buffer overflow")
                            try:
                                ti = self.core.pop_next_tagged_image()
                                camera_name = ti.tags["Camera"]
                            except Exception as e:
                                # continue waiting
                                if not self.core.is_sequence_running() and self.core.get_remaining_image_count() == 0:
                                    raise Exception("Expected images did not arrive in circular buffer")
                                # check if timeout has been exceeded. This is used in the case of a
                                # camera waiting for a trigger that never comes.
                                if event.get_sequence()[i].get_timeout_ms() is not None:
                                    # BUG FIX: get_timeout_ms() is in milliseconds but the elapsed
                                    # time was previously compared in seconds
                                    if (time.time() - start_copy_time) * 1000 > event.get_sequence()[i].get_timeout_ms():
                                        timeout = True
                                        self.core.stop_sequence_acquisition()
                                        while self.core.is_sequence_running():
                                            time.sleep(0.001)
                                        break
                        else:
                            try:
                                # TODO: probably there should be a timeout here too, but I'm
                                # not sure the snap_image system supports it (as opposed to sequences)
                                # This is a little different from the java version due to differences
                                # in metadata handling in the SWIG wrapper
                                camera_name = self.core.get_camera_device()
                                # BUG FIX: 'self' was erroneously passed as the first argument
                                ti = self.core.get_tagged_image(cam_index, camera_name, height, width)
                            except Exception as e:
                                # continue waiting
                                pass
                    except Exception as ex:
                        # Sequence buffer overflow
                        e = HardwareControlException(str(ex))
                        event.acquisition_.abort(e)
                        raise e
                if need_to_run_after_exposure_hooks:
                    # BUG FIX: previously the hooks ran even while a camera sequence was
                    # still in progress (the loop's break had no effect on the code after
                    # it); for/else runs them only once no sequence is still running
                    for camera_device_name in camera_image_counts.keys():
                        if self.core.is_sequence_running(camera_device_name):
                            # sequences not yet done; handle on a later iteration of the loop
                            break
                    else:
                        event.acquisition_.post_notification(AcqNotification(
                            AcqNotification.Camera, event.axisPositions_, AcqNotification.Camera.POST_EXPOSURE))
                        for h in event.acquisition_.get_after_exposure_hooks():
                            h.run(event)
                        need_to_run_after_exposure_hooks = False

                if timeout:
                    break
                # Doesn't seem to be a version in the API in which you don't have to do this
                actual_cam_index = cam_index
                if "Multi Camera-CameraChannelIndex" in ti.tags.keys():
                    actual_cam_index = ti.tags["Multi Camera-CameraChannelIndex"]
                    if num_cam_channels == 1:
                        # probably a mistake in the core....
                        actual_cam_index = 0  # Override index because not using multi cam mode right now

                corresponding_event = event
                if event.get_sequence() is not None:
                    # Find the event that corresponds to the camera that captured this image.
                    # This assumes that the images from a single camera are in order
                    # in the sequence, though different camera images may be interleaved
                    if event.get_sequence()[0].get_camera_device_name() is not None:
                        # camera is specified in the acquisition event. Find the first event that
                        # matches this camera name.
                        the_camera_name = camera_name
                        corresponding_event = next(filter(
                            lambda e: e.get_camera_device_name() is not None and
                                      e.get_camera_device_name() == the_camera_name,
                            multi_cam_adapter_camera_event_lists.get(actual_cam_index)))
                        multi_cam_adapter_camera_event_lists.get(actual_cam_index).remove(corresponding_event)
                    else:
                        # multi camera adapter or just using the default camera
                        corresponding_event = multi_cam_adapter_camera_event_lists.get(actual_cam_index).pop(0)
                # add standard metadata
                AcqEngMetadata.add_image_metadata(self.core, ti.tags, corresponding_event,
                                                  current_time_ms - corresponding_event.acquisition_.get_start_time_ms(),
                                                  exposure)
                # add user metadata specified in the event
                corresponding_event.acquisition_.add_tags_to_tagged_image(ti.tags, corresponding_event.get_tags())
                corresponding_event.acquisition_.add_to_image_metadata(ti.tags)
                corresponding_event.acquisition_.add_to_output(ti)

        if timeout:
            raise TimeoutError("Timeout waiting for images to arrive in circular buffer")

    def abort_if_requested(self, event: AcquisitionEvent, hardware_sequences_in_progress: HardwareSequences) -> None:
        """If the acquisition requested an abort, stop any running hardware sequences."""
        if event.acquisition_.is_abort_requested():
            if hardware_sequences_in_progress is not None:
                self.stop_hardware_sequences(hardware_sequences_in_progress)

    def stop_hardware_sequences(self, hardware_sequences_in_progress: HardwareSequences) -> None:
        """Stop all stage/camera and property sequences started for the current
        event, then clear the circular buffer."""
        # Stop any hardware sequences
        # NOTE(review): these calls use pymmcore's native camelCase names while most of
        # this module uses snake_case — confirm the core wrapper exposes both.
        for device_name in hardware_sequences_in_progress.device_names:
            try:
                if str(self.core.getDeviceType(device_name)) == "StageDevice":
                    self.core.stopStageSequence(device_name)
                elif str(self.core.getDeviceType(device_name)) == "XYStageDevice":
                    self.core.stopXYStageSequence(device_name)
                elif str(self.core.getDeviceType(device_name)) == "CameraDevice":
                    # BUG FIX: this branch compared the DeviceType object to a string
                    # without str(), so it could never match
                    self.core.stopSequenceAcquisition(self.core.getCameraDevice())
            except Exception:
                traceback.print_exc()
                self.core.logMessage("Error stopping hardware sequence: " + device_name)
        # Stop any property sequences
        for i in range(len(hardware_sequences_in_progress.property_names)):
            try:
                self.core.stopPropertySequence(hardware_sequences_in_progress.property_device_names[i],
                                               hardware_sequences_in_progress.property_names[i])
            except Exception as ee:
                traceback.print_exc()
                # BUG FIX: str + exception concatenation raised TypeError
                self.core.logMessage("Error stopping property sequence: " + str(ee))
        self.core.clear_circular_buffer()

    def prepare_hardware(self, event: AcquisitionEvent, hardware_sequences_in_progress: HardwareSequences) -> None:
        """Move stages, set channel/exposure/SLM/additional properties, and load
        any hardware sequences needed before image acquisition.

        Devices and properties whose sequences are started are recorded in
        hardware_sequences_in_progress so they can be stopped on abort/error.
        """
        def move_xy_stage(event):
            try:
                if event.is_xy_sequenced():
                    self.core.start_xy_stage_sequence(xy_stage)
                else:
                    # Could be sequenced over other devices; in that case get xy position
                    # from the first event in the sequence
                    prev_x_position = None if self.last_event is None else None \
                        if self.last_event.get_sequence() is None else \
                        self.last_event.get_sequence()[0].get_x_position()
                    x_position = event.get_sequence()[0].get_x_position() \
                        if event.get_sequence() is not None else event.get_x_position()
                    prev_y_position = None if self.last_event is None else None \
                        if self.last_event.get_sequence() is None else \
                        self.last_event.get_sequence()[0].get_y_position()
                    y_position = event.get_sequence()[0].get_y_position() \
                        if event.get_sequence() is not None else event.get_y_position()
                    previous_xy_defined = event is not None and prev_x_position is not None \
                        and prev_y_position is not None
                    current_xy_defined = event is not None and x_position is not None \
                        and y_position is not None
                    if not current_xy_defined:
                        return
                    xy_changed = not previous_xy_defined or not prev_x_position == x_position \
                        or not prev_y_position == y_position
                    if not xy_changed:
                        return
                    # Wait for it to not be busy (is this even needed?)
                    self.core.wait_for_device(xy_stage)
                    # Move XY
                    self.core.set_xy_position(xy_stage, x_position, y_position)
                    # Wait for move to finish
                    self.core.wait_for_device(xy_stage)
            except Exception as ex:
                self.core.log_message(traceback.format_exc())
                # include the underlying cause in the raised message
                raise HardwareControlException(str(ex))

        def change_channels(event):
            try:
                # Get the values of current channel, pulling from the first event in a
                # sequence if one is present
                current_config = event.get_sequence()[0].get_config_preset() \
                    if event.get_sequence() is not None else event.get_config_preset()
                current_group = event.get_sequence()[0].get_config_group() \
                    if event.get_sequence() is not None else event.get_config_group()
                previous_config = None if self.last_event is None else None \
                    if self.last_event.get_sequence() is None else \
                    self.last_event.get_sequence()[0].get_config_preset()
                new_channel = current_config is not None and (
                    previous_config is None or not previous_config == current_config)
                if new_channel:
                    # Set exposure
                    if event.get_exposure() is not None:
                        self.core.set_exposure(event.get_exposure())
                    # Set other channel props
                    self.core.set_config(current_group, current_config)
                    # TODO: haven't tested if this is actually needed
                    self.core.wait_for_config(current_group, current_config)
                if event.is_config_group_sequenced():
                    # Channels
                    group = event.get_sequence()[0].get_config_group()
                    config = self.core.get_config_data(group, event.get_sequence()[0].get_config_preset())
                    for i in range(config.size()):
                        ps = config.get_setting(i)
                        device_name = ps.get_device_label()
                        prop_name = ps.get_property_name()
                        if self.core.is_property_sequenceable(device_name, prop_name):
                            self.core.start_property_sequence(device_name, prop_name)
            except Exception as ex:
                # BUG FIX: ex.print_stack_trace() / ex.get_message() are Java-isms that
                # don't exist on Python exceptions and raised AttributeError
                traceback.print_exc()
                raise HardwareControlException(str(ex))

        def move_z_device(event):
            try:
                if event.is_z_sequenced():
                    self.core.start_stage_sequence(z_stage)
                else:
                    previous_z = None if self.last_event is None else None \
                        if self.last_event.get_sequence() is None else \
                        self.last_event.get_sequence()[0].get_z_position()
                    current_z = event.get_z_position() if event.get_sequence() is None else \
                        event.get_sequence()[0].get_z_position()
                    if current_z is None:
                        return
                    change = previous_z is None or previous_z != current_z
                    if not change:
                        return

                    # Wait for it to not be busy
                    self.core.wait_for_device(z_stage)
                    # Move Z
                    self.core.set_position(z_stage, float(current_z))
                    # Wait for move to finish
                    self.core.wait_for_device(z_stage)
            except Exception as ex:
                raise HardwareControlException(str(ex))

        def move_other_stage_devices(event):
            try:
                for stage_device_name in event.get_stage_device_names():
                    # Wait for it to not be busy
                    self.core.wait_for_device(stage_device_name)
                    # Move stage device
                    self.core.set_position(stage_device_name,
                                           event.get_stage_single_axis_stage_position(stage_device_name))
                    # Wait for move to finish
                    self.core.wait_for_device(stage_device_name)
            except Exception as ex:
                raise HardwareControlException(str(ex))

        def change_exposure(event):
            try:
                if event.is_exposure_sequenced():
                    self.core.start_exposure_sequence(self.core.get_camera_device())
                else:
                    current_exposure = event.get_exposure()
                    prev_exposure = None if self.last_event is None else self.last_event.get_exposure()
                    change_exposure = current_exposure is not None and (
                        prev_exposure is None or not prev_exposure == current_exposure)
                    if change_exposure:
                        # (was camelCase setExposure; snake_case for consistency with the
                        # rest of this module)
                        self.core.set_exposure(current_exposure)
            except Exception as ex:
                raise HardwareControlException(str(ex))

        def set_slm_pattern(event):
            try:
                slm_image = event.get_slm_image()
                if slm_image is not None:
                    # BUG FIX: previously called get_slm_image, which reads rather than
                    # writes the pattern
                    if isinstance(slm_image, bytes):
                        self.core.set_slm_image(slm, slm_image)
                    elif isinstance(slm_image, list) and all(isinstance(i, int) for i in slm_image):
                        self.core.set_slm_image(slm, slm_image)
                    else:
                        raise ValueError("SLM api only supports 8 bit and 32 bit patterns")
            except Exception as ex:
                raise HardwareControlException(str(ex))

        def loop_hardware_command_retries(r, command_name):
            # Retry a hardware command a few times before giving up
            for i in range(HARDWARE_ERROR_RETRIES):
                try:
                    r()
                    return
                except Exception:
                    self.core.log_message(traceback.format_exc())
                    print(self.get_current_date_and_time() + ": Problem " + command_name +
                          "\n Retry #" + str(i) + " in " + str(DELAY_BETWEEN_RETRIES_MS) + " ms")
                    time.sleep(DELAY_BETWEEN_RETRIES_MS / 1000)
            raise HardwareControlException(command_name + " unsuccessful")

        def change_additional_properties(event):
            try:
                for s in event.get_additional_properties():
                    self.core.set_property(s[0], s[1], s[2])
            except Exception as ex:
                # BUG FIX: ex.getMessage() is a Java-ism
                raise HardwareControlException(str(ex))

        try:
            # Get the hardware specific to this acquisition
            xy_stage = self.core.get_xy_stage_device()
            z_stage = self.core.get_focus_device()
            slm = self.core.get_slm_device()

            # Prepare sequences if applicable
            if event.get_sequence() is not None:
                z_sequence = pymmcore.DoubleVector() if event.is_z_sequenced() else None
                x_sequence = pymmcore.DoubleVector() if event.is_xy_sequenced() else None
                y_sequence = pymmcore.DoubleVector() if event.is_xy_sequenced() else None
                exposure_sequence_ms = pymmcore.DoubleVector() if event.is_exposure_sequenced() else None
                group = event.get_sequence()[0].get_config_group()
                config = self.core.get_config_data(group, event.get_sequence()[0].get_config_preset()) \
                    if event.get_sequence()[0].get_config_preset() is not None else None
                prop_sequences = [] if event.is_config_group_sequenced() else None

                for e in event.get_sequence():
                    # BUG FIX: pymmcore's SWIG vectors use append(), not Java's add()
                    if z_sequence is not None:
                        z_sequence.append(e.get_z_position())
                    if x_sequence is not None:
                        x_sequence.append(e.get_x_position())
                    if y_sequence is not None:
                        y_sequence.append(e.get_y_position())
                    if exposure_sequence_ms is not None:
                        exposure_sequence_ms.append(e.get_exposure())

                    # Set sequences for all channel properties
                    if prop_sequences is not None:
                        for i in range(config.size()):
                            ps = config.get_setting(i)
                            device_name = ps.get_device_label()
                            prop_name = ps.get_property_name()

                            if e == event.get_sequence()[0]:  # first event: one vector per property
                                # (resolves the old TODO: pymmcore provides StrVector)
                                prop_sequences.append(pymmcore.StrVector())

                            channel_preset_config = self.core.get_config_data(group, e.get_config_preset())
                            prop_value = channel_preset_config.get_setting(device_name, prop_name).get_property_value()

                            if self.core.is_property_sequenceable(device_name, prop_name):
                                # BUG FIX: prop_sequences is a Python list — use indexing/append
                                prop_sequences[i].append(prop_value)

                hardware_sequences_in_progress.device_names.append(self.core.get_camera_device())

                # Now have built up all the sequences, apply them
                if event.is_exposure_sequenced():
                    self.core.load_exposure_sequence(self.core.get_camera_device(), exposure_sequence_ms)
                    # Already added camera

                if event.is_xy_sequenced():
                    self.core.load_xy_stage_sequence(xy_stage, x_sequence, y_sequence)
                    # BUG FIX: device_names is a list; list.add() does not exist
                    hardware_sequences_in_progress.device_names.append(xy_stage)

                if event.is_z_sequenced():
                    self.core.load_stage_sequence(z_stage, z_sequence)
                    hardware_sequences_in_progress.device_names.append(z_stage)

                if event.is_config_group_sequenced():
                    for i in range(config.size()):
                        ps = config.get_setting(i)
                        device_name = ps.get_device_label()
                        prop_name = ps.get_property_name()

                        if len(prop_sequences[i]) > 0:
                            self.core.load_property_sequence(device_name, prop_name, prop_sequences[i])
                            hardware_sequences_in_progress.property_names.append(prop_name)
                            hardware_sequences_in_progress.property_device_names.append(device_name)

                self.core.prepare_sequence_acquisition(self.core.get_camera_device())

            # Compare to last event to see what needs to change
            if self.last_event is not None and self.last_event.acquisition_ != event.acquisition_:
                self.last_event = None  # Update all hardware if switching to a new acquisition

            # Z stage
            loop_hardware_command_retries(lambda: move_z_device(event), "Moving Z device")
            # Other stage devices
            loop_hardware_command_retries(lambda: move_other_stage_devices(event), "Moving other stage devices")
            # XY Stage
            loop_hardware_command_retries(lambda: move_xy_stage(event), "Moving XY stage")
            # Channels
            loop_hardware_command_retries(lambda: change_channels(event), "Changing channels")
            # Camera exposure
            loop_hardware_command_retries(lambda: change_exposure(event), "Changing exposure")
            # SLM
            loop_hardware_command_retries(lambda: set_slm_pattern(event), "Setting SLM pattern")
            # Arbitrary Properties
            loop_hardware_command_retries(lambda: change_additional_properties(event), "Changing additional properties")
            # Keep track of last event
            self.last_event = event if event.get_sequence() is None else event.get_sequence()[-1]
        except Exception:
            traceback.print_exc()
            raise HardwareControlException("Error executing event")

    def get_current_date_and_time(self):
        """Timestamp string used in retry log messages."""
        return datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")

    def is_sequencable(self, previous_events, next_event, new_seq_length):
        """Return True if next_event can be merged into the pending sequence
        (all differing hardware settings are sequenceable and within limits)."""
        try:
            if next_event.is_acquisition_sequence_end_event() or next_event.is_acquisition_finished_event():
                return False

            previous_event = previous_events[-1]

            # check all properties in group
            if previous_event.get_config_preset() is not None and next_event.get_config_preset() is not None and \
                    previous_event.get_config_preset() != next_event.get_config_preset():
                # check all properties in the channel
                config1 = self.core.get_config_data(previous_event.get_config_group(),
                                                    previous_event.get_config_preset())
                config2 = self.core.get_config_data(next_event.get_config_group(), next_event.get_config_preset())
                for i in range(config1.size()):
                    ps1 = config1.get_setting(i)
                    device_name = ps1.get_device_label()
                    prop_name = ps1.get_property_name()
                    prop_value1 = ps1.get_property_value()
                    ps2 = config2.get_setting(i)
                    prop_value2 = ps2.get_property_value()
                    if prop_value1 != prop_value2:
                        if not self.core.is_property_sequenceable(device_name, prop_name):
                            return False
                        if self.core.get_property_sequence_max_length(device_name, prop_name) < new_seq_length:
                            return False

            # TODO check for arbitrary additional properties in the acq event for being sequencable

            # z stage
            if previous_event.get_z_position() is not None and next_event.get_z_position() is not None and \
                    previous_event.get_z_position() != next_event.get_z_position():
                if not self.core.is_stage_sequenceable(self.core.get_focus_device()):
                    return False
                if new_seq_length > self.core.get_stage_sequence_max_length(self.core.get_focus_device()):
                    return False

            # arbitrary z stages
            # TODO implement sequences along arbitrary other stage devices
            for stage_device in previous_event.get_stage_device_names():
                return False

            # xy stage
            if (previous_event.get_x_position() is not None and next_event.get_x_position() is not None and
                    previous_event.get_x_position() != next_event.get_x_position()) or \
                    (previous_event.get_y_position() is not None and next_event.get_y_position() is not None and
                     previous_event.get_y_position() != next_event.get_y_position()):
                if not self.core.is_xy_stage_sequenceable(self.core.get_xy_stage_device()):
                    return False
                if new_seq_length > self.core.get_xy_stage_sequence_max_length(self.core.get_xy_stage_device()):
                    return False

            if previous_event.get_camera_device_name() is None:
                # Using the Core-Camera, the default

                # camera exposure
                if previous_event.get_exposure() is not None and next_event.get_exposure() is not None and \
                        previous_event.get_exposure() != next_event.get_exposure() and \
                        not self.core.is_exposure_sequenceable(self.core.get_camera_device()):
                    return False
                if self.core.is_exposure_sequenceable(self.core.get_camera_device()) and \
                        new_seq_length > self.core.get_exposure_sequence_max_length(self.core.get_camera_device()):
                    return False

            # If there is a nonzero delay between events, then it's not sequencable
            if previous_event.get_t_index() is not None and next_event.get_t_index() is not None and \
                    previous_event.get_t_index() != next_event.get_t_index():
                if previous_event.get_minimum_start_time_absolute() is not None and \
                        next_event.get_minimum_start_time_absolute() is not None and \
                        previous_event.get_minimum_start_time_absolute() != next_event.get_minimum_start_time_absolute():
                    return False

            return True
        except Exception as ex:
            raise RuntimeError(ex)

    def merge_sequence_event(self, event_list):
        """Collapse a list of events into a single sequenced AcquisitionEvent."""
        if len(event_list) == 1:
            return event_list[0]
        return AcquisitionEvent(event_list[0].acquisition_, event_list)


# ---------------------------------------------------------------------------
# pycromanager/acquisition/acq_eng_py/internal/hardware_sequences.py
# (separate file in the underlying patch)
# ---------------------------------------------------------------------------

class HardwareSequences:
    """Record of hardware/property sequences started for the current event so
    they can be stopped on abort or error."""

    def __init__(self):
        self.device_names = []  # stage/camera devices with active sequences
        self.property_names = []  # parallel to property_device_names
        self.property_device_names = []
self.notification_queue.get() + for listener in self.listeners: + listener.post_notification(n) + if n.is_acquisition_finished_notification(): + events_finished = True + if n.is_data_sink_finished_notification(): + data_sink_finished = True + if events_finished and data_sink_finished: + break + + def post_notification(self, notification): + self.notification_queue.put(notification) + if self.notification_queue.qsize() > 500: + print(f"Warning: Acquisition notification queue size: {self.notification_queue.qsize()}") + + def add_listener(self, listener): + self.listeners.append(listener) diff --git a/pycromanager/acquisition/acq_eng_py/main/AcqEngPy_Acquisition.py b/pycromanager/acquisition/acq_eng_py/main/AcqEngPy_Acquisition.py new file mode 100644 index 00000000..bb2c29f3 --- /dev/null +++ b/pycromanager/acquisition/acq_eng_py/main/AcqEngPy_Acquisition.py @@ -0,0 +1,262 @@ +import json +import queue +import traceback +import threading + +from pycromanager.acquisition.acq_eng_py.main.acq_eng_metadata import AcqEngMetadata +from pycromanager.acquisition.acq_eng_py.internal.engine import Engine +from pycromanager.acquisition.acq_eng_py.main.acq_notification import AcqNotification +from pycromanager.acquisition.acq_eng_py.internal.notification_handler import NotificationHandler + + + +class Acquisition(): + + EVENT_GENERATION_HOOK = 0 + # This hook runs before changes to the hardware (corresponding to the instructions in the + # event) are made + BEFORE_HARDWARE_HOOK = 1 + # This hook runs after changes to the hardware took place, but before camera exposure + # (either a snap or a sequence) is started + AFTER_HARDWARE_HOOK = 2 + # Hook runs after the camera sequence acquisition has started. This can be used for + # external triggering of the camera + AFTER_CAMERA_HOOK = 3 + # Hook runs after the camera exposure ended (when possible, before readout of the camera + # and availability of the images in memory). 
+ AFTER_EXPOSURE_HOOK = 4 + + IMAGE_QUEUE_SIZE = 30 + + def __init__(self, sink, summary_metadata_processor=None, initialize=True): + self.xy_stage_ = None + self.events_finished_ = threading.Event() + self.abort_requested_ = threading.Event() + self.start_time_ms_ = -1 + self.paused_ = False + self.event_generation_hooks_ = [] + self.before_hardware_hooks_ = [] + self.after_hardware_hooks_ = [] + self.after_camera_hooks_ = [] + self.after_exposure_hooks_ = [] + self.image_processors_ = [] + self.first_dequeue_ = queue.Queue(maxsize=self.IMAGE_QUEUE_SIZE) + self.processor_output_queues_ = {} + self.debug_mode_ = False + self.abort_exception_ = None + self.image_metadata_processor_ = None + self.notification_handler_ = NotificationHandler() + self.started_ = False + self.core_ = Engine.get_core() + self.summary_metadata_processor_ = summary_metadata_processor + self.data_sink_ = sink + if initialize: + self.initialize() + + def post_notification(self, notification): + self.notification_handler_.post_notification(notification) + + def add_acq_notification_listener(self, post_notification_fn): + self.notification_handler_.add_listener(post_notification_fn) + + def get_data_sink(self): + return self.data_sink_ + + def set_debug_mode(self, debug): + self.debug_mode_ = debug + + def is_debug_mode(self): + return self.debug_mode_ + + def is_abort_requested(self): + return self.abort_requested_.is_set() + + def abort(self, e=None): + if e: + self.abort_exception_ = e + if self.abort_requested_.is_set(): + return + self.abort_requested_.set() + if self.is_paused(): + self.set_paused(False) + Engine.get_instance().finish_acquisition(self) + + def check_for_exceptions(self): + if self.abort_exception_: + raise self.abort_exception_ + + def add_to_summary_metadata(self, summary_metadata): + if self.summary_metadata_processor_: + self.summary_metadata_processor_(summary_metadata) + + def add_to_image_metadata(self, tags): + if self.image_metadata_processor_: + 
self.image_metadata_processor_(tags) + + def add_tags_to_tagged_image(self, tags, more_tags): + if not more_tags: + return + more_tags_object = json.loads(json.dumps(more_tags)) + tags['AcqEngMetadata.TAGS'] = more_tags_object + + def submit_event_iterator(self, evt): + if not self.started_: + self.start() + return Engine.get_instance().submit_event_iterator(evt) + + def start_saving_thread(self): + def saving_thread(acq): + try: + while True: + if acq.debug_mode_: + acq.core_.log_message(f"Image queue size: {len(acq.first_dequeue_)}") + if not acq.image_processors_: + if acq.debug_mode_: + acq.core_.log_message("waiting for image to save") + img = acq.first_dequeue_.get() + if acq.debug_mode_: + acq.core_.log_message("got image to save") + if img.tags is None and img.pix is None: + break + acq.save_image(img) + else: + img = acq.processor_output_queues_[acq.image_processors_[-1]].get() + if acq.data_sink_: + if acq.debug_mode_: + acq.core_.log_message("Saving image") + if not img.pix and not img.tags: + break + acq.save_image(img) + if acq.debug_mode_: + acq.core_.log_message("Finished saving image") + except Exception as ex: + traceback.print_exc() + acq.abort(ex) + finally: + acq.save_image(acq.core_.TaggedImage(None, None)) + + threading.Thread(target=saving_thread, args=(self,)).start() + + def add_image_processor(self, p): + if self.started_: + raise RuntimeError("Cannot add processor after acquisition started") + self.image_processors_.append(p) + self.processor_output_queues_[p] = queue.Queue(maxsize=self.IMAGE_QUEUE_SIZE) + if len(self.image_processors_) == 1: + p.set_acq_and_queues(self, self.first_dequeue_, self.processor_output_queues_[p]) + else: + p.set_acq_and_queues(self, self.processor_output_queues_[self.image_processors_[-2]], + self.processor_output_queues_[self.image_processors_[-1]]) + + def add_hook(self, h, type_): + if self.started_: + raise RuntimeError("Cannot add hook after acquisition started") + if type_ == self.EVENT_GENERATION_HOOK: 
+ self.event_generation_hooks_.append(h) + elif type_ == self.BEFORE_HARDWARE_HOOK: + self.before_hardware_hooks_.append(h) + elif type_ == self.AFTER_HARDWARE_HOOK: + self.after_hardware_hooks_.append(h) + elif type_ == self.AFTER_CAMERA_HOOK: + self.after_camera_hooks_.append(h) + elif type_ == self.AFTER_EXPOSURE_HOOK: + self.after_exposure_hooks_.append(h) + + def initialize(self): + if self.core_: + summary_metadata = AcqEngMetadata.make_summary_metadata(self.core_, self) + self.add_to_summary_metadata(summary_metadata) + try: + self.summary_metadata_ = summary_metadata + except json.JSONDecodeError: + print("Couldn't copy summary metadata") + if self.data_sink_: + self.data_sink_.initialize(self, summary_metadata) + + def start(self): + if self.data_sink_: + self.start_saving_thread() + self.post_notification(AcqNotification.create_acq_started_notification()) + self.started_ = True + + def save_image(self, image): + if image.tags is None and image.pix is None: + self.data_sink_.finish() + self.post_notification(AcqNotification.create_data_sink_finished_notification()) + else: + self.data_sink_.put_image(image) + axes = AcqEngMetadata.get_axes(image.tags) + self.post_notification(AcqNotification.create_image_saved_notification(axes)) + + def get_start_time_ms(self): + return self.start_time_ms_ + + def set_start_time_ms(self, time): + self.start_time_ms_ = time + + def is_paused(self): + return self.paused_ + + def is_started(self): + return self.started_ + + def set_paused(self, pause): + self.paused_ = pause + + def get_summary_metadata(self): + return self.summary_metadata_ + + def anything_acquired(self): + return not self.data_sink_ or self.data_sink_.anything_acquired() + + def add_image_metadata_processor(self, processor): + if not self.image_metadata_processor_: + self.image_metadata_processor_ = processor + else: + raise RuntimeError("Multiple metadata processors not supported") + + def get_event_generation_hooks(self): + return 
self.event_generation_hooks_ + + def get_before_hardware_hooks(self): + return self.before_hardware_hooks_ + + def get_after_hardware_hooks(self): + return self.after_hardware_hooks_ + + def get_after_camera_hooks(self): + return self.after_camera_hooks_ + + def get_after_exposure_hooks(self): + return self.after_exposure_hooks_ + + def add_to_output(self, ti): + try: + if ti.tags is None and ti.pix is None: + self.events_finished_.set() + self.first_dequeue_.put(ti) + except Exception as ex: + raise RuntimeError(ex) + + def finish(self): + Engine.get_instance().finish_acquisition(self) + + def are_events_finished(self): + return self.events_finished_.is_set() + + def block_until_events_finished(self, timeout=None): + """Blocks until all events have been processed.""" + self.events_finished_.wait(timeout) + + def block_unless_aborted(self, timeout_ms=None): + """Blocks until acquisition is aborted.""" + self.abort_requested_.wait(timeout_ms / 1000) + + + def get_image_transfer_queue_size(self): + return self.IMAGE_QUEUE_SIZE + + def get_image_transfer_queue_count(self): + return len(self.first_dequeue_) + + diff --git a/pycromanager/acquisition/acq_eng_py/main/__init__.py b/pycromanager/acquisition/acq_eng_py/main/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pycromanager/acquisition/acq_eng_py/main/acq_eng_metadata.py b/pycromanager/acquisition/acq_eng_py/main/acq_eng_metadata.py new file mode 100644 index 00000000..9f10aaeb --- /dev/null +++ b/pycromanager/acquisition/acq_eng_py/main/acq_eng_metadata.py @@ -0,0 +1,795 @@ +import datetime +import json +import traceback +import numpy as np + +class AcqEngMetadata: + + CHANNEL_GROUP = "ChannelGroup" + CORE_AUTOFOCUS_DEVICE = "Core-Autofocus" + CORE_CAMERA = "Core-Camera" + CORE_GALVO = "Core-Galvo" + CORE_IMAGE_PROCESSOR = "Core-ImageProcessor" + CORE_SLM = "Core-SLM" + CORE_SHUTTER = "Core-Shutter" + WIDTH = "Width" + HEIGHT = "Height" + PIX_SIZE = "PixelSize_um" + POS_NAME = "PositionName" 
+ X_UM_INTENDED = "XPosition_um_Intended" + Y_UM_INTENDED = "YPosition_um_Intended" + Z_UM_INTENDED = "ZPosition_um_Intended" + GENERIC_UM_INTENDED_SUFFIX = "Position_um_Intended" + X_UM = "XPosition_um" + Y_UM = "YPosition_um" + Z_UM = "ZPosition_um" + EXPOSURE = "Exposure" + CHANNEL_NAME = "Channel" + ZC_ORDER = "SlicesFirst" # this is called ZCT in the functions + TIME = "Time" + DATE_TIME = "DateAndTime" + SAVING_PREFIX = "Prefix" + INITIAL_POS_LIST = "InitialPositionList" + TIMELAPSE_INTERVAL = "Interval_ms" + PIX_TYPE = "PixelType" + BIT_DEPTH = "BitDepth" + ELAPSED_TIME_MS = "ElapsedTime-ms" + Z_STEP_UM = "z-step_um" + EXPLORE_ACQUISITION = "ExploreAcquisition" + AXES_GRID_COL = "column" + AXES_GRID_ROW = "row" + OVERLAP_X = "GridPixelOverlapX" + OVERLAP_Y = "GridPixelOverlapY" + AFFINE_TRANSFORM = "AffineTransform" + PIX_TYPE_GRAY8 = "GRAY8" + PIX_TYPE_GRAY16 = "GRAY16" + CORE_XYSTAGE = "Core-XYStage" + CORE_FOCUS = "Core-Focus" + AXES = "Axes" + CHANNEL_AXIS = "channel" + TIME_AXIS = "time" + Z_AXIS = "z" + POSITION_AXIS = "position" + TAGS = "tags" + ACQUISITION_EVENT = "Event" + + @staticmethod + def add_image_metadata(core, tags, event, elapsed_ms, exposure): + try: + AcqEngMetadata.set_pixel_size_um(tags, core.get_pixel_size_um()) + + # Date and time + AcqEngMetadata.set_elapsed_time_ms(tags, elapsed_ms) + AcqEngMetadata.set_image_time(tags, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S -')) + + # Info about all hardware that the core specifically knows about + AcqEngMetadata.create_axes(tags) + + # Axes positions + for s in event.get_defined_axes(): + AcqEngMetadata.set_axis_position(tags, s, event.get_axis_position(s)) + + # XY Stage Positions + if event.get_x_position() is not None and event.get_y_position() is not None: + AcqEngMetadata.set_stage_x_intended(tags, event.get_x_position()) + AcqEngMetadata.set_stage_y_intended(tags, event.get_y_position()) + if event.get_position_name() is not None: + AcqEngMetadata.set_position_name(tags, 
event.get_position_name()) + + if event.get_z_position() is not None: + AcqEngMetadata.set_stage_z_intended(tags, event.get_z_position()) + elif event.get_stage_single_axis_stage_position(core.get_focus_device()) is not None: + AcqEngMetadata.set_stage_z_intended(tags, + event.get_stage_single_axis_stage_position(core.get_focus_device())) + + for name in event.get_stage_device_names(): + if name != core.get_focus_device(): + AcqEngMetadata.set_stage_position_intended(tags, name, + event.get_stage_single_axis_stage_position(name)) + + if event.get_sequence() is not None: + AcqEngMetadata.add_acquisition_event(tags, event) + + AcqEngMetadata.set_exposure(tags, exposure) + + except Exception as e: + traceback.print_exc() + raise RuntimeError("Problem adding image metadata") + + @staticmethod + def add_acquisition_event(tags, event): + tags[AcqEngMetadata.ACQUISITION_EVENT] = event.toJSON() + + + @staticmethod + def make_summary_metadata(core, acq): + summary = json.loads("{}") + + AcqEngMetadata.set_acq_date(summary, AcqEngMetadata.get_current_date_and_time()) + + # General information the core-camera + byte_depth = int(core.get_bytes_per_pixel()) + if byte_depth == 0: + raise RuntimeError("Camera byte depth cannot be zero") + AcqEngMetadata.set_pixel_type_from_byte_depth(summary, byte_depth) + AcqEngMetadata.set_pixel_size_um(summary, core.get_pixel_size_um()) + + # Info about core devices + try: + AcqEngMetadata.set_core_xy(summary, core.get_xy_stage_device()) + AcqEngMetadata.set_core_focus(summary, core.get_focus_device()) + AcqEngMetadata.set_core_autofocus(summary, core.get_auto_focus_device()) + AcqEngMetadata.set_core_camera(summary, core.get_camera_device()) + AcqEngMetadata.set_core_galvo(summary, core.get_galvo_device()) + AcqEngMetadata.set_core_image_processor(summary, core.get_image_processor_device()) + AcqEngMetadata.set_core_slm(summary, core.get_slm_device()) + AcqEngMetadata.set_core_shutter(summary, core.get_shutter_device()) + except Exception as 
e: + raise RuntimeError("couldn't get info from core about devices") + + # TODO restore + # # Affine transform + # if AffineTransformUtils.isAffineTransformDefined(): + # at = AffineTransformUtils.getAffineTransform(0, 0) + # AcqEngMetadata.setAffineTransformString(summary, AffineTransformUtils.transformToString(at)) + # else: + # AcqEngMetadata.setAffineTransformString(summary, "Undefined") + + return summary + + + @staticmethod + def get_current_date_and_time(): + return datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S") + + @staticmethod + def get_indices(image_label): + s = image_label.split("_") + return [int(i) for i in s] + + @staticmethod + def copy(map): + return json.loads(json.dumps(map)) + + @staticmethod + def set_core_xy(map, xy_name): + map[AcqEngMetadata.CORE_XYSTAGE] = xy_name + + @staticmethod + def has_core_xy(map): + return AcqEngMetadata.CORE_XYSTAGE in map + + @staticmethod + def get_core_xy(map): + if AcqEngMetadata.CORE_XYSTAGE in map: + return map[AcqEngMetadata.CORE_XYSTAGE] + else: + raise ValueError("Missing core xy stage tag") + + @staticmethod + def set_core_focus(map, z_name): + map[AcqEngMetadata.CORE_FOCUS] = z_name + + @staticmethod + def has_core_focus(map): + return AcqEngMetadata.CORE_FOCUS in map + + @staticmethod + def get_core_focus(map): + if AcqEngMetadata.CORE_FOCUS in map: + return map[AcqEngMetadata.CORE_FOCUS] + else: + raise ValueError("Missing core focus tag") + + @staticmethod + def set_acq_date(map, date_time): + map[AcqEngMetadata.DATE_TIME] = date_time + + @staticmethod + def is_explore_acq(summary_metadata): + if AcqEngMetadata.EXPLORE_ACQUISITION in summary_metadata: + return summary_metadata[AcqEngMetadata.EXPLORE_ACQUISITION] + else: + raise ValueError("Missing explore tag") + + @staticmethod + def set_explore_acq(summary_metadata, b): + summary_metadata[AcqEngMetadata.EXPLORE_ACQUISITION] = b + + @staticmethod + def has_acq_date(map): + return AcqEngMetadata.DATE_TIME in map + + @staticmethod + def 
get_acq_date(map): + if AcqEngMetadata.DATE_TIME in map: + return map[AcqEngMetadata.DATE_TIME] + else: + raise ValueError("Missing Acq dat time tag") + + @staticmethod + def set_bit_depth(map, bit_depth): + map[AcqEngMetadata.BIT_DEPTH] = bit_depth + + @staticmethod + def has_bit_depth(map): + return AcqEngMetadata.BIT_DEPTH in map + + @staticmethod + def get_bit_depth(map): + try: + return map[AcqEngMetadata.BIT_DEPTH] + except KeyError: + raise ValueError("Missing bit depth tag") + + @staticmethod + def set_width(map, width): + map[AcqEngMetadata.WIDTH] = width + + @staticmethod + def has_width(map): + return AcqEngMetadata.WIDTH in map + + @staticmethod + def get_width(map): + try: + return map[AcqEngMetadata.WIDTH] + except KeyError: + raise ValueError("Image width tag missing") + + @staticmethod + def set_height(map, height): + map[AcqEngMetadata.HEIGHT] = height + + @staticmethod + def has_height(map): + return AcqEngMetadata.HEIGHT in map + + @staticmethod + def get_height(map): + try: + return map[AcqEngMetadata.HEIGHT] + except KeyError: + raise ValueError("Height missing from image tags") + + @staticmethod + def set_position_name(map, position_name): + map[AcqEngMetadata.POS_NAME] = position_name + + @staticmethod + def has_position_name(map): + return AcqEngMetadata.POS_NAME in map + + @staticmethod + def get_position_name(map): + try: + return map[AcqEngMetadata.POS_NAME] + except KeyError: + raise ValueError("Missing position name tag") + + @staticmethod + def set_pixel_type_from_string(map, pixel_type): + map[AcqEngMetadata.PIX_TYPE] = pixel_type + + @staticmethod + def set_pixel_type_from_byte_depth(map, depth): + try: + if depth == 1: + map[AcqEngMetadata.PIX_TYPE] = AcqEngMetadata.PIX_TYPE_GRAY8 + elif depth == 2: + map[AcqEngMetadata.PIX_TYPE] = AcqEngMetadata.PIX_TYPE_GRAY16 + elif depth == 4: + map[AcqEngMetadata.PIX_TYPE] = AcqEngMetadata.PIX_TYPE_RGB32 + except KeyError: + raise ValueError("Couldn't set pixel type") + + @staticmethod + def 
has_pixel_type(map): + return AcqEngMetadata.PIX_TYPE in map + + @staticmethod + def get_pixel_type(map): + try: + return map[AcqEngMetadata.PIX_TYPE] + except KeyError: + raise ValueError("Missing pixel type tag") + + @staticmethod + def get_bytes_per_pixel(map): + if AcqEngMetadata.is_gray8(map): + return 1 + elif AcqEngMetadata.is_gray16(map): + return 2 + elif AcqEngMetadata.is_rgb32(map): + return 4 + else: + return 0 + + @staticmethod + def get_number_of_components(map): + pixel_type = AcqEngMetadata.get_pixel_type(map) + if pixel_type == AcqEngMetadata.PIX_TYPE_GRAY8 or pixel_type == AcqEngMetadata.PIX_TYPE_GRAY16: + return 1 + elif pixel_type == AcqEngMetadata.PIX_TYPE_RGB32: + return 3 + else: + raise ValueError("Invalid pixel type") + + @staticmethod + def is_gray8(map): + return AcqEngMetadata.get_pixel_type(map) == AcqEngMetadata.PIX_TYPE_GRAY8 + + @staticmethod + def is_gray16(map): + return AcqEngMetadata.get_pixel_type(map) == AcqEngMetadata.PIX_TYPE_GRAY16 + + @staticmethod + def is_rgb32(map): + return AcqEngMetadata.get_pixel_type(map) == AcqEngMetadata.PIX_TYPE_RGB32 + + @staticmethod + def is_gray(map): + return AcqEngMetadata.is_gray8(map) or AcqEngMetadata.is_gray16(map) + + @staticmethod + def is_rgb(map): + return AcqEngMetadata.is_rgb32(map) + + @staticmethod + def get_keys(md): + n = len(md) + key_array = [None] * n + keys = md.keys() + for i in range(n): + key_array[i] = keys.next() + return key_array + + @staticmethod + def get_json_array_member(obj, key): + try: + return obj[key] + except KeyError: + raise ValueError("Missing JSONArray member") + + @staticmethod + def set_image_time(map, time): + try: + map[AcqEngMetadata.TIME] = time + except KeyError: + raise ValueError("Couldn't set image time") + + @staticmethod + def has_image_time(map): + return AcqEngMetadata.TIME in map + + @staticmethod + def get_image_time(map): + try: + return map[AcqEngMetadata.TIME] + except KeyError: + raise ValueError("Missing image time tag") + + 
@staticmethod + def get_depth(tags): + pixel_type = AcqEngMetadata.get_pixel_type(tags) + if AcqEngMetadata.PIX_TYPE_GRAY8 in pixel_type: + return 1 + elif AcqEngMetadata.PIX_TYPE_GRAY16 in pixel_type: + return 2 + else: + return 0 + + @staticmethod + def set_exposure(map, exp): + try: + map[AcqEngMetadata.EXPOSURE] = exp + except KeyError: + raise ValueError("Could not set exposure") + + @staticmethod + def has_exposure(map): + return AcqEngMetadata.EXPOSURE in map + + @staticmethod + def get_exposure(map): + try: + return map[AcqEngMetadata.EXPOSURE] + except KeyError: + raise ValueError("Exposure tag missing") + + @staticmethod + def set_pixel_size_um(map, val): + try: + map[AcqEngMetadata.PIX_SIZE] = val + except KeyError: + raise ValueError("Missing pixel size tag") + + @staticmethod + def has_pixel_size_um(map): + return AcqEngMetadata.PIX_SIZE in map + + @staticmethod + def get_pixel_size_um(map): + try: + return map[AcqEngMetadata.PIX_SIZE] + except KeyError: + raise ValueError("Pixel size missing in metadata") + + @staticmethod + def set_z_step_um(map, val): + try: + map[AcqEngMetadata.Z_STEP_UM] = val + except KeyError: + raise ValueError("Couldn't set z step tag") + + @staticmethod + def has_z_step_um(map): + return AcqEngMetadata.Z_STEP_UM in map + + @staticmethod + def get_z_step_um(map): + try: + return map[AcqEngMetadata.Z_STEP_UM] + except KeyError: + raise ValueError("Z step metadata field missing") + + @staticmethod + def set_z_position_um(map, val): + try: + map[AcqEngMetadata.Z_UM] = val + except KeyError: + raise ValueError("Couldn't set z position") + + @staticmethod + def has_z_position_um(map): + return AcqEngMetadata.Z_UM in map + + @staticmethod + def get_z_position_um(map): + try: + return map[AcqEngMetadata.Z_UM] + except KeyError: + raise ValueError("Missing Z position tag") + + @staticmethod + def set_elapsed_time_ms(map, val): + try: + map[AcqEngMetadata.ELAPSED_TIME_MS] = val + except KeyError: + raise ValueError("Couldn't set 
elapsed time") + + @staticmethod + def has_elapsed_time_ms(map): + return AcqEngMetadata.ELAPSED_TIME_MS in map + + @staticmethod + def get_elapsed_time_ms(map): + try: + return map[AcqEngMetadata.ELAPSED_TIME_MS] + except KeyError: + raise RuntimeError("missing elapsed time tag") + + @staticmethod + def set_interval_ms(map, val): + map[AcqEngMetadata.TIMELAPSE_INTERVAL] = val + + @staticmethod + def has_interval_ms(map): + return AcqEngMetadata.TIMELAPSE_INTERVAL in map + + @staticmethod + def get_interval_ms(map): + try: + return map[AcqEngMetadata.TIMELAPSE_INTERVAL] + except KeyError: + raise RuntimeError("Time interval missing from summary metadata") + + @staticmethod + def set_zct_order(map, val): + map[AcqEngMetadata.ZC_ORDER] = val + + @staticmethod + def has_zct_order(map): + return AcqEngMetadata.ZC_ORDER in map + + @staticmethod + def get_zct_order(map): + try: + return map[AcqEngMetadata.ZC_ORDER] + except KeyError: + raise RuntimeError("Missing ZCT Tag") + + @staticmethod + def set_affine_transform_string(summary_md, affine): + summary_md[AcqEngMetadata.AFFINE_TRANSFORM] = affine + + @staticmethod + def has_affine_transform_string(map): + return AcqEngMetadata.AFFINE_TRANSFORM in map + + @staticmethod + def get_affine_transform_string(summary_md): + try: + return summary_md[AcqEngMetadata.AFFINE_TRANSFORM] + except KeyError: + raise RuntimeError("Affine transform missing from summary metadata") + + @staticmethod + def get_affine_transform(summary_md): + try: + return AcqEngMetadata.string_to_transform(summary_md[AcqEngMetadata.AFFINE_TRANSFORM]) + except KeyError: + raise RuntimeError("Affine transform missing from summary metadata") + + @staticmethod + def string_to_transform(s): + if s == "Undefined": + return None + mat = [0] * 4 + vals = s.split("_") + for i in range(4): + mat[i] = float(vals[i]) + return AcqEngMetadata.AffineTransform(mat) + + @staticmethod + def set_pixel_overlap_x(smd, overlap): + smd[AcqEngMetadata.OVERLAP_X] = overlap + + 
@staticmethod + def has_pixel_overlap_x(map): + return AcqEngMetadata.OVERLAP_X in map + + @staticmethod + def get_pixel_overlap_x(summary_md): + try: + return summary_md[AcqEngMetadata.OVERLAP_X] + except KeyError: + raise RuntimeError("Could not find pixel overlap in image tags") + + @staticmethod + def set_pixel_overlap_y(smd, overlap): + smd[AcqEngMetadata.OVERLAP_Y] = overlap + + @staticmethod + def has_pixel_overlap_y(map): + return AcqEngMetadata.OVERLAP_Y in map + + @staticmethod + def get_pixel_overlap_y(summary_md): + try: + return summary_md[AcqEngMetadata.OVERLAP_Y] + except KeyError: + raise RuntimeError("Could not find pixel overlap in image tags") + + @staticmethod + def set_stage_x_intended(smd, x): + smd[AcqEngMetadata.X_UM_INTENDED] = x + + @staticmethod + def has_stage_x_intended(map): + return AcqEngMetadata.X_UM_INTENDED in map + + @staticmethod + def get_stage_x_intended(smd): + try: + return smd[AcqEngMetadata.X_UM_INTENDED] + except KeyError: + raise RuntimeError("Could not get stage x") + + @staticmethod + def set_stage_y_intended(smd, y): + smd[AcqEngMetadata.Y_UM_INTENDED] = y + + @staticmethod + def has_stage_y_intended(map): + return AcqEngMetadata.Y_UM_INTENDED in map + + @staticmethod + def get_stage_y_intended(smd): + try: + return smd[AcqEngMetadata.Y_UM_INTENDED] + except KeyError: + raise RuntimeError("Could not get stage y") + + @staticmethod + def set_stage_z_intended(smd, y): + smd[AcqEngMetadata.Z_UM_INTENDED] = y + + @staticmethod + def has_stage_z_intended(map): + return AcqEngMetadata.Z_UM_INTENDED in map + + @staticmethod + def get_stage_z_intended(smd): + try: + return smd[AcqEngMetadata.Z_UM_INTENDED] + except KeyError: + raise RuntimeError("Could not get stage Z") + + @staticmethod + def set_stage_position_intended(tags, name, stage_single_axis_stage_position): + tags[name + AcqEngMetadata.GENERIC_UM_INTENDED_SUFFIX] = stage_single_axis_stage_position + + @staticmethod + def set_stage_x(smd, x): + 
smd[AcqEngMetadata.X_UM] = x + + @staticmethod + def has_stage_x(map): + return AcqEngMetadata.X_UM in map + + @staticmethod + def get_stage_x(smd): + try: + return smd[AcqEngMetadata.X_UM] + except KeyError: + raise RuntimeError("Could not get stage x") + + @staticmethod + def set_stage_y(smd, y): + smd[AcqEngMetadata.Y_UM] = y + + @staticmethod + def has_stage_y(map): + return AcqEngMetadata.Y_UM in map + + @staticmethod + def get_stage_y(smd): + try: + return smd[AcqEngMetadata.Y_UM] + except KeyError: + raise RuntimeError("Could not get stage y") + + @staticmethod + def set_channel_group(summary, channel_group): + summary[AcqEngMetadata.CHANNEL_GROUP] = channel_group + + @staticmethod + def has_channel_group(map): + return AcqEngMetadata.CHANNEL_GROUP in map + + @staticmethod + def get_channel_group(summary): + try: + return summary[AcqEngMetadata.CHANNEL_GROUP] + except KeyError: + raise RuntimeError("Could not find Channel Group") + + @staticmethod + def set_core_autofocus(summary, auto_focus_device): + summary[AcqEngMetadata.CORE_AUTOFOCUS_DEVICE] = auto_focus_device + + @staticmethod + def has_core_autofocus(summary): + return AcqEngMetadata.CORE_AUTOFOCUS_DEVICE in summary + + @staticmethod + def get_core_autofocus_device(summary): + try: + return summary[AcqEngMetadata.CORE_AUTOFOCUS_DEVICE] + except KeyError: + raise ValueError("Could not find autofocus device") + + @staticmethod + def set_core_camera(summary, camera_device): + summary[AcqEngMetadata.CORE_CAMERA] = camera_device + + @staticmethod + def has_core_camera(summary): + return AcqEngMetadata.CORE_CAMERA in summary + + @staticmethod + def get_core_camera(summary): + try: + return summary[AcqEngMetadata.CORE_CAMERA] + except KeyError: + raise ValueError("Could not get core camera") + + @staticmethod + def set_core_galvo(summary, galvo_device): + summary[AcqEngMetadata.CORE_GALVO] = galvo_device + + @staticmethod + def has_core_galvo(summary): + return AcqEngMetadata.CORE_GALVO in summary + + 
@staticmethod + def get_core_galvo(summary): + try: + return summary[AcqEngMetadata.CORE_GALVO] + except KeyError: + raise ValueError("Could not get core galvo") + + @staticmethod + def set_core_image_processor(summary, image_processor_device): + summary[AcqEngMetadata.CORE_IMAGE_PROCESSOR] = image_processor_device + + @staticmethod + def has_core_image_processor(summary): + return AcqEngMetadata.CORE_IMAGE_PROCESSOR in summary + + @staticmethod + def get_core_image_processor(summary): + try: + return summary[AcqEngMetadata.CORE_IMAGE_PROCESSOR] + except KeyError: + raise ValueError("Could not find core image processor") + + @staticmethod + def set_core_slm(summary, slm_device): + summary[AcqEngMetadata.CORE_SLM] = slm_device + + @staticmethod + def has_core_slm(summary): + return AcqEngMetadata.CORE_SLM in summary + + @staticmethod + def get_core_slm(summary): + try: + return summary[AcqEngMetadata.CORE_SLM] + except KeyError: + raise ValueError("Could not find core slm") + + @staticmethod + def set_core_shutter(summary, shutter_device): + summary[AcqEngMetadata.CORE_SHUTTER] = shutter_device + + @staticmethod + def has_core_shutter(summary): + return AcqEngMetadata.CORE_SHUTTER in summary + + @staticmethod + def get_core_shutter(summary): + try: + return summary[AcqEngMetadata.CORE_SHUTTER] + except KeyError: + raise ValueError("Could not find core shutter") + + @staticmethod + def create_axes(tags): + tags[AcqEngMetadata.AXES] = {} + + @staticmethod + def get_axes(tags): + try: + axes = tags[AcqEngMetadata.AXES] + axes_map = {} + for key in axes: + axes_map[key] = axes[key] + return axes_map + except KeyError: + raise ValueError("Could not create axes") + + @staticmethod + def get_axes_as_json(axes): + try: + axes_json = {} + for key in axes: + axes_json[key] = axes[key] + return axes_json + except KeyError: + raise ValueError("Could not convert axes to JSON") + + @staticmethod + def set_axis_position(tags, axis, position): + if position is None: + if 
class AcqNotification:
    """
    A notification emitted at a particular phase of an acquisition.

    The nested marker classes (Acquisition, Hardware, Camera, Image) act as
    notification categories; ``phase`` is one of the string constants they
    define, and ``id`` is an optional payload (e.g. the axes of a saved image).
    """

    class Acquisition:
        ACQ_STARTED = "acq_started"
        ACQ_EVENTS_FINISHED = "acq_events_finished"

        @staticmethod
        def to_string():
            return "Global"

    class Hardware:
        PRE_HARDWARE = "pre_hardware"
        POST_HARDWARE = "post_hardware"

        @staticmethod
        def to_string():
            return "Hardware"

    class Camera:
        PRE_SEQUENCE_STARTED = "pre_sequence_started"
        PRE_SNAP = "pre_snap"
        POST_EXPOSURE = "post_exposure"

        @staticmethod
        def to_string():
            return "Camera"

    class Image:
        IMAGE_SAVED = "image_saved"
        DATA_SINK_FINISHED = "data_sink_finished"

        @staticmethod
        def to_string():
            return "Image"

    def __init__(self, type, id, phase=None):
        if type is None:
            # Infer the notification category from the phase
            if phase in (AcqNotification.Camera.PRE_SNAP, AcqNotification.Camera.POST_EXPOSURE,
                         AcqNotification.Camera.PRE_SEQUENCE_STARTED):
                type = AcqNotification.Camera
            elif phase in (AcqNotification.Hardware.PRE_HARDWARE, AcqNotification.Hardware.POST_HARDWARE):
                type = AcqNotification.Hardware
            elif phase in (AcqNotification.Image.IMAGE_SAVED,
                           AcqNotification.Image.DATA_SINK_FINISHED):
                # BUGFIX: DATA_SINK_FINISHED was previously rejected as "Unknown phase"
                type = AcqNotification.Image
            elif phase in (AcqNotification.Acquisition.ACQ_STARTED,
                           AcqNotification.Acquisition.ACQ_EVENTS_FINISHED):
                # BUGFIX: acquisition-level phases were previously rejected as well
                type = AcqNotification.Acquisition
            else:
                raise ValueError("Unknown phase")
        self.type = type
        self.phase = phase
        self.id = id

    @staticmethod
    def create_acq_events_finished_notification():
        return AcqNotification(AcqNotification.Acquisition, None, AcqNotification.Acquisition.ACQ_EVENTS_FINISHED)

    @staticmethod
    def create_acq_started_notification():
        return AcqNotification(AcqNotification.Acquisition, None, AcqNotification.Acquisition.ACQ_STARTED)

    @staticmethod
    def create_data_sink_finished_notification():
        return AcqNotification(AcqNotification.Image, None, AcqNotification.Image.DATA_SINK_FINISHED)

    @staticmethod
    def create_image_saved_notification(image_descriptor):
        return AcqNotification(AcqNotification.Image, image_descriptor, AcqNotification.Image.IMAGE_SAVED)

    def to_json(self):
        """Return a JSON-serializable dict representation of this notification."""
        n = {}
        # BUGFIX: self.type may be one of the inner marker classes (local
        # construction) or already a string (round-tripped through from_json);
        # previously the class object itself was stored, which is not
        # JSON-serializable. Emit the string form either way.
        n['type'] = self.type if isinstance(self.type, str) else self.type.to_string()
        n['phase'] = self.phase
        if self.id is not None:  # BUGFIX: 'if self.id' dropped falsy ids such as 0
            n['id'] = self.id
        return n

    @staticmethod
    def from_json(json):
        """Reconstruct a notification from a dict produced by to_json."""
        return AcqNotification(json['type'],
                               json['id'] if 'id' in json else None,
                               json['phase'] if 'phase' in json else None)

    def is_acquisition_finished_notification(self):
        return self.phase == AcqNotification.Acquisition.ACQ_EVENTS_FINISHED

    def is_data_sink_finished_notification(self):
        return self.phase == AcqNotification.Image.DATA_SINK_FINISHED

    def is_image_saved_notification(self):
        return self.phase == AcqNotification.Image.IMAGE_SAVED
def __init__(self, acq, sequence=None): + self.acquisition_ = acq + self.axisPositions_ = {} + self.camera_ = None + self.timeout_ms_ = None + self.configGroup_ = None + self.configPreset_ = None + self.exposure_ = None + self.miniumumStartTime_ms_ = None + self.zPosition_ = None + self.xPosition_ = None + self.yPosition_ = None + self.stageCoordinates_ = {} + self.stageDeviceNamesToAxisNames_ = {} + self.tags_ = {} + self.acquireImage_ = None + self.slmImage_ = None + self.properties_ = set() + self.sequence_ = None + self.xySequenced_ = False + self.zSequenced_ = False + self.exposureSequenced_ = False + self.configGroupSequenced_ = False + self.specialFlag_ = None + + if sequence: + self.acquisition_ = sequence[0].acquisition_ + self.miniumumStartTime_ms_ = sequence[0].miniumumStartTime_ms_ + self.sequence_ = list(sequence) + zPosSet = set() + xPosSet = set() + yPosSet = set() + exposureSet = set() + configSet = set() + for event in self.sequence_: + if event.zPosition_: + zPosSet.add(event.get_z_position()) + if event.xPosition_: + xPosSet.add(event.get_x_position()) + if event.yPosition_: + yPosSet.add(event.get_y_position()) + if event.exposure_: + exposureSet.add(event.get_exposure()) + if event.configPreset_: + configSet.add(event.get_config_preset()) + self.exposureSequenced_ = len(exposureSet) > 1 + self.configGroupSequenced_ = len(configSet) > 1 + self.xySequenced_ = len(xPosSet) > 1 and len(yPosSet) > 1 + self.zSequenced_ = len(zPosSet) > 1 + if sequence[0].exposure_ and not self.exposureSequenced_: + self.exposure_ = sequence[0].exposure_ + + + def copy(self): + e = AcquisitionEvent(self.acquisition_) + e.axisPositions_ = self.axisPositions_.copy() + e.configPreset_ = self.configPreset_ + e.configGroup_ = self.configGroup_ + e.stageCoordinates_ = self.stageCoordinates_.copy() + e.stageDeviceNamesToAxisNames_ = self.stageDeviceNamesToAxisNames_.copy() + e.xPosition_ = self.xPosition_ + e.yPosition_ = self.yPosition_ + e.miniumumStartTime_ms_ = 
self.miniumumStartTime_ms_ + e.slmImage_ = self.slmImage_ + e.acquireImage_ = self.acquireImage_ + e.properties_ = set(self.properties_) + e.camera_ = self.camera_ + e.timeout_ms_ = self.timeout_ms_ + e.setTags(self.tags_) # Assuming setTags is a method in the class + return e + + @staticmethod + def event_to_json(e): + data = {} + + if e.isAcquisitionFinishedEvent(): + data["special"] = "acquisition-end" + return json.dumps(data) + elif e.isAcquisitionSequenceEndEvent(): + data["special"] = "sequence-end" + return json.dumps(data) + + if e.miniumumStartTime_ms_: + data["min_start_time"] = e.miniumumStartTime_ms_ / 1000 + + if e.hasConfigGroup(): + data["config_group"] = [e.configGroup_, e.configPreset_] + + if e.exposure_: + data["exposure"] = e.exposure_ + + if e.slmImage_: + data["slm_pattern"] = e.slmImage_ + + if e.timeout_ms_: + data["timeout_ms"] = e.timeout_ms_ + + axes = {axis: e.axisPositions_[axis] for axis in e.axisPositions_} + if axes: + data["axes"] = axes + + stage_positions = [[stageDevice, e.getStageSingleAxisStagePosition(stageDevice)] for stageDevice in e.getStageDeviceNames()] + if stage_positions: + data["stage_positions"] = stage_positions + + if e.zPosition_: + data["z"] = e.zPosition_ + + if e.xPosition_: + data["x"] = e.xPosition_ + + if e.yPosition_: + data["y"] = e.yPosition_ + + if e.camera_: + data["camera"] = e.camera_ + + if e.getTags() and e.getTags(): # Assuming getTags is a method in the class + data["tags"] = {key: value for key, value in e.getTags().items()} + + props = [[t.dev, t.prop, t.val] for t in e.properties_] + if props: + data["properties"] = props + + return json.dumps(data) + + @staticmethod + def event_from_json(data, acq): + if "special" in data: + if data["special"] == "acquisition-end": + return AcquisitionEvent.createAcquisitionFinishedEvent(acq) + elif data["special"] == "sequence-end": + return AcquisitionEvent.createAcquisitionSequenceEndEvent(acq) + + event = AcquisitionEvent(acq) + + if "axes" in data: + for 
axisLabel, value in data["axes"].items(): + event.axisPositions_[axisLabel] = value + + if "min_start_time" in data: + event.miniumumStartTime_ms_ = int(data["min_start_time"] * 1000) + + if "timeout" in data: + event.timeout_ms_ = data["timeout"] + + if "config_group" in data: + event.configGroup_ = data["config_group"][0] + event.configPreset_ = data["config_group"][1] + + if "exposure" in data: + event.exposure_ = data["exposure"] + + if "timeout_ms" in data: + event.slmImage_ = data["timeout_ms"] + + if "stage_positions" in data: + for stagePos in data["stage_positions"]: + event.setStageCoordinate(stagePos[0], stagePos[1]) + + if "z" in data: + event.zPosition_ = data["z"] + + if "stage" in data: + deviceName = data["stage"]["device_name"] + position = data["stage"]["position"] + event.axisPositions_[deviceName] = position + if "axis_name" in data["stage"]: + axisName = data["stage"]["axis_name"] + event.stageDeviceNamesToAxisNames_[deviceName] = axisName + + # # Assuming XYTiledAcquisition is a class and AcqEngMetadata is a class or module with constants + # if isinstance(event.acquisition_, XYTiledAcquisition): + # posIndex = event.acquisition_.getPixelStageTranslator().getPositionIndices( + # [int(event.axisPositions_[AcqEngMetadata.AXES_GRID_ROW])], + # [int(event.axisPositions_[AcqEngMetadata.AXES_GRID_COL])])[0] + # xyPos = event.acquisition_.getPixelStageTranslator().getXYPosition(posIndex).getCenter() + # event.xPosition_ = xyPos.x + # event.yPosition_ = xyPos.y + + if "x" in data: + event.xPosition_ = data["x"] + + if "y" in data: + event.yPosition_ = data["y"] + + if "slm_pattern" in data: + event.slmImage_ = data["slm_pattern"] + + if "camera" in data: + event.camera_ = data["camera"] + + if "tags" in data: + tags = {key: value for key, value in data["tags"].items()} + event.setTags(tags) + + if "properties" in data: + for trip in data["properties"]: + t = ThreeTuple(trip[0], trip[1], trip[2]) + event.properties_.add(t) + + return event + + def 
to_json(self): + if self.sequence_: + events = [self.event_to_json(e) for e in self.sequence_] + return json.dumps({"events": events}) + else: + return self.event_to_json(self) + + @staticmethod + def from_json(data, acq): + if "events" not in data: + return AcquisitionEvent.event_from_json(data, acq) + else: + sequence = [AcquisitionEvent.event_from_json(item, acq) for item in data["events"]] + return AcquisitionEvent(sequence) + + def get_camera_device_name(self): + return self.camera_ + + def set_camera_device_name(self, camera): + self.camera_ = camera + + def get_additional_properties(self): + return [(t.dev, t.prop, t.val) for t in self.properties_] + + def should_acquire_image(self): + if self.sequence_: + return True + return self.configPreset_ is not None or len(self.axisPositions_) > 0 + + def has_config_group(self): + return self.configPreset_ is not None and self.configGroup_ is not None + + def get_config_preset(self): + return self.configPreset_ + + def get_config_group(self): + return self.configGroup_ + + def set_config_preset(self, config): + self.configPreset_ = config + + def set_config_group(self, group): + self.configGroup_ = group + + def get_exposure(self): + return self.exposure_ + + def set_exposure(self, exposure): + self.exposure_ = exposure + + def set_property(self, device, property, value): + self.properties_.add(ThreeTuple(device, property, value)) + + def set_minimum_start_time(self, l): + self.miniumumStartTime_ms_ = l + + def get_defined_axes(self): + return set(self.axisPositions_.keys()) + + def set_axis_position(self, label, position): + if position is None: + raise Exception("Cannot set axis position to null") + self.axisPositions_[label] = position + + def set_stage_coordinate(self, deviceName, v, axisName=None): + self.stageCoordinates_[deviceName] = v + self.stageDeviceNamesToAxisNames_[deviceName] = deviceName if axisName is None else axisName + + def get_stage_single_axis_stage_position(self, deviceName): + return 
self.stageCoordinates_.get(deviceName) + + def get_axis_positions(self): + return self.axisPositions_ + + def get_axis_position(self, label): + return self.axisPositions_.get(label) + + def get_timeout_ms(self): + return self.timeout_ms_ + + def set_time_index(self, index): + self.set_axis_position(AcqEngMetadata.TIME_AXIS, index) + + def set_channel_name(self, name): + self.set_axis_position(AcqEngMetadata.CHANNEL_AXIS, name) + + def get_slm_image(self): + return self.slmImage_ + + def set_z(self, index, position): + if index is not None: + self.set_axis_position(AcqEngMetadata.Z_AXIS, index) + self.zPosition_ = position + + def get_t_index(self): + return self.get_axis_position(AcqEngMetadata.TIME_AXIS) + + def get_z_index(self): + return self.get_axis_position(AcqEngMetadata.Z_AXIS) + + def get_device_axis_name(self, deviceName): + if deviceName not in self.stageDeviceNamesToAxisNames_: + raise Exception(f"No axis name for device {deviceName}. call setStageCoordinate first") + return self.stageDeviceNamesToAxisNames_[deviceName] + + def get_stage_device_names(self): + return set(self.stageDeviceNamesToAxisNames_.keys()) + + @staticmethod + def create_acquisition_finished_event(acq): + evt = AcquisitionEvent(acq) + evt.specialFlag_ = AcquisitionEvent.SpecialFlag.ACQUISITION_FINISHED + return evt + + def is_acquisition_finished_event(self): + return self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_FINISHED + + @staticmethod + def create_acquisition_sequence_end_event(acq): + evt = AcquisitionEvent(acq) + evt.specialFlag_ = AcquisitionEvent.SpecialFlag.ACQUISITION_SEQUENCE_END + return evt + + def is_acquisition_sequence_end_event(self): + return self.specialFlag_ == AcquisitionEvent.SpecialFlag.ACQUISITION_SEQUENCE_END + + def get_z_position(self): + return self.zPosition_ + + def get_minimum_start_time_absolute(self): + if self.miniumumStartTime_ms_ is None: + return None + return self.acquisition_.get_start_time_ms() + self.miniumumStartTime_ms_ + + 
def get_sequence(self): + return self.sequence_ + + def is_exposure_sequenced(self): + return self.exposureSequenced_ + + def is_config_group_sequenced(self): + return self.configGroupSequenced_ + + def is_xy_sequenced(self): + return self.xySequenced_ + + def is_z_sequenced(self): + return self.zSequenced_ + + def get_x_position(self): + return self.xPosition_ + + def get_camera_image_counts(self, default_camera_device_name): + """ + Get the number of images to be acquired on each camera in a sequence event. + For a non-sequence event, the number of images is 1, and the camera is the core camera. + This is passed in as an argument in order to avoid this class talking to the core directly. + + Args: + default_camera_device_name (str): Default camera device name. + + Returns: + defaultdict: Dictionary containing the camera device names as keys and image counts as values. + """ + # Figure out how many images on each camera and start sequence with appropriate number on each + camera_image_counts = {} + camera_device_names = set() + if self.get_sequence() is None: + camera_image_counts[default_camera_device_name] = 1 + return camera_image_counts + + for event in self.get_sequence(): + camera_device_names.add(event.get_camera_device_name() if event.get_camera_device_name() is not None else + default_camera_device_name) + if None in camera_device_names: + camera_device_names.remove(None) + camera_device_names.add(default_camera_device_name) + + for camera_device_name in camera_device_names: + camera_image_counts[camera_device_name] = sum(1 for event in self.get_sequence() + if event.get_camera_device_name() == camera_device_name) + + if len(camera_device_names) == 1 and camera_device_name == default_camera_device_name: + camera_image_counts[camera_device_name] = len(self.get_sequence()) + + return camera_image_counts + + def get_y_position(self): + return self.yPosition_ + + def get_position_name(self): + axisPosition_ = 
self.get_axis_position(AcqEngMetadata.POSITION_AXIS) + if isinstance(axisPosition_, str): + return axisPosition_ + return None + + def set_x(self, x): + self.xPosition_ = x + + def set_y(self, y): + self.yPosition_ = y + + def set_tags(self, tags): + self.tags_.clear() + if tags: + self.tags_.update(tags) + + def get_tags(self): + return dict(self.tags_) + + def __str__(self): + if self.specialFlag_ == AcquisitionEvent.SpecialFlag.AcquisitionFinished: + return "Acq finished event" + elif self.specialFlag_ == AcquisitionEvent.SpecialFlag.AcquisitionSequenceEnd: + return "Acq sequence end event" + + builder = [] + for deviceName in self.stageDeviceNamesToAxisNames_.keys(): + builder.append(f"\t{deviceName}: {self.get_stage_single_axis_stage_position(deviceName)}") + + if self.zPosition_ is not None: + builder.append(f"z {self.zPosition_}") + if self.xPosition_ is not None: + builder.append(f"x {self.xPosition_}") + if self.yPosition_ is not None: + builder.append(f"y {self.yPosition_}") + + for axis in self.axisPositions_.keys(): + builder.append(f"\t{axis}: {self.axisPositions_[axis]}") + + if self.camera_ is not None: + builder.append(f"\t{self.camera_}: {self.camera_}") + + return ' '.join(builder) + + +ThreeTuple = namedtuple('ThreeTuple', ['dev', 'prop', 'val']) diff --git a/pycromanager/acquisition/acquisition_superclass.py b/pycromanager/acquisition/acquisition_superclass.py new file mode 100644 index 00000000..4a8ed5e8 --- /dev/null +++ b/pycromanager/acquisition/acquisition_superclass.py @@ -0,0 +1,500 @@ +""" +Generic acquisition functionality used by both Python and Java backends +""" + +import copy +import types +import numpy as np +from typing import Union, List, Iterable +import warnings +from abc import ABCMeta, abstractmethod +from docstring_inheritance import NumpyDocstringInheritanceMeta +import queue +import weakref +from pycromanager.acq_future import AcqNotification, AcquisitionFuture +import os +import threading +from inspect import signature + 
class AcqAlreadyCompleteException(Exception):
    """Raised when events are submitted to an acquisition that has already finished."""

    def __init__(self, message):
        self.message = message
        super().__init__(self.message)


# Subclasses inherit docstrings from abstract base class
class Meta(ABCMeta, NumpyDocstringInheritanceMeta):
    pass


class Acquisition(metaclass=Meta):
    """Abstract base class holding the acquisition functionality shared by the
    Python and Java backends."""

    def __init__(
        self,
        directory: str = None,
        name: str = "default_acquisition_name",
        image_process_fn: callable = None,
        event_generation_hook_fn: callable = None,
        pre_hardware_hook_fn: callable = None,
        post_hardware_hook_fn: callable = None,
        post_camera_hook_fn: callable = None,
        notification_callback_fn: callable = None,
        image_saved_fn: callable = None,
        napari_viewer=None,
        debug: bool = False,  # BUGFIX: was annotated 'int'
    ):
        """
        Parameters
        ----------
        directory : str
            saving directory for this acquisition. If it is not supplied, the image data will be stored in RAM
        name : str
            Name of the acquisition. This will be used to generate the folder where the data is saved.
        image_process_fn : Callable
            image processing function that will be called on each image that gets acquired.
            Can either take two arguments (image, metadata), where image is a numpy array and metadata
            is a dict containing the corresponding image metadata, or three arguments
            (image, metadata, queue), where queue holds upcoming acquisition events. The function
            should return either an (image, metadata) tuple or a list of such tuples
        event_generation_hook_fn : Callable
            hook function run as soon as acquisition events are generated, before hardware
            sequencing optimization in the acquisition engine. Useful to modify acquisition events
            generated elsewhere (e.g. by a GUI application). Accepts either one argument
            (the current acquisition event) or two arguments (current event, event_queue)
        pre_hardware_hook_fn : Callable
            hook function run just before the hardware is updated before acquiring a new image.
            With hardware sequencing, it runs just before a sequence of instructions is dispatched
            to the hardware. Accepts (event) or (event, event_queue)
        post_hardware_hook_fn : Callable
            hook function run just after a hardware update is dispatched, but before the camera
            sequence has been started. Accepts (event) or (event, event_queue)
        post_camera_hook_fn : Callable
            hook function run just after the camera has been triggered to snapImage or
            startSequence. Commonly used to fire external TTL triggers that synchronize the camera
            with other hardware. Accepts (event) or (event, event_queue)
        notification_callback_fn : Callable
            (Experimental) called whenever a notification is received from the acquisition engine
            (hardware/camera phases, image saving). Runs asynchronously with respect to the
            acquisition; takes a single AcqNotification argument and should execute quickly
        image_saved_fn : Callable
            function that takes two arguments (the Axes of the image that just finished saving,
            and the Dataset) or three (Axes, Dataset, event_queue), called whenever a new image
            is written to disk
        napari_viewer : napari.Viewer
            Provide a napari viewer to display acquired data in napari (https://napari.org/)
            rather than the built-in NDViewer. None by default. Data is added to the
            'pycromanager acquisition' layer, which may be pre-configured by the user
        debug : bool
            whether to print debug messages
        """
        self._debug = debug
        self._dataset = None
        self._finished = False
        self._exception = None
        self._napari_viewer = None
        self._notification_queue = queue.Queue(100)
        self._image_notification_queue = queue.Queue(100)
        self._acq_futures = []
        self._image_process_fn = image_process_fn
        # NOTE(review): the remaining constructor arguments are consumed by
        # backend-specific subclasses -- confirm against the backend implementations

    def _start_notification_dispatcher(self, notification_callback_fn):
        """
        Start the thread that pulls notifications from the queue and dispatches
        them to the appropriate listeners. Returns the started Thread.
        """
        def dispatch_notifications():
            events_finished = False
            data_sink_finished = False
            while True:
                # dispatch notifications to all listeners
                notification = self._notification_queue.get()

                if AcqNotification.is_acquisition_finished_notification(notification):
                    events_finished = True
                elif AcqNotification.is_data_sink_finished_notification(notification):
                    data_sink_finished = True
                # notify acquisition futures so they can stop blocking
                for future in self._acq_futures:
                    strong_ref = future()
                    if strong_ref is not None:
                        strong_ref._notify(notification)
                # alert user-specified notification callback
                if notification_callback_fn is not None:
                    notification_callback_fn(notification)

                # shut down once both the event stream and the data sink are done
                if events_finished and data_sink_finished:
                    break

        dispatcher_thread = threading.Thread(
            target=dispatch_notifications,
            name="NotificationDispatcherThread",
        )
        dispatcher_thread.start()
        return dispatcher_thread
dispatcher_thread + + @abstractmethod + def get_dataset(self): + """ + Get access to the dataset backing this acquisition + """ + + @abstractmethod + def await_completion(self): + """ + Wait for acquisition to finish and resources to be cleaned up. If data is being written to + disk, this will wait for the data to be written before returning. + """ + + @abstractmethod + def get_viewer(self): + """ + Return a reference to the current viewer, if the show_display argument + was set to True. The returned object is either an instance of NDViewer or napari.Viewer() + """ + + def mark_finished(self): + """ + Signal to acquisition that no more events will be added and it is time to initiate shutdown. + This is only needed if the context manager (i.e. "with Acquisition...") is not used. + """ + # Some acquisition types (e.g. ExploreAcquisitions) generate their own events + # and don't send events over a port + if self._event_queue is not None: + # this should shut down storage and viewer as appropriate + self._event_queue.put(None) + + def acquire(self, event_or_events: dict or list): + """ + Submit an event or a list of events for acquisition. A single event is a python dictionary + with a specific structure. The acquisition engine will determine if multiple events can + be merged into a hardware sequence and executed at once without computer-hardware communication in + between. This sequencing will only take place for events that are within a single call to acquire, + so if you want to ensure this doesn't happen, call acquire multiple times with each event in a + list individually. 
+ + Parameters + ---------- + event_or_events : list, dict + A single acquistion event (a dict) or a list of acquisition events + + """ + if self._acq.are_events_finished(): + raise AcqAlreadyCompleteException( + 'Cannot submit more events because this acquisition is already finished') + + if event_or_events is None: + # manual shutdown + self._event_queue.put(None) + return + + _validate_acq_events(event_or_events) + + axes_or_axes_list = event_or_events['axes'] if type(event_or_events) == dict\ + else [e['axes'] for e in event_or_events] + acq_future = AcquisitionFuture(self, axes_or_axes_list) + self._acq_futures.append(weakref.ref(acq_future)) + # clear out old weakrefs + self._acq_futures = [f for f in self._acq_futures if f() is not None] + + self._event_queue.put(event_or_events) + return acq_future + + + + def abort(self, exception=None): + """ + Cancel any pending events and shut down immediately + + Parameters + ---------- + exception : Exception + The exception that is the reason abort is being called + """ + # Store the exception that caused this + if exception is not None: + self._exception = exception + + # Clear any pending events on the python side, if applicable + if self._event_queue is not None: + self._event_queue.queue.clear() + # Don't send any more events. 
The event sending thread should know shut itself down by + # checking the status of the acquisition + self._acq.abort() + + def _create_event_queue(self): + """Create thread safe queue for events so they can be passed from multiple processes""" + self._event_queue = queue.Queue() + + def _call_image_process_fn(self, image, metadata): + params = signature(self._process_fn).parameters + processed = None + if len(params) == 2 or len(params) == 3: + try: + if len(params) == 2: + processed = self._process_fn(image, metadata) + elif len(params) == 3: + processed = self._process_fn(image, metadata, self._event_queue) + except Exception as e: + self.abort(Exception("exception in image processor: {}".format(e))) + + else: + self.abort(Exception( + "Incorrect number of arguments for image processing function, must be 2 or 3" + )) + return processed + + ######## Context manager (i.e. "with Acquisition...") ########### + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.mark_finished() + # now wait on it to finish + self.await_completion() + + +def _validate_acq_events(events: dict or list): + """ + Validate if supplied events are a dictionary or a list of dictionaries + that contain valid events. 
Throw an exception if not + + Parameters + ---------- + events : dict or list + + """ + if isinstance(events, dict): + _validate_acq_dict(events) + elif isinstance(events, list): + if len(events) == 0: + raise Exception('events list cannot be empty') + for event in events: + if isinstance(event, dict): + _validate_acq_dict(event) + else: + raise Exception('events must be a dictionary or a list of dictionaries') + else: + raise Exception('events must be a dictionary or a list of dictionaries') + +def _validate_acq_dict(event: dict): + """ + Validate event dictionary, and raise an exception or supply a warning and fix it if something is incorrect + + Parameters + ---------- + event : dict + + """ + if 'axes' not in event.keys(): + raise Exception('event dictionary must contain an \'axes\' key. This event will be ignored') + if 'row' in event.keys(): + warnings.warn('adding \'row\' as a top level key in the event dictionary is deprecated and will be disallowed in ' + 'a future version. Instead, add \'row\' as a key in the \'axes\' dictionary') + event['axes']['row'] = event['row'] + if 'col' in event.keys(): + warnings.warn('adding \'col\' as a top level key in the event dictionary is deprecated and will be disallowed in ' + 'a future version. 
Instead, add \'column\' as a key in the \'axes\' dictionary') + event['axes']['column'] = event['col'] + + # TODO check for the validity of other acquisition event fields, and make sure that there aren't unexpected + # other fields, to help users catch simple errors + + +def multi_d_acquisition_events( + num_time_points: int=None, + time_interval_s: Union[float, List[float]]=0, + z_start: float=None, + z_end: float=None, + z_step: float=None, + channel_group: str=None, + channels: list=None, + channel_exposures_ms: list=None, + xy_positions: Iterable=None, + xyz_positions: Iterable=None, + position_labels: List[str]=None, + order: str="tpcz", +): + """Convenience function for generating the events of a typical multi-dimensional acquisition (i.e. an + acquisition with some combination of multiple timepoints, channels, z-slices, or xy positions) + + Parameters + ---------- + num_time_points : int + How many time points if it is a timelapse (Default value = None) + time_interval_s : float or list of floats + the minimum interval between consecutive time points in seconds. If set to 0, the + acquisition will go as fast as possible. If a list is provided, its length should + be equal to 'num_time_points'. Elements in the list are assumed to be the intervals + between consecutive timepoints in the timelapse. First element in the list indicates + delay before capturing the first image (Default value = 0) + z_start : float + z-stack starting position, in µm. If xyz_positions is given z_start is relative + to the points' z position. (Default value = None) + z_end : float + z-stack ending position, in µm. If xyz_positions is given z_start is + relative to the points' z position. 
(Default value = None) + z_step : float + step size of z-stack, in µm (Default value = None) + channel_group : str + name of the channel group (which should correspond to a config group in micro-manager) (Default value = None) + channels : list of strings + list of channel names, which correspond to possible settings of the config group + (e.g. ['DAPI', 'FITC']) (Default value = None) + channel_exposures_ms : list of floats or ints + list of camera exposure times corresponding to each channel. The length of this list + should be the same as the the length of the list of channels (Default value = None) + xy_positions : iterable + An array of shape (N, 2) containing N (X, Y) stage coordinates. (Default value = None) + xyz_positions : iterable + An array of shape (N, 3) containing N (X, Y, Z) stage coordinates. (Default value = None). + If passed then z_start, z_end, and z_step will be relative to the z_position in xyz_positions. (Default value = None) + position_labels : iterable + An array of length N containing position labels for each of the XY stage positions. (Default value = None) + order : str + string that specifies the order of different dimensions. Must have some ordering of the letters + c, t, p, and z. For example, 'tcz' would run a timelapse where z stacks would be acquired at each channel in + series. 
'pt' would move to different xy stage positions and run a complete timelapse at each one before moving + to the next (Default value = 'tpcz') + + Returns + ------- + events : dict + """ + if xy_positions is not None and xyz_positions is not None: + raise ValueError( + "xyz_positions and xy_positions are incompatible arguments that cannot be passed together" + ) + order = order.lower() + if "p" in order and "z" in order and order.index("p") > order.index("z"): + raise ValueError( + "This function requires that the xy position come earlier in the order than z" + ) + if isinstance(time_interval_s, list): + if len(time_interval_s) != num_time_points: + raise ValueError( + "Length of time interval list should be equal to num_time_points" + ) + if position_labels is not None: + if xy_positions is not None and len(xy_positions) != len(position_labels): + raise ValueError("xy_positions and position_labels must be of equal length") + if xyz_positions is not None and len(xyz_positions) != len(position_labels): + raise ValueError("xyz_positions and position_labels must be of equal length") + + # If any of z_start, z_step, z_end are provided, then they should all be provided + # Here we can't use `all` as some of the values of z_start, z_step, z_end + # may be zero and all((0,)) = False + has_zsteps = False + if any([z_start, z_step, z_end]): + if not None in [z_start, z_step, z_end]: + has_zsteps = True + else: + raise ValueError('All of z_start, z_step, and z_end must be provided') + + z_positions = None + if xy_positions is not None: + xy_positions = np.asarray(xy_positions) + z_positions = None + elif xyz_positions is not None: + xyz_positions = np.asarray(xyz_positions) + xy_positions = xyz_positions[:, :2] + z_positions = xyz_positions[:, 2][:, None] + + if has_zsteps: + z_rel = np.arange(z_start, z_end + z_step, z_step) + if z_positions is None: + z_positions = z_rel + if xy_positions is not None: + z_positions = np.broadcast_to( + z_positions, (xy_positions.shape[0], 
z_positions.shape[0]) + ) + else: + pos = [] + for z in z_positions: + pos.append(z + z_rel) + z_positions = np.asarray(pos) + + if position_labels is None and xy_positions is not None: + position_labels = list(range(len(xy_positions))) + + def generate_events(event, order): + if len(order) == 0: + yield event + return + elif order[0] == "t" and num_time_points is not None and num_time_points > 0: + time_indices = np.arange(num_time_points) + if isinstance(time_interval_s, list): + absolute_start_times = np.cumsum(time_interval_s) + for time_index in time_indices: + new_event = copy.deepcopy(event) + new_event["axes"]["time"] = time_index + if isinstance(time_interval_s, list): + new_event["min_start_time"] = absolute_start_times[time_index] + else: + if time_interval_s != 0: + new_event["min_start_time"] = time_index * time_interval_s + yield generate_events(new_event, order[1:]) + elif order[0] == "z" and z_positions is not None: + if "axes" in event and "position" in event["axes"]: + pos_idx = position_labels.index(event["axes"]["position"]) + zs = z_positions[pos_idx] + else: + zs = z_positions + + for z_index, z in enumerate(zs): + new_event = copy.deepcopy(event) + new_event["axes"]["z"] = z_index + new_event["z"] = z + yield generate_events(new_event, order[1:]) + elif order[0] == "p" and xy_positions is not None: + for p_label, xy in zip(position_labels, xy_positions): + new_event = copy.deepcopy(event) + new_event["axes"]["position"] = p_label + new_event["x"] = xy[0] + new_event["y"] = xy[1] + yield generate_events(new_event, order[1:]) + elif order[0] == "c" and channel_group is not None and channels is not None: + for i in range(len(channels)): + new_event = copy.deepcopy(event) + new_event["config_group"] = [channel_group, channels[i]] + new_event["axes"]["channel"] = channels[i] + if channel_exposures_ms is not None: + new_event["exposure"] = channel_exposures_ms[i] + yield generate_events(new_event, order[1:]) + else: + # this axis appears to be 
missing + yield generate_events(event, order[1:]) + + # collect all events into a single list + base_event = {"axes": {}} + events = [] + + def appender(next): + """ + + Parameters + ---------- + next : + + + Returns + ------- + + """ + if isinstance(next, types.GeneratorType): + for n in next: + appender(n) + else: + events.append(next) + + appender(generate_events(base_event, order)) + return events + + diff --git a/pycromanager/acquisitions.py b/pycromanager/acquisition/java_backend_acquisitions.py similarity index 57% rename from pycromanager/acquisitions.py rename to pycromanager/acquisition/java_backend_acquisitions.py index 39588560..0c19ab12 100644 --- a/pycromanager/acquisitions.py +++ b/pycromanager/acquisition/java_backend_acquisitions.py @@ -12,21 +12,20 @@ from pycromanager.zmq_bridge._bridge import deserialize_array from pycromanager.zmq_bridge.wrappers import PullSocket, PushSocket, JavaObject, JavaClass from pycromanager.zmq_bridge.wrappers import DEFAULT_BRIDGE_PORT as DEFAULT_PORT -from pycromanager.mm_java_classes import Core, Magellan +from pycromanager.mm_java_classes import ZMQRemoteMMCoreJ, Magellan from ndtiff import Dataset import os.path import queue from docstring_inheritance import NumpyDocstringInheritanceMeta +from pycromanager.acquisition.acquisition_superclass import Acquisition import traceback -from pycromanager.notifications import AcqNotification, AcquisitionFuture +from pycromanager.acq_future import AcqNotification, AcquisitionFuture + -class AcqAlreadyCompleteException(Exception): - def __init__(self, message): - self.message = message - super().__init__(self.message) ### These functions are defined outside the Acquisition class to # prevent problems with pickling when running them in differnet process +# although they are currently only used in different threads def _run_acq_event_source(acquisition, event_port, event_queue, debug=False): event_socket = PushSocket(event_port, debug=debug) @@ -37,16 +36,16 @@ def 
_run_acq_event_source(acquisition, event_port, event_queue, debug=False): print("got event(s):", events) if events is None: # Initiate the normal shutdown process - if not acquisition._remote_acq.is_finished(): + if not acquisition._acq.is_finished(): # if it has been finished through something happening on the other side event_socket.send({"events": [{"special": "acquisition-end"}]}) # wait for signal that acquisition has received the end signal - while not acquisition._remote_acq.are_events_finished(): - time.sleep(0.001) + while not acquisition._acq.is_finished(): + acquisition._acq.block_until_events_finished(0.01) break # it may have been shut down remotely (e.g. by user Xing out viewer) # if we try to send an event at this time, it will hang indefinitely - if acquisition._remote_acq.is_finished(): + if acquisition._acq.is_finished(): break # TODO in theory it could be aborted in between the check above and sending below, # maybe consider putting a timeout on the send? @@ -103,6 +102,7 @@ def _run_acq_hook(acquisition, pull_port, def _run_image_processor( acquisition, pull_port, push_port, sockets_connected_evt, process_fn, event_queue, debug ): + acquisition._process_fn = process_fn push_socket = PushSocket(pull_port, debug=debug) pull_socket = PullSocket(push_port, debug=debug) if debug: @@ -169,21 +169,7 @@ def process_and_sendoff(image_tags_tuple, original_dtype): else: image = np.reshape(pixels, [metadata["Height"], metadata["Width"]]) - params = signature(process_fn).parameters - processed = None - if len(params) == 2 or len(params) == 3: - try: - if len(params) == 2: - processed = process_fn(image, metadata) - elif len(params) == 3: - processed = process_fn(image, metadata, event_queue) - except Exception as e: - acquisition.abort(Exception("exception in image processor: {}".format(e))) - continue - else: - acquisition.abort(Exception( - "Incorrect number of arguments for image processing function, must be 2 or 3" - )) + processed = 
acquisition._call_image_process_fn(image, metadata) if processed is None: continue @@ -194,48 +180,27 @@ def process_and_sendoff(image_tags_tuple, original_dtype): else: process_and_sendoff(processed, pixels.dtype) -def _storage_monitor_fn(acquisition, dataset, storage_monitor_push_port, connected_event, - image_saved_fn, event_queue, debug=False): - monitor_socket = PullSocket(storage_monitor_push_port) - connected_event.set() - callback = None - if image_saved_fn is not None: - params = signature(image_saved_fn).parameters - if len(params) == 2: - callback = image_saved_fn - elif len(params) == 3: - callback = lambda axes, dataset: image_saved_fn(axes, dataset, event_queue) - else: - raise Exception('Image saved callbacks must have either 2 or three parameters') - - - try: - while True: - message = monitor_socket.receive() - if "finished" in message: - # Time to shut down - break - - index_entry = message["index_entry"] - axes = dataset._add_index_entry(index_entry) - acquisition._notification_queue.put(AcqNotification.make_image_saved_notification(axes)) - dataset._new_image_arrived = True - if callback is not None: - callback(axes, dataset) - except Exception as e: - acquisition.abort(e) - finally: - monitor_socket.close() - def _notification_handler_fn(acquisition, notification_push_port, connected_event, debug=False): monitor_socket = PullSocket(notification_push_port) connected_event.set() try: + events_finished = False + data_sink_finished = False while True: message = monitor_socket.receive() - acquisition._notification_queue.put(AcqNotification.from_json(message)) - if "acq_finished" in message["type"]: + notification = AcqNotification.from_json(message) + acquisition._notification_queue.put(notification) + # these are processed seperately to handle image saved callback + if AcqNotification.is_image_saved_notification(notification): + acquisition._image_notification_queue.put(notification) + + if 
AcqNotification.is_acquisition_finished_notification(notification): + events_finished = True + elif AcqNotification.is_data_sink_finished_notification(notification): + data_sink_finished = True + acquisition._image_notification_queue.put(notification) + if events_finished and data_sink_finished: break except Exception as e: @@ -244,9 +209,9 @@ def _notification_handler_fn(acquisition, notification_push_port, connected_even finally: monitor_socket.close() -class Acquisition(object, metaclass=NumpyDocstringInheritanceMeta): +class JavaBackendAcquisition(Acquisition, metaclass=NumpyDocstringInheritanceMeta): """ - Base class for Pycro-Manager acquisitions + Pycro-Manager acquisition that uses a Java runtime backend via a ZeroMQ communication layer. """ def __init__( @@ -258,66 +223,20 @@ def __init__( pre_hardware_hook_fn: callable=None, post_hardware_hook_fn: callable=None, post_camera_hook_fn: callable=None, + notification_callback_fn: callable=None, + image_saved_fn: callable=None, show_display: bool=True, napari_viewer=None, - image_saved_fn: callable=None, - process: bool=False, saving_queue_size: int=20, timeout: int=2000, port: int=DEFAULT_PORT, - debug: int=False, - core_log_debug: int=False, + debug: int=False ): """ Parameters ---------- - directory : str - saving directory for this acquisition. Required unless an image process function will be - implemented that diverts images from saving - name : str - Saving name for the acquisition. Required unless an image process function will be - implemented that diverts images from saving - image_process_fn : Callable - image processing function that will be called on each image that gets acquired. - Can either take two arguments (image, metadata) where image is a numpy array and metadata is a dict - containing the corresponding image metadata. Or a three argument version is accepted, which accepts (image, - metadata, queue), where queue is a Queue object that holds upcoming acquisition events. 
The function - should return either an (image, metadata) tuple or a list of such tuples - event_generation_hook_fn : Callable - hook function that will as soon as acquisition events are generated (before hardware sequencing optimization - in the acquisition engine. This is useful if one wants to modify acquisition events that they didn't generate - (e.g. those generated by a GUI application). Accepts either one argument (the current acquisition event) - or two arguments (current event, event_queue) - pre_hardware_hook_fn : Callable - hook function that will be run just before the hardware is updated before acquiring - a new image. In the case of hardware sequencing, it will be run just before a sequence of instructions are - dispatched to the hardware. Accepts either one argument (the current acquisition event) or two arguments - (current event, event_queue) - post_hardware_hook_fn : Callable - hook function that will be run just before the hardware is updated before acquiring - a new image. In the case of hardware sequencing, it will be run just after a sequence of instructions are - dispatched to the hardware, but before the camera sequence has been started. Accepts either one argument - (the current acquisition event) or two arguments (current event, event_queue) - post_camera_hook_fn : Callable - hook function that will be run just after the camera has been triggered to snapImage or - startSequence. A common use case for this hook is when one want to send TTL triggers to the camera from an - external timing device that synchronizes with other hardware. Accepts either one argument (the current - acquisition event) or two arguments (current event, event_queue) show_display : bool If True, show the image viewer window. If False, show no viewer. - napari_viewer : napari.Viewer - Provide a napari viewer to display acquired data in napari (https://napari.org/) rather than the built-in - NDViewer. None by default. 
Data is added to the 'pycromanager acquisition' layer, which may be pre-configured by - the user - image_saved_fn : Callable - function that takes two arguments (the Axes of the image that just finished saving, and the Dataset) - or three arguments (Axes, Dataset and the event_queue) and gets called whenever a new image is written to - disk - process : bool - Use multiprocessing instead of multithreading for acquisition hooks and image - processors. This can be used to speed up CPU-bounded processing by eliminating bottlenecks - caused by Python's Global Interpreter Lock, but also creates complications on Windows-based - systems saving_queue_size : int The number of images to queue (in memory) while waiting to write to disk. Higher values should in theory allow sequence acquisitions to go faster, but requires the RAM to hold images while @@ -325,84 +244,83 @@ def __init__( timeout : Timeout in ms for connecting to Java side port : - Allows overriding the defualt port for using Java side servers on a different port - debug : bool - whether to print debug messages - core_log_debug : bool - Print debug messages on java side in the micro-manager core log + Allows overriding the default port for using Java backends on a different port. 
Use this + after calling start_headless with the same non-default port """ - self._debug = debug - self._dataset = None - self._finished = False - self._exception = None - self._port = port - self._timeout = timeout - self._nd_viewer = None - self._napari_viewer = None - self._notification_queue = queue.Queue(100) - self._acq_futures = [] - # Get a dict of all named argument values (or default values when nothing provided) - arg_names = [k for k in signature(Acquisition.__init__).parameters.keys() if k != 'self'] + arg_names = [k for k in signature(JavaBackendAcquisition.__init__).parameters.keys() if k != 'self'] l = locals() named_args = {arg_name: (l[arg_name] if arg_name in l else - dict(signature(Acquisition.__init__).parameters.items())[arg_name].default) + dict(signature(JavaBackendAcquisition.__init__).parameters.items())[arg_name].default) for arg_name in arg_names } + + superclass_arg_names = [k for k in signature(Acquisition.__init__).parameters.keys() if k != 'self'] + superclass_args = {key: named_args[key] for key in superclass_arg_names} + super().__init__(**superclass_args) + if directory is not None: # Expend ~ in path directory = os.path.expanduser(directory) # If path is relative, retain knowledge of the current working directory - named_args['directory'] = os.path.abspath(directory) + self._directory = os.path.abspath(directory) + else: + self._directory = None + named_args['directory'] = self._directory - self._create_event_queue(**named_args) + # Java specific parameters + self._port = port + self._timeout = timeout + self._nd_viewer = None + + self._create_event_queue() self._create_remote_acquisition(**named_args) self._initialize_image_processor(**named_args) self._initialize_hooks(**named_args) try: self._remote_notification_handler = JavaObject('org.micromanager.remote.RemoteNotificationHandler', - args=[self._remote_acq], port=self._port, new_socket=False) + args=[self._acq], port=self._port, new_socket=False) 
self._acq_notification_recieving_thread = self._start_receiving_notifications() - self._acq_notification_dispatcher_thread = self._start_notification_dispatcher() + self._acq_notification_dispatcher_thread = self._start_notification_dispatcher(notification_callback_fn) + # TODO: can remove this after this feature has been present for a while except: - warnings.warn('Could not create acquisition notification handler. This should not affect performance,' - ' but indicates that Micro-Manager is out of date') + traceback.print_exc() + warnings.warn('Could not create acquisition notification handler. ' + 'Update Micro-Manager and Pyrcro-Manager to the latest versions to fix this') # Start remote acquisition # Acquistition.start is now deprecated, so this can be removed later # Acquisitions now get started automatically when the first events submitted # but Magellan acquisitons (and probably others that generate their own events) # will need some new method to submit events only after image processors etc have been added - self._remote_acq.start() + self._acq.start() self._dataset_disk_location = ( - self._remote_acq.get_data_sink().get_storage().get_disk_location() - if self._remote_acq.get_data_sink() is not None + self._acq.get_data_sink().get_storage().get_disk_location() + if self._acq.get_data_sink() is not None else None ) self._start_events() # Load remote storage - data_sink = self._remote_acq.get_data_sink() + data_sink = self._acq.get_data_sink() if data_sink is not None: # load a view of the dataset in progress. 
This is used so that acq.get_dataset() can be called # while the acquisition is still running, and (optionally )so that a image_saved_fn can be called # when images are written to disk ndtiff_storage = data_sink.get_storage() summary_metadata = ndtiff_storage.get_summary_metadata() - self._remote_storage_monitor = JavaObject('org.micromanager.remote.RemoteStorageMonitor', port=self._port, - new_socket=False) - ndtiff_storage.add_image_written_listener(self._remote_storage_monitor) - self._dataset = Dataset(dataset_path=self._dataset_disk_location, _summary_metadata=summary_metadata) - # Monitor image arrival so they can be loaded on python side, but with no callback function - # Need to do this regardless of whether you use it, so that it signals to shut down on Java side - self._storage_monitor_thread = self._add_storage_monitor_fn(callback_fn=image_saved_fn, debug=self._debug) + if directory is not None: + self._dataset = Dataset(dataset_path=self._dataset_disk_location, _summary_metadata=summary_metadata) + # Monitor image arrival so they can be loaded on python side, but with no callback function + # Need to do this regardless of whether you use it, so that it signals to shut down on Java side + self._storage_monitor_thread = self._add_storage_monitor_fn(image_saved_fn=image_saved_fn) if show_display: if napari_viewer is None: # using NDViewer - self._nd_viewer = self._remote_acq.get_data_sink().get_viewer() + self._nd_viewer = self._acq.get_data_sink().get_viewer() else: # using napari viewer try: @@ -415,36 +333,22 @@ def __init__( start_napari_signalling(self._napari_viewer, self.get_dataset()) - ######## Public API ########### + ######## Public API methods with unique implementations for Java backend ########### def get_dataset(self): - """ - Get access to the dataset backing this acquisition. If the acquisition is in progress, - return a Dataset object that wraps the java class containing it. 
If the acquisition is finished, - load the dataset from disk on the Python side for better performance - """ if self._finished: if self._dataset is None: self._dataset = Dataset(self._dataset_disk_location) return self._dataset - def mark_finished(self): - """ - Signal to acquisition that no more events will be added and it is time to initiate shutdown. - This is only needed if the context manager (i.e. "with Acquisition...") is not used. - """ - # Some acquisition types (e.g. Magellan) generate their own events - # and don't send events over a port - if self._event_queue is not None: - # this should shut down storage and viewer as apporpriate - self._event_queue.put(None) - def await_completion(self): - """Wait for acquisition to finish and resources to be cleaned up""" - while not self._remote_acq.are_events_finished() or ( - self._remote_acq.get_data_sink() is not None and not self._remote_acq.get_data_sink().is_finished()): - time.sleep(1 if self._debug else 0.05) + while not self._acq.are_events_finished() or ( + self._acq.get_data_sink() is not None and not self._acq.get_data_sink().is_finished()): self._check_for_exceptions() + self._acq.block_until_events_finished(0.01) + # This will block until saving is finished, if there is a data sink + self._acq.wait_for_completion() + self._check_for_exceptions() for hook_thread in self._hook_threads: hook_thread.join() @@ -457,9 +361,6 @@ def await_completion(self): # Wait on all the other threads to shut down properly if hasattr(self, '_storage_monitor_thread'): self._storage_monitor_thread.join() - # now that the shutdown signal has been received from the monitor, - # tell it it is okay to shutdown its push socket - self._remote_storage_monitor.storage_monitoring_complete() if hasattr(self, '_acq_notification_recieving_thread'): # for backwards compatiblitiy with older versions of Pycromanager java before this added @@ -467,87 +368,20 @@ def await_completion(self): 
self._remote_notification_handler.notification_handling_complete() self._acq_notification_dispatcher_thread.join() - + self._acq = None self._finished = True - def acquire(self, event_or_events: dict or list): - """Submit an event or a list of events for acquisition. Optimizations (i.e. taking advantage of - hardware synchronization, where available), will take place across this list of events, but not - over multiple calls of this method. A single event is a python dictionary with a specific structure - - Parameters - ---------- - event_or_events : list, dict - A single acquistion event (a dict) or a list of acquisition events - - """ - if self._remote_acq.is_finished(): - raise AcqAlreadyCompleteException( - 'Cannot submit more events because this acquisition is already finished') - - if event_or_events is None: - # manual shutdown - self._event_queue.put(None) - return - - _validate_acq_events(event_or_events) - - axes_or_axes_list = event_or_events['axes'] if type(event_or_events) == dict\ - else [e['axes'] for e in event_or_events] - acq_future = AcquisitionFuture(self, axes_or_axes_list) - self._acq_futures.append(weakref.ref(acq_future)) - # clear out old weakrefs - self._acq_futures = [f for f in self._acq_futures if f() is not None] - - self._event_queue.put(event_or_events) - return acq_future - - def abort(self, exception=None): - """ - Cancel any pending events and shut down immediately - - Parameters - ---------- - exception : Exception - The exception that is the reason abort is being called - """ - # Store the exception that caused this - if exception is not None: - self._exception = exception - - # Clear any pending events on the python side, if applicable - if self._event_queue is not None: - self._event_queue.queue.clear() - # Don't send any more events. 
The event sending thread should know shut itself down by - # checking the status of the acquisition - self._remote_acq.abort() def get_viewer(self): - """ - Return a reference to the current viewer, if the show_display argument - was set to True. The returned object is either an instance of NDViewer or napari.Viewer() - """ if self._napari_viewer is None: return self._nd_viewer else: return self._napari_viewer - ######## Context manager (i.e. "with Acquisition...") ########### - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.mark_finished() - # now wait on it to finish - self.await_completion() - ######## Private methods ########### - def _start_receiving_notifications(self): """ Thread that runs a function that pulls notifications from the acquisition engine and puts them on a queue - This is not all notifications, just ones that are relevant to the acquisition. Specifically, it does not - include notifications the progress of data saving """ connected_event = threading.Event() @@ -571,75 +405,42 @@ def _start_receiving_notifications(self): self._remote_notification_handler.start() return notification_thread - def _start_notification_dispatcher(self): - """ - Thread that runs a function that pulls notifications from the queue on the python side and dispatches - them to the appropriate listener - """ - def dispatch_notifications(): - while True: - # dispatch notifications to all listeners - try: - notification = self._notification_queue.get(timeout=0.05) # 50 ms timeout - except queue.Empty: - storage_monitoring_ongoing = hasattr(self, '_storage_monitor_thread')\ - and self._storage_monitor_thread.is_alive() - acq_notifications_ongoing = hasattr(self, '_acq_notification_recieving_thread')\ - and self._acq_notification_recieving_thread.is_alive() - if not storage_monitoring_ongoing and not acq_notifications_ongoing and self._notification_queue.empty(): - # if all the threads have shut down and the queue is empty, then 
shut down - break - else: - # print(notification.to_json()) - for future in self._acq_futures: - strong_ref = future() - if strong_ref is not None: - strong_ref._notify(notification) - # TODO: can also add a user-specified notification callback - - dispatcher_thread = threading.Thread( - target=dispatch_notifications, - name="NotificationDispatcherThread", - ) - dispatcher_thread.start() - return dispatcher_thread - - - def _add_storage_monitor_fn(self, callback_fn=None, debug=False): + def _add_storage_monitor_fn(self, image_saved_fn=None): """ Add a callback function that gets called whenever a new image is writtern to disk (for acquisitions in progress only) Parameters ---------- - callback_fn : Callable - callable with that takes 1 argument, the axes dict of the image just written + image_saved_fn : Callable + user function to be run whenever an image is ready on disk """ - connected_event = threading.Event() - - push_port = self._remote_storage_monitor.get_port() - monitor_thread = threading.Thread( - target=_storage_monitor_fn, - args=( - self, - self.get_dataset(), - push_port, - connected_event, - callback_fn, - self._event_queue, - debug, - ), - name="ImageSavedCallbackThread", - ) - - monitor_thread.start() - - # Wait for pulling to start before you signal for pushing to start - connected_event.wait() # wait for push/pull sockets to connect + # TODO: this should read from a queue of image-specific notifications and dispatch accordingly + + callback = None + if image_saved_fn is not None: + params = signature(image_saved_fn).parameters + if len(params) == 2: + callback = image_saved_fn + elif len(params) == 3: + callback = lambda axes, dataset: image_saved_fn(axes, dataset, self._event_queue) + else: + raise Exception('Image saved callbacks must have either 2 or three parameters') - # start pushing out all the image written events (including ones that have already accumulated) - self._remote_storage_monitor.start() - return monitor_thread + def 
_storage_monitor_fn(): + dataset = self.get_dataset() + while True: + image_notification = self._image_notification_queue.get() + if AcqNotification.is_data_sink_finished_notification(image_notification): + break + index_entry = image_notification.id.encode('ISO-8859-1') + axes = dataset._add_index_entry(index_entry) + dataset._new_image_arrived = True + if callback is not None: + callback(axes, dataset) + t = threading.Thread(target=_storage_monitor_fn, name='StorageMonitorThread') + t.start() + return t def _check_for_exceptions(self): """ @@ -647,13 +448,13 @@ def _check_for_exceptions(self): or on the Java side (i.e. hardware control) """ # these will throw exceptions - self._remote_acq.check_for_exceptions() + self._acq.check_for_exceptions() if self._exception is not None: raise self._exception def _start_events(self, **kwargs): - self.event_port = self._remote_acq.get_event_port() + self.event_port = self._acq.get_event_port() self._event_thread = threading.Thread( target=_run_acq_event_source, @@ -668,68 +469,55 @@ def _initialize_image_processor(self, **kwargs): java_processor = JavaObject( "org.micromanager.remote.RemoteImageProcessor", port=self._port ) - self._remote_acq.add_image_processor(java_processor) + self._acq.add_image_processor(java_processor) self._processor_thread = self._start_processor( java_processor, kwargs['image_process_fn'], # Some acquisitions (e.g. 
Explore acquisitions) create events on Java side self._event_queue if hasattr(self, '_event_queue') else None, - process=kwargs['process']) - + process=False) def _initialize_hooks(self, **kwargs): self._hook_threads = [] if kwargs['event_generation_hook_fn'] is not None: hook = JavaObject( - "org.micromanager.remote.RemoteAcqHook", port=self._port, args=[self._remote_acq] + "org.micromanager.remote.RemoteAcqHook", port=self._port, args=[self._acq] ) self._hook_threads.append(self._start_hook(hook, kwargs['event_generation_hook_fn'], - self._event_queue, process=kwargs['process'])) - self._remote_acq.add_hook(hook, self._remote_acq.EVENT_GENERATION_HOOK) + self._event_queue, process=False)) + self._acq.add_hook(hook, self._acq.EVENT_GENERATION_HOOK) if kwargs['pre_hardware_hook_fn'] is not None: hook = JavaObject( - "org.micromanager.remote.RemoteAcqHook", port=self._port, args=[self._remote_acq] + "org.micromanager.remote.RemoteAcqHook", port=self._port, args=[self._acq] ) self._hook_threads.append(self._start_hook(hook, kwargs['pre_hardware_hook_fn'], self._event_queue, - process=kwargs['process'])) - self._remote_acq.add_hook(hook, self._remote_acq.BEFORE_HARDWARE_HOOK) + process=False)) + self._acq.add_hook(hook, self._acq.BEFORE_HARDWARE_HOOK) if kwargs['post_hardware_hook_fn'] is not None: hook = JavaObject( - "org.micromanager.remote.RemoteAcqHook", port=self._port, args=[self._remote_acq] + "org.micromanager.remote.RemoteAcqHook", port=self._port, args=[self._acq] ) self._hook_threads.append(self._start_hook(hook, kwargs['post_hardware_hook_fn'], - self._event_queue, process=kwargs['process'])) - self._remote_acq.add_hook(hook, self._remote_acq.AFTER_HARDWARE_HOOK) + self._event_queue, process=False)) + self._acq.add_hook(hook, self._acq.AFTER_HARDWARE_HOOK) if kwargs['post_camera_hook_fn'] is not None: hook = JavaObject( - "org.micromanager.remote.RemoteAcqHook", port=self._port, args=[self._remote_acq], + "org.micromanager.remote.RemoteAcqHook", 
port=self._port, args=[self._acq], ) self._hook_threads.append(self._start_hook(hook, kwargs['post_camera_hook_fn'], - self._event_queue, process=kwargs['process'])) - self._remote_acq.add_hook(hook, self._remote_acq.AFTER_CAMERA_HOOK) - - - def _create_event_queue(self, **kwargs): - # Create thread safe queue for events so they can be passed from multiple processes - self._event_queue = multiprocessing.Queue() if kwargs['process'] else queue.Queue() + self._event_queue, process=False)) + self._acq.add_hook(hook, self._acq.AFTER_CAMERA_HOOK) def _create_remote_acquisition(self, **kwargs): - core = Core(port=self._port, timeout=self._timeout, debug=self._debug) + core = ZMQRemoteMMCoreJ(port=self._port, timeout=self._timeout, debug=self._debug) acq_factory = JavaObject("org.micromanager.remote.RemoteAcquisitionFactory", - # # create the acquisition on a dedicated socket to ensure it doesnt interfere with user code - # new_socket=True, + # create a new socket for it to run on so that it can have blocking calls without interfering with + # the main socket or other internal sockets + new_socket=True, port=self._port, args=[core], debug=self._debug) - show_viewer = kwargs['show_display'] is True and\ - kwargs['napari_viewer'] is None and\ - (kwargs['directory'] is not None and kwargs['name'] is not None) - - self._remote_acq = acq_factory.create_acquisition( - kwargs['directory'], - kwargs['name'], - show_viewer, - kwargs['saving_queue_size'], - kwargs['core_log_debug'], - ) + show_viewer = kwargs['show_display'] is True and kwargs['napari_viewer'] is None + self._acq = acq_factory.create_acquisition(kwargs['directory'], kwargs['name'], show_viewer, + kwargs['saving_queue_size'], self._debug,) def _start_hook(self, remote_hook, remote_hook_fn : callable, event_queue, process): """ @@ -819,7 +607,7 @@ def _start_processor(self, processor, process_fn, event_queue, process): return processor_thread -class XYTiledAcquisition(Acquisition): +class 
XYTiledAcquisition(JavaBackendAcquisition): """ For making tiled images with an XY stage and multiresolution saving (e.g. for making one large contiguous image of a sample larger than the field of view) @@ -837,12 +625,10 @@ def __init__( post_camera_hook_fn: callable=None, show_display: bool=True, image_saved_fn: callable=None, - process: bool=False, saving_queue_size: int=20, timeout: int=1000, port: int=DEFAULT_PORT, debug: bool=False, - core_log_debug: bool=False, ): """ Parameters @@ -869,7 +655,7 @@ def __init__( super().__init__(**named_args) def _create_remote_acquisition(self, port, **kwargs): - core = Core(port=self._port, timeout=self._timeout) + core = ZMQRemoteMMCoreJ(port=self._port, timeout=self._timeout) acq_factory = JavaObject( "org.micromanager.remote.RemoteAcquisitionFactory", port=self._port, args=[core] ) @@ -892,10 +678,10 @@ def _create_remote_acquisition(self, port, **kwargs): y_overlap, self.max_multi_res_index if self.max_multi_res_index is not None else -1, kwargs['saving_queue_size'], - kwargs['core_log_debug'], + self._debug, ) -class ExploreAcquisition(Acquisition): +class ExploreAcquisition(JavaBackendAcquisition): """ Launches a user interface for an "Explore Acquisition"--a type of XYTiledAcquisition in which acquisition events come from the user dynamically driving the stage and selecting @@ -915,12 +701,10 @@ def __init__( post_camera_hook_fn: callable=None, show_display: bool=True, image_saved_fn: callable=None, - process: bool=False, saving_queue_size: int=20, timeout: int=1000, port: int=DEFAULT_PORT, debug: bool=False, - core_log_debug: bool=False, ): """ Parameters @@ -966,7 +750,7 @@ def _create_event_queue(self, **kwargs): pass # Comes from the user -class MagellanAcquisition(Acquisition): +class MagellanAcquisition(JavaBackendAcquisition): """ Class used for launching Micro-Magellan acquisitions. 
Must pass either magellan_acq_index or magellan_explore as an argument @@ -985,7 +769,6 @@ def __init__( timeout: int=500, port: int=DEFAULT_PORT, debug: bool=False, - core_log_debug: bool=False, ): """ Parameters @@ -1019,54 +802,3 @@ def _create_remote_acquisition(self, **kwargs): elif self.magellan_explore: self._remote_acq = magellan_api.create_explore_acquisition(False) self._event_queue = None - -def _validate_acq_events(events: dict or list): - """ - Validate if supplied events are a dictionary or a list of dictionaries - that contain valid events. Throw an exception if not - - Parameters - ---------- - events : dict or list - - """ - if isinstance(events, dict): - _validate_acq_dict(events) - elif isinstance(events, list): - if len(events) == 0: - raise Exception('events list cannot be empty') - for event in events: - if isinstance(event, dict): - _validate_acq_dict(event) - else: - raise Exception('events must be a dictionary or a list of dictionaries') - else: - raise Exception('events must be a dictionary or a list of dictionaries') - -def _validate_acq_dict(event: dict): - """ - Validate event dictionary, and raise an exception or supply a warning and fix it if something is incorrect - - Parameters - ---------- - event : dict - - """ - if 'axes' not in event.keys(): - raise Exception('event dictionary must contain an \'axes\' key. This event will be ignored') - if 'row' in event.keys(): - warnings.warn('adding \'row\' as a top level key in the event dictionary is deprecated and will be disallowed in ' - 'a future version. Instead, add \'row\' as a key in the \'axes\' dictionary') - event['axes']['row'] = event['row'] - if 'col' in event.keys(): - warnings.warn('adding \'col\' as a top level key in the event dictionary is deprecated and will be disallowed in ' - 'a future version. 
Instead, add \'column\' as a key in the \'axes\' dictionary') - event['axes']['column'] = event['col'] - - # TODO check for the validity of other acquisition event fields, and make sure that there aren't unexpected - # other fields, to help users catch simple errors - - - - - diff --git a/pycromanager/acquisition/python_backend_acquisitions.py b/pycromanager/acquisition/python_backend_acquisitions.py new file mode 100644 index 00000000..2e01a73c --- /dev/null +++ b/pycromanager/acquisition/python_backend_acquisitions.py @@ -0,0 +1,215 @@ +from docstring_inheritance import NumpyDocstringInheritanceMeta +from pycromanager.acquisition.acq_eng_py.main.AcqEngPy_Acquisition import Acquisition as pymmcore_Acquisition +from pycromanager.acquisition.RAMStorage import RAMDataStorage +from pycromanager.acquisition.acquisition_superclass import _validate_acq_events, Acquisition +from pycromanager.acquisition.acq_eng_py.main.acquisition_event import AcquisitionEvent +from pycromanager.acq_future import AcqNotification +import threading +from inspect import signature + + +class PythonBackendAcquisition(Acquisition, metaclass=NumpyDocstringInheritanceMeta): + """ + Pycro-Manager acquisition that uses a Python runtime backend. Unlike the Java backend, + Python-backed acquisitions currently do not automatically write data to disk. Instead, by default, + they store data in RAM which can be queried with the Dataset class. If instead you want to + implement your own data storage, you can pass an image_process_fn which diverts the data to + a custom endpoint. 
+ """ + + def __init__( + self, + directory: str=None, + name: str=None, + image_process_fn: callable=None, + event_generation_hook_fn: callable = None, + pre_hardware_hook_fn: callable=None, + post_hardware_hook_fn: callable=None, + post_camera_hook_fn: callable=None, + notification_callback_fn: callable=None, + napari_viewer=None, + image_saved_fn: callable=None, + debug: int=False, + ): + # Get a dict of all named argument values (or default values when nothing provided) + arg_names = [k for k in signature(PythonBackendAcquisition.__init__).parameters.keys() if k != 'self'] + l = locals() + named_args = {arg_name: (l[arg_name] if arg_name in l else + dict(signature(PythonBackendAcquisition.__init__).parameters.items())[arg_name].default) + for arg_name in arg_names } + super().__init__(**named_args) + if directory is not None: + raise NotImplementedError('Saving to disk is not yet implemented for the python backend. ') + self._dataset = RAMDataStorage() + self._finished = False + self._notifications_finished = False + self._create_event_queue() + + self._process_fn = image_process_fn + self._image_processor = ImageProcessor(self) if image_process_fn is not None else None + + + # create a thread that submits events + # events can be added to the queue through image processors, hooks, or the acquire method + def submit_events(): + while True: + event_or_events = self._event_queue.get() + if event_or_events is None: + self._acq.finish() + self._acq.block_until_events_finished() + break + _validate_acq_events(event_or_events) + if isinstance(event_or_events, dict): + event_or_events = [event_or_events] + # convert to objects + event_or_events = [AcquisitionEvent.from_json(event, self._acq) for event in event_or_events] + self._acq.submit_event_iterator(iter(event_or_events)) + self._event_thread = threading.Thread(target=submit_events) + self._event_thread.start() + + self._acq = pymmcore_Acquisition(self._dataset) + + # receive notifications from the acquisition 
engine. Unlike the java_backend analog + # of this, the python backend does not have a separate thread for notifications because + # it can just use the one in AcqEngPy + def post_notification(notification): + self._notification_queue.put(notification) + # these are processed separately to handle image saved callback + if AcqNotification.is_image_saved_notification(notification): + self._image_notification_queue.put(notification) + + self._acq.add_acq_notification_listener(NotificationListener(post_notification)) + + self._notification_dispatch_thread = self._start_notification_dispatcher(notification_callback_fn) + + # add hooks and image processor + if pre_hardware_hook_fn is not None: + self._acq.add_hook(AcquisitionHook(pre_hardware_hook_fn),self._acq.BEFORE_HARDWARE_HOOK) + if post_hardware_hook_fn is not None: + self._acq.add_hook(AcquisitionHook(post_hardware_hook_fn),self._acq.AFTER_HARDWARE_HOOK) + if post_camera_hook_fn is not None: + self._acq.add_hook(AcquisitionHook(post_camera_hook_fn),self._acq.AFTER_CAMERA_HOOK) + if event_generation_hook_fn is not None: + self._acq.add_hook(AcquisitionHook(event_generation_hook_fn),self._acq.EVENT_GENERATION_HOOK) + if self._image_processor is not None: + self._acq.add_image_processor(self._image_processor) + + + if napari_viewer is not None: + # using napari viewer + try: + import napari + except: + raise Exception('Napari must be installed in order to use this feature') + from pycromanager.napari_util import start_napari_signalling + assert isinstance(napari_viewer, napari.Viewer), 'napari_viewer must be an instance of napari.Viewer' + self._napari_viewer = napari_viewer + start_napari_signalling(self._napari_viewer, self.get_dataset()) + + + ######## Public API ########### + def get_dataset(self): + return self._dataset + + def await_completion(self): + """Wait for acquisition to finish and resources to be cleaned up""" + while not self._acq.are_events_finished() or ( + self._acq.get_data_sink() is not None and
not self._acq.get_data_sink().is_finished()): + self._check_for_exceptions() + self._acq.block_until_events_finished(0.05) + if self._acq.get_data_sink() is not None: + self._acq.get_data_sink().block_until_finished(0.05) + self._check_for_exceptions() + self._event_thread.join() + self._notification_dispatch_thread.join() + + self._acq = None + self._finished = True + + def get_viewer(self): + """ + Return a reference to the current viewer, if the show_display argument + was set to True. The returned object is either an instance of NDViewer or napari.Viewer() + """ + return self._napari_viewer + + ######## Context manager (i.e. "with Acquisition...") ########### + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.mark_finished() + # now wait on it to finish + self.await_completion() + + def _check_for_exceptions(self): + """ + Check for exceptions on the python side (i.e. hooks and processors) + or on the Java side (i.e. hardware control) + """ + # these will throw exceptions + self._acq.check_for_exceptions() + if self._exception is not None: + raise self._exception + + def _are_acquisition_notifications_finished(self): + """ + Called by the storage to check if all notifications have been processed + """ + return self._notifications_finished + +class ImageProcessor: + """ + This is the equivalent of RemoteImageProcessor in the Java version. + It runs its own thread, polls the input queue for images, calls + the process function, and puts the result in the output queue. 
+ """ + + + def __init__(self, pycromanager_acq): + self._pycromanager_acq = pycromanager_acq + + def set_acq_and_queues(self, acq, input, output): + self.input_queue = input + self.output_queue = output + self._acq = acq + self._process_thread = threading.Thread(target=self._process) + self._process_thread.start() + + def _process(self): + while True: + # wait for an image to arrive + tagged_image = self.input_queue.get() + if tagged_image.tags is None and tagged_image.pix is None: + # this is a signal to stop + self.output_queue.put(tagged_image) + break + process_fn_result = self._pycromanager_acq._call_image_process_fn(tagged_image.tags, tagged_image.pix) + if process_fn_result is not None: + self.output_queue.put(process_fn_result) + # otherwise the image processor intercepted the image and nothing to do here + +class AcquisitionHook: + """ + Lightweight wrapper to convert function pointers to AcqEng hooks + """ + + def __init__(self, hook_fn): + self._hook_fn = hook_fn + + def run(self, event): + self._hook_fn(event) + + def close(self): + pass # nothing to do here + +class NotificationListener: + """ + Lightweight wrapper to convert function pointers to AcqEng notification listeners + """ + + def __init__(self, notification_fn): + self._notification_fn = notification_fn + + def post_notification(self, notification): + self._notification_fn(notification) \ No newline at end of file diff --git a/pycromanager/core.py b/pycromanager/core.py new file mode 100644 index 00000000..85bf512c --- /dev/null +++ b/pycromanager/core.py @@ -0,0 +1,15 @@ +from pycromanager.mm_java_classes import ZMQRemoteMMCoreJ +import pymmcore +from pycromanager.headless import _PYMMCORES + +class Core(): + """ + Return a remote Java ZMQ Core, or a local Python Core, if the start_headless has been called with a Python backend + """ + + def __new__(cls, **kwargs): + if _PYMMCORES: + return _PYMMCORES[0] + else: + return ZMQRemoteMMCoreJ(**kwargs) + diff --git a/pycromanager/headless.py 
b/pycromanager/headless.py new file mode 100644 index 00000000..68ccd021 --- /dev/null +++ b/pycromanager/headless.py @@ -0,0 +1,208 @@ +import subprocess +import platform +import atexit +import threading +import types + +from pycromanager.acquisition.acq_eng_py.internal.engine import Engine +from pycromanager.zmq_bridge._bridge import _Bridge +from pymmcore import CMMCore +import pymmcore + +import re + +class TaggedImage: + + def __init__(self, tags, pix): + self.tags = tags + self.pix = pix + +def _camel_to_snake(name): + """ + Convert camelCase string to snake_case + """ + s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() + +def _create_pymmcore_instance(): + """ + Make a subclass of CMMCore with two differences: + + 1. All methods are converted to snake_case + 2. add convenience methods to match the MMCoreJ API: + """ + + # Create a new dictionary for the class attributes + new_attributes = {} + + # Iterate through the original attributes + for attr_name, attr_value in vars(CMMCore).items(): + # If it's a dunder method, skip it (we don't want to override these) + if attr_name.startswith("__") and attr_name.endswith("__"): + continue + # If the attribute is callable (i.e., a method), convert its name to snake_case and add it + if callable(attr_value): + new_attr_name = _camel_to_snake(attr_name) + new_attributes[new_attr_name] = attr_value + + # Create and return a new class that subclasses the original class and has the new attributes + clz = type(CMMCore.__name__ + "SnakeCase", (CMMCore,), new_attributes) + + instance = clz() + + def pop_next_tagged_image(self): + md = pymmcore.Metadata() + pix = self.pop_next_image_md(0, 0, md) + tags = {key: md.GetSingleTag(key).GetValue() for key in md.GetKeys()} + return TaggedImage(tags, pix) + + def get_tagged_image(self, cam_index, camera, height, width, binning=None, pixel_type=None, roi_x_start=None, + roi_y_start=None): + """ + Different signature than the 
Java version because of difference in metadata handling in the swig layers + """ + pix = self.get_image() + md = pymmcore.Metadata() + # most of the same tags from pop_next_tagged_image, which may not be the same as the MMCoreJ version of this function + tags = {'Camera': camera, 'Height': height, 'Width': width, 'PixelType': pixel_type, + 'CameraChannelIndex': cam_index} + # Could optionally add these for completeness but there might be a performance hit + if binning is not None: + tags['Binning'] = binning + if roi_x_start is not None: + tags['ROI-X-start'] = roi_x_start + if roi_y_start is not None: + tags['ROI-Y-start'] = roi_y_start + + return TaggedImage(tags, pix) + + instance.get_tagged_image = types.MethodType(get_tagged_image, instance) + instance.pop_next_tagged_image = types.MethodType(pop_next_tagged_image, instance) + + # attach TaggedImage class + instance.TaggedImage = TaggedImage + return instance + + +_JAVA_HEADLESS_SUBPROCESSES = [] +_PYMMCORES = [] + +def stop_headless(debug=False): + for p in _JAVA_HEADLESS_SUBPROCESSES: + if debug: + print('Stopping headless process with pid {}'.format(p.pid)) + p.terminate() + if debug: + print('Waiting for process with pid {} to terminate'.format(p.pid)) + p.wait() # wait for process to terminate + if debug: + print('Process with pid {} terminated'.format(p.pid)) + _JAVA_HEADLESS_SUBPROCESSES.clear() + if debug: + print('Stopping {} pymmcore instances'.format(len(_PYMMCORES))) + for c in _PYMMCORES: + if debug: + print('Stopping pymmcore instance') + c.unloadAllDevices() + if debug: + print('Unloaded all devices') + Engine.get_instance().shutdown() + if debug: + print('Engine shut down') + _PYMMCORES.clear() + if debug: + print('Headless stopped') + +# make sure any Java processes are cleaned up when Python exits +atexit.register(stop_headless) + +def start_headless( + mm_app_path: str, config_file: str='', java_loc: str=None, + core_log_path: str='', python_backend=False, + buffer_size_mb: int=1024, 
max_memory_mb: int=2000, + port: int=_Bridge.DEFAULT_PORT, debug=False): + """ + Start a Java process that contains the necessary libraries for pycro-manager to run, + so that it can be run independently of the Micro-Manager GUI/application. This call + will create and initialize MMCore with the configuration file provided. + + On windows platforms, the Java Runtime Environment will be grabbed automatically + as it is installed along with the Micro-Manager application. + + On non-windows platforms, it may need to be installed/specified manually in order to ensure compatibility. + Installing Java 11 is the most likely version to work without issue + + Parameters + ---------- + mm_app_path : str + Path to top level folder of Micro-Manager installation (made with graphical installer) + config_file : str + Path to micro-manager config file, with which core will be initialized. If None then initialization + is left to the user. + java_loc: str + Path to the java version that it should be run with (Java backend only) + core_log_path : str + Path to where core log files should be created + python_backend : bool + Whether to use the python backend or the Java backend + buffer_size_mb : int + Size of circular buffer in MB in MMCore + max_memory_mb : int + Maximum amount of memory to be allocated to JVM + port : int + Default port to use for ZMQServer (Java backend only) + debug : bool + Print debug messages + """ + + if python_backend: + mmc = _create_pymmcore_instance() + mmc.set_device_adapter_search_paths([mm_app_path]) + mmc.load_system_configuration(config_file) + _PYMMCORES.append(mmc) # Store so it doesn't get garbage collected + Engine(mmc) + else: + classpath = mm_app_path + '/plugins/Micro-Manager/*' + if java_loc is None: + if platform.system() == "Windows": + # windows comes with its own JRE + java_loc = mm_app_path + "/jre/bin/javaw.exe" + else: + java_loc = "java" + # This starts Java process and instantiates essential objects (core, + # acquisition engine,
ZMQServer) + process = subprocess.Popen( + [ + java_loc, + "-classpath", + classpath, + "-Dsun.java2d.dpiaware=false", + f"-Xmx{max_memory_mb}m", + + # This is used by MM desktop app but breaks things on MacOS...Don't think its neccessary + # "-XX:MaxDirectMemorySize=1000", + "org.micromanager.remote.HeadlessLauncher", + str(port), + config_file, + str(buffer_size_mb), + core_log_path, + ], cwd=mm_app_path, stdout=subprocess.PIPE + ) + _JAVA_HEADLESS_SUBPROCESSES.append(process) + + started = False + output = True + # Some drivers output various status messages which need to be skipped over to look for the STARTED token. + while output and not started: + output = process.stdout.readline() + started = "STARTED" in output.decode('utf-8') + if not started: + raise Exception('Error starting headless mode') + if debug: + print('Headless mode started') + def logger(): + while process in _JAVA_HEADLESS_SUBPROCESSES: + print(process.stdout.readline().decode('utf-8')) + threading.Thread(target=logger).start() + + diff --git a/pycromanager/mm_java_classes.py b/pycromanager/mm_java_classes.py index bbc9d0d8..5478eb06 100644 --- a/pycromanager/mm_java_classes.py +++ b/pycromanager/mm_java_classes.py @@ -24,7 +24,7 @@ def __init__(self, callback_fn=None, bridge_port=DEFAULT_BRIDGE_PORT): def _callback_recieving_fn(self, bridge_port, core_callback): callback_java = JavaObject( - "org.micromanager.remote.RemoteCoreCallback", args=(Core(port=bridge_port),) + "org.micromanager.remote.RemoteCoreCallback", args=(ZMQRemoteMMCoreJ(port=bridge_port),) ) port = callback_java.get_push_port() @@ -56,7 +56,7 @@ def __del__(self): self._thread.join() -class Core(JavaObject): +class ZMQRemoteMMCoreJ(JavaObject): """ Remote instance of Micro-Manager Core """ diff --git a/pycromanager/napari_util.py b/pycromanager/napari_util.py index 2a7647fa..33b0341c 100644 --- a/pycromanager/napari_util.py +++ b/pycromanager/napari_util.py @@ -37,13 +37,12 @@ def napari_signaller(): image = None if dataset 
is not None and dataset.has_new_image(): - # A new image has arrived, but we only need to regenerate the dask array - # if its shape has changed + # A new image has arrived, this could be overwriting something existing or have a new combination of axes + image = dataset.as_array() shape = np.array([len(dataset.axes[name]) for name in dataset.axes.keys()]) if not hasattr(napari_signaller, 'old_shape') or \ napari_signaller.old_shape.size != shape.size or \ np.any(napari_signaller.old_shape != shape): - image = dataset.as_array() napari_signaller.old_shape = shape yield image diff --git a/pycromanager/test/conftest.py b/pycromanager/test/conftest.py index 4a37a908..1b464242 100644 --- a/pycromanager/test/conftest.py +++ b/pycromanager/test/conftest.py @@ -7,17 +7,17 @@ import requests import re import time +import glob import pycromanager from pycromanager import start_headless -from pycromanager.acq_util import stop_headless +from pycromanager.headless import stop_headless import socket def is_port_in_use(port): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: return s.connect_ex(('localhost', port)) == 0 - def find_jar(pathname, jar_name): p = re.compile(jar_name + r"-(\d+).(\d+).(\d+).jar") @@ -82,13 +82,14 @@ def install_mm(download_mm_nightly): mm_running = False mm_install_dir = os.path.join(os.path.expanduser('~'), "Micro-Manager-nightly") + # check if there is currently a Micro-manager instance running (used for local testing) if is_port_in_use(4827): mm_running = True print('Using Micro-manager running on port 4827 for testing') yield else: - if os.path.isdir(mm_install_dir): + if os.path.isdir(mm_install_dir) and os.listdir(mm_install_dir): # Check if Micro-manager installation is present in mm_install_dir. # If so, the latest Micro-manager nightly build will not be installed. 
print(f'Existing Micro-manager installation found at {mm_install_dir}') @@ -107,7 +108,9 @@ def install_mm(download_mm_nightly): "~/Micro-Manager-nightly"''' ) - os.mkdir(mm_install_dir) + # mkdir if not exists + if not os.path.isdir(mm_install_dir): + os.mkdir(mm_install_dir) print(f'Installing Micro-manager nightly build at: {mm_install_dir}') cmd = f"{mm_installer} /SP /VERYSILENT /SUPRESSMSGBOXES /CURRENTUSER /DIR={mm_install_dir} /LOG={mm_install_log_path}" @@ -122,11 +125,25 @@ def install_mm(download_mm_nightly): else: raise RuntimeError('Could not find pycro-manager/java path') - # Update pycromanager jar files packaged with the Micro-manager nightly build + + # Delete the pycromanagerjava.jar file that is packaged with the nightly build + # use a wildcard to match the version number + pycromanager_jar_path = os.path.join(mm_install_dir, 'plugins', 'Micro-Manager', 'PycromanagerJava-[0-9]*.[0-9]*.[0-9]*.jar') + for file_path in glob.glob(pycromanager_jar_path): + os.remove(file_path) + print(f'Removed {file_path}') + + # Copy the pycromanagerjava.jar file that was compiled by the github action + # into the nightly build so that it will test with the latest code + compiled_jar_path = os.path.join(java_path, 'target', 'PycromanagerJava-[0-9]*.[0-9]*.[0-9].jar') # Destination path where the jar file should be copied to + destination_path = os.path.join(mm_install_dir, 'plugins', 'Micro-Manager', 'PycromanagerJava.jar') + # Find the actual file that matches the pattern and copy it to the destination + for file_path in glob.glob(compiled_jar_path): + shutil.copy2(file_path, destination_path) + print(f'Copied {file_path} to {destination_path}') + + # Update pycromanager dependency jar files packaged with the Micro-manager nightly build # Files are updated only if they are larger version - if os.path.isdir(os.path.join(java_path, 'target')): - replace_jars(os.path.join(java_path, 'target'), os.path.join(mm_install_dir, 'plugins', 'Micro-Manager'), - 
['PycroManagerJava']) # Copy dependency jar files if present in target/dependency if os.path.isdir(os.path.join(java_path, 'target/dependency')): replace_jars(os.path.join(java_path, 'target/dependency'), os.path.join(mm_install_dir, 'plugins', 'Micro-Manager'), @@ -161,20 +178,18 @@ def setup_data_folder(): def launch_mm_headless(install_mm): mm_install_dir = install_mm if mm_install_dir is None: - return # local manual testing where MM has been launched from source + yield # local manual testing where MM has been launched from source + else: + config_file = os.path.join(mm_install_dir, 'MMConfig_demo.cfg') + print('Launching Micro-manager in headless mode.') - config_file = os.path.join(mm_install_dir, 'MMConfig_demo.cfg') + # MM doesn't ship with Java on Mac so allow it to be defined here + java_loc = None + if "JAVA" in os.environ: + java_loc = os.environ["JAVA"] - print('Launching Micro-manager in headless mode.') - - # MM doesn't ship with Java on Mac so allow it to be defined here - java_loc = None - if "JAVA" in os.environ: - java_loc = os.environ["JAVA"] + start_headless(mm_install_dir, config_file, java_loc=java_loc, debug=True) - start_headless(mm_install_dir, config_file, java_loc=java_loc) + yield None - # yield - # - # cleanup() - # pass + stop_headless(debug=True) diff --git a/pycromanager/test/test_acquisition.py b/pycromanager/test/test_acquisition.py index bad90cb1..ced6827c 100644 --- a/pycromanager/test/test_acquisition.py +++ b/pycromanager/test/test_acquisition.py @@ -2,7 +2,7 @@ import pytest import time from pycromanager import Acquisition, Core, multi_d_acquisition_events -from pycromanager.acquisitions import AcqAlreadyCompleteException +from pycromanager.acquisition.acquisition_superclass import AcqAlreadyCompleteException def check_acq_sequenced(events, expected_num_events): @@ -21,7 +21,7 @@ def hook_fn(_events): return _events with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + 
pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) dataset = acq.get_dataset() @@ -39,7 +39,7 @@ def hook_fn(_events): return _events with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) dataset = acq.get_dataset() @@ -143,7 +143,7 @@ def hook_fn(_events): return None # no need to actually acquire the data with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) @@ -191,7 +191,7 @@ def hook_fn(_events): return None # no need to actually acquire the data with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) @@ -216,7 +216,7 @@ def hook_fn(_events): return _events with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) # check that the exposure time was correctly set @@ -248,7 +248,7 @@ def hook_fn(_events): return None # no need to actually acquire the data with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) @@ -271,7 +271,7 @@ def hook_fn(_events): return None # no need to actually acquire the data with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) @@ -294,7 +294,7 @@ def hook_fn(_events): return None # no need to actually acquire the data with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) @@ -324,7 +324,7 @@ def hook_fn(_events): return _events with Acquisition(setup_data_folder, 'acq', 
show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) # check that the exposure time was correctly set @@ -357,7 +357,7 @@ def hook_fn(_events): return None # no need to actually acquire the data with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) @@ -381,7 +381,7 @@ def hook_fn(_events): return None # no need to actually acquire the data with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) @@ -405,7 +405,7 @@ def hook_fn(_events): return None # no need to actually acquire the data with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) def test_time_noseq_z_seq_interval_acq(launch_mm_headless, setup_data_folder): @@ -425,7 +425,7 @@ def hook_fn(_events): t_start = time.time() with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) t_end = time.time() @@ -445,7 +445,7 @@ def hook_fn(_events): mmc.set_exposure(1000) with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: events = multi_d_acquisition_events(1000) acq.acquire(events) time.sleep(10) @@ -479,7 +479,7 @@ def test_abort_from_external(launch_mm_headless, setup_data_folder): acq.acquire(events[0]) # this simulates an abort from the java side unbeknownst to python side # it comes from a new thread so it is non-blocking to the port - acq._remote_acq.abort() + acq._acq.abort() for event in events[1:]: acq.acquire(event) time.sleep(5) @@ -498,7 +498,7 @@ def hook_fn(_events): return _events with Acquisition(setup_data_folder, 'acq', 
show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: events = multi_d_acquisition_events(z_start=0, z_end=999, z_step=1) acq.acquire(events) time.sleep(4) @@ -595,7 +595,7 @@ def hook_fn(_events): return _events with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) dataset = acq.get_dataset() @@ -622,7 +622,7 @@ def hook_fn(_events): return _events with Acquisition(setup_data_folder, 'acq', show_display=False, - pre_hardware_hook_fn=hook_fn) as acq: + pre_hardware_hook_fn=hook_fn) as acq: acq.acquire(events) dataset = acq.get_dataset() diff --git a/pycromanager/test/test_hook_functions.py b/pycromanager/test/test_hook_functions.py index 8ca875dc..9675d945 100644 --- a/pycromanager/test/test_hook_functions.py +++ b/pycromanager/test/test_hook_functions.py @@ -15,7 +15,7 @@ def hook_fn(image, metadata): return image, metadata with Acquisition(setup_data_folder, 'acq', show_display=False, - image_process_fn=hook_fn) as acq: + image_process_fn=hook_fn) as acq: acq.acquire(events) dataset = acq.get_dataset() @@ -35,7 +35,7 @@ def hook_fn(image, metadata): return None with Acquisition(directory=None, name='acq', show_display=False, - image_process_fn=hook_fn) as acq: + image_process_fn=hook_fn) as acq: acq.acquire(events) dataset = acq.get_dataset() # Can this be moved out of the Acquisition context? 
diff --git a/pycromanager/zmq_bridge/_bridge.py b/pycromanager/zmq_bridge/_bridge.py index c101d9f9..7d7cd9b3 100644 --- a/pycromanager/zmq_bridge/_bridge.py +++ b/pycromanager/zmq_bridge/_bridge.py @@ -221,13 +221,13 @@ class _Bridge: _bridge_creation_lock = threading.Lock() _cached_bridges_by_port_and_thread = {} - - def __new__(cls, port: int=DEFAULT_PORT, timeout: int=DEFAULT_TIMEOUT, convert_camel_case: bool=True, - debug: bool=False, *args, **kwargs): + @staticmethod + def create_or_get_existing_bridge(port: int=DEFAULT_PORT, convert_camel_case: bool=True, + debug: bool=False, ip_address: str="127.0.0.1", timeout: int=DEFAULT_TIMEOUT, iterate: bool = False): """ - Only one instance of Bridge per a thread/port combo + Get a bridge for a given port and thread. If a bridge for that port/thread combo already exists, + return it """ - # synchronize this method so multiple threads don't try to create a bridge at the same time with _Bridge._bridge_creation_lock: thread_id = threading.current_thread().ident port_thread_id = (port, thread_id) @@ -246,7 +246,34 @@ def __new__(cls, port: int=DEFAULT_PORT, timeout: int=DEFAULT_TIMEOUT, convert_c if debug: print("DEBUG: creating new beidge for port {} thread {}".format( port, threading.current_thread().name)) - return super(_Bridge, cls).__new__(cls) + return _Bridge(port, convert_camel_case, debug, ip_address, timeout, iterate) + + + # def __new__(cls, port: int=DEFAULT_PORT, timeout: int=DEFAULT_TIMEOUT, convert_camel_case: bool=True, + # debug: bool=False, *args, **kwargs): + # """ + # Only one instance of Bridge per a thread/port combo + # """ + # # synchronize this method so multiple threads don't try to create a bridge at the same time + # with _Bridge._bridge_creation_lock: + # thread_id = threading.current_thread().ident + # port_thread_id = (port, thread_id) + # + # # return the existing cached bridge if it exists, otherwise make a new one + # if port_thread_id in 
_Bridge._cached_bridges_by_port_and_thread.keys(): + # bridge = _Bridge._cached_bridges_by_port_and_thread[port_thread_id]() + # if bridge is None: + # raise Exception("Bridge for port {} and thread {} has been " + # "closed but not removed".format(port, threading.current_thread().name)) + # if debug: + # print("DEBUG: returning cached bridge for port {} thread {}".format( + # port, threading.current_thread().name)) + # return bridge + # else: + # if debug: + # print("DEBUG: creating new beidge for port {} thread {}".format( + # port, threading.current_thread().name)) + # return super(_Bridge, cls).__new__(cls) def __init__( @@ -254,6 +281,7 @@ def __init__( debug: bool=False, ip_address: str="127.0.0.1", timeout: int=DEFAULT_TIMEOUT, iterate: bool = False ): """ + This constructor should not be called directly. Instead, use the static method create_or_get_existing_bridge Parameters ---------- port : int @@ -267,16 +295,15 @@ def __init__( iterate : bool If True, ListArray will be iterated and give lists """ - with _Bridge._bridge_creation_lock: - thread_id = threading.current_thread().ident - port_thread_id = (port, thread_id) - if port_thread_id in _Bridge._cached_bridges_by_port_and_thread.keys(): - return # already initialized - self._port_thread_id = port_thread_id - # store weak refs so that the existence of thread/port bridge caching doesn't prevent - # the garbage collection of unused bridge objects - self._weak_self_ref = weakref.ref(self) - _Bridge._cached_bridges_by_port_and_thread[port_thread_id] = self._weak_self_ref + thread_id = threading.current_thread().ident + port_thread_id = (port, thread_id) + # if port_thread_id in _Bridge._cached_bridges_by_port_and_thread.keys(): + # return # already initialized + self._port_thread_id = port_thread_id + # store weak refs so that the existence of thread/port bridge caching doesn't prevent + # the garbage collection of unused bridge objects + self._weak_self_ref = weakref.ref(self) + 
_Bridge._cached_bridges_by_port_and_thread[port_thread_id] = self._weak_self_ref self._ip_address = ip_address self.port = port @@ -392,8 +419,10 @@ def _construct_java_object(self, classpath: str, new_socket: bool=False, args: l serialized_object = self._main_socket.receive() if new_socket: # create a new bridge over a different port - bridge = _Bridge(port=serialized_object["port"], ip_address=self._ip_address, - timeout=self._timeout, debug=debug) + bridge = _Bridge.create_or_get_existing_bridge( + port=serialized_object["port"], ip_address=self._ip_address, timeout=self._timeout, debug=debug) + # bridge = _Bridge(port=serialized_object["port"], ip_address=self._ip_address, + # timeout=self._timeout, debug=debug) else: bridge = self @@ -427,7 +456,7 @@ def _get_java_class(self, classpath: str, new_socket: bool=False, debug: bool=Fa if new_socket: # create a new bridge over a different port - bridge = _Bridge(port=serialized_object["port"], ip_address=self._ip_address, + bridge = _Bridge.create_or_get_existing_bridge(port=serialized_object["port"], ip_address=self._ip_address, timeout=self._timeout, debug=debug) else: bridge = self @@ -604,7 +633,7 @@ def _get_bridge(self): if threading.current_thread().ident == self._creation_thread: bridge_to_use = self._creation_bridge else: - bridge_to_use = _Bridge( + bridge_to_use = _Bridge.create_or_get_existing_bridge( port=port, convert_camel_case=self._convert_camel_case, ip_address=self._ip_address, @@ -1023,7 +1052,7 @@ def _camel_case_2_snake_case(name): # Test basic bridge operations import traceback - b = _Bridge() + b = _Bridge.create_or_get_existing_bridge() try: s = b.get_studio() except: diff --git a/pycromanager/zmq_bridge/wrappers.py b/pycromanager/zmq_bridge/wrappers.py index 1f06f08d..bc952e25 100644 --- a/pycromanager/zmq_bridge/wrappers.py +++ b/pycromanager/zmq_bridge/wrappers.py @@ -70,7 +70,7 @@ def __new__( debug: print debug messages """ - bridge = _Bridge(port=port, timeout=timeout, 
convert_camel_case=convert_camel_case, debug=debug) + bridge = _Bridge.create_or_get_existing_bridge(port=port, timeout=timeout, convert_camel_case=convert_camel_case, debug=debug) return bridge._construct_java_object(classpath, new_socket=new_socket, args=args) @@ -104,5 +104,5 @@ def __new__( debug: print debug messages """ - bridge = _Bridge(port=port, timeout=timeout, convert_camel_case=convert_camel_case, debug=debug) + bridge = _Bridge.create_or_get_existing_bridge(port=port, timeout=timeout, convert_camel_case=convert_camel_case, debug=debug) return bridge._get_java_class(classpath, new_socket=new_socket) diff --git a/requirements.txt b/requirements.txt index 67cdaed0..0462cfe5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,3 +3,4 @@ dask[array]>=2022.2.0 pyzmq ndtiff>=2.2.0 docstring-inheritance +pymmcore diff --git a/scripts/acq_hook.py b/scripts/acq_hook.py index 182937bb..cc2dc3b9 100644 --- a/scripts/acq_hook.py +++ b/scripts/acq_hook.py @@ -1,4 +1,4 @@ -from pycromanager import Acquisition, multi_d_acquisition_events +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events import numpy as np @@ -7,7 +7,7 @@ def hook_fn(event): return event -with Acquisition( +with JavaBackendAcquisition( directory="/Users/henrypinkard/tmp", name="acquisition_name", post_camera_hook_fn=hook_fn, diff --git a/scripts/bridge_test.py b/scripts/bridge_test.py index dc11d198..0bfbc339 100644 --- a/scripts/bridge_test.py +++ b/scripts/bridge_test.py @@ -1,8 +1,8 @@ -from pycromanager import Core +from pycromanager import ZMQRemoteMMCoreJ -core = Core() +core = ZMQRemoteMMCoreJ() -core2 = Core() +core2 = ZMQRemoteMMCoreJ() del core del core2 diff --git a/scripts/bridge_tests.py b/scripts/bridge_tests.py index 3341b0e2..3ec328d7 100644 --- a/scripts/bridge_tests.py +++ b/scripts/bridge_tests.py @@ -1,8 +1,8 @@ -from pycromanager import Core, JavaClass +from pycromanager import ZMQRemoteMMCoreJ, JavaClass from threading import Thread ### Pass 
object to a different thread -core = Core(debug=False) +core = ZMQRemoteMMCoreJ(debug=False) def other_thread(core): cache = core.get_system_state_cache() print(cache) @@ -12,7 +12,7 @@ def other_thread(core): ### Create an object and a child object on a new socket -core = Core(debug=False) +core = ZMQRemoteMMCoreJ(debug=False) core.get_system_state_cache(new) diff --git a/scripts/bug_test.py b/scripts/bug_test.py index e9fc298d..d019ec74 100644 --- a/scripts/bug_test.py +++ b/scripts/bug_test.py @@ -1,6 +1,6 @@ from pycromanager import _Bridge -from pycromanager import Acquisition, multi_d_acquisition_events +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events def img_process_fn(image, metadata): @@ -17,7 +17,7 @@ def img_process_fn(image, metadata): if __name__ == "__main__": # this is important, don't forget it - with Acquisition( + with JavaBackendAcquisition( directory="/Users/henrypinkard/megellandump/", name="exp_2_mda", image_process_fn=img_process_fn, diff --git a/scripts/camera_triggering/genIexamples.py b/scripts/camera_triggering/genIexamples.py index c7567f01..82556ac4 100644 --- a/scripts/camera_triggering/genIexamples.py +++ b/scripts/camera_triggering/genIexamples.py @@ -2,7 +2,7 @@ This example shows how to use pycromanager to interact with the micro-manager core. 
Aside from the setup section, each following section can be run independently """ -from pycromanager import Core +from pycromanager import ZMQRemoteMMCoreJ import numpy as np import time @@ -11,7 +11,7 @@ ### Setup trigger_arduino = TriggerTester('COM3') -core = Core() +core = ZMQRemoteMMCoreJ() core.set_exposure(500) camera_name = core.get_camera_device() diff --git a/scripts/camera_triggering/sandbox.py b/scripts/camera_triggering/sandbox.py index ffde5288..4deba1b0 100644 --- a/scripts/camera_triggering/sandbox.py +++ b/scripts/camera_triggering/sandbox.py @@ -1,4 +1,4 @@ -from pycromanager import Core +from pycromanager import ZMQRemoteMMCoreJ import numpy as np import time @@ -7,7 +7,7 @@ ### Setup trigger_arduino = TriggerTester('COM3') -core = Core() +core = ZMQRemoteMMCoreJ() core.set_exposure(500) camera_name = core.get_camera_device() diff --git a/scripts/camera_triggering/trigger_get_set_test.py b/scripts/camera_triggering/trigger_get_set_test.py index 3ce5aad7..14af47dd 100644 --- a/scripts/camera_triggering/trigger_get_set_test.py +++ b/scripts/camera_triggering/trigger_get_set_test.py @@ -1,8 +1,8 @@ -from pycromanager import Core +from pycromanager import ZMQRemoteMMCoreJ import numpy as np import time -core = Core() +core = ZMQRemoteMMCoreJ() camera_name = core.get_camera_device() diff --git a/scripts/control_core.py b/scripts/control_core.py index 86336c73..1a964b73 100644 --- a/scripts/control_core.py +++ b/scripts/control_core.py @@ -2,7 +2,7 @@ This example shows how to use pycromanager to interact with the micro-manager core. 
Aside from the setup section, each following section can be run independently """ -from pycromanager import _Bridge, Core +from pycromanager import _Bridge, ZMQRemoteMMCoreJ import numpy as np import matplotlib.pyplot as plt diff --git a/scripts/core_garbage_collection.py b/scripts/core_garbage_collection.py index 4e92eda6..b19fbd0b 100644 --- a/scripts/core_garbage_collection.py +++ b/scripts/core_garbage_collection.py @@ -1,4 +1,4 @@ -from pycromanager import Core +from pycromanager import ZMQRemoteMMCoreJ import gc @@ -14,7 +14,7 @@ # core.sleep(5) -core = Core(debug=True) +core = ZMQRemoteMMCoreJ(debug=True) print(core) core = None pass \ No newline at end of file diff --git a/scripts/custom_axis_acq.py b/scripts/custom_axis_acq.py index 61fcc5ed..1b423aa5 100644 --- a/scripts/custom_axis_acq.py +++ b/scripts/custom_axis_acq.py @@ -1,8 +1,8 @@ import numpy as np -from pycromanager import Acquisition, multi_d_acquisition_events +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events -with Acquisition("/Users/henrypinkard/tmp", "l_axis") as acq: +with JavaBackendAcquisition("/Users/henrypinkard/tmp", "l_axis") as acq: # create one event for the image at each z-slice events = [] for time in range(5): diff --git a/scripts/democam.py b/scripts/democam.py index 23c15caf..cf180031 100644 --- a/scripts/democam.py +++ b/scripts/democam.py @@ -4,11 +4,11 @@ dataset is saved to 'democam_X/Full Resolution/democam_MagellanStack.tif` within the current folder; consecutively numbered `X` separate individual runs of this script. 
""" -from pycromanager import Acquisition +from pycromanager import JavaBackendAcquisition exposures = [100, 200, 300, 400] -with Acquisition(directory=".", name="democam") as acq: +with JavaBackendAcquisition(directory=".", name="democam") as acq: events = [] for rep in range(3): for idx, exposure in enumerate(exposures): diff --git a/scripts/events_from_processor.py b/scripts/events_from_processor.py index a00c5c40..f873fbb8 100644 --- a/scripts/events_from_processor.py +++ b/scripts/events_from_processor.py @@ -1,4 +1,4 @@ -from pycromanager import Acquisition, multi_d_acquisition_events +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events import numpy as np @@ -19,7 +19,7 @@ def img_process_fn(image, metadata, bridge, event_queue): return image, metadata -acq = Acquisition( +acq = JavaBackendAcquisition( directory="/Users/henrypinkard/megllandump", name="acquisition_name", image_process_fn=img_process_fn, diff --git a/scripts/exceptions_in_callbacks.py b/scripts/exceptions_in_callbacks.py index 6fc637e7..ced9d0d2 100644 --- a/scripts/exceptions_in_callbacks.py +++ b/scripts/exceptions_in_callbacks.py @@ -1,4 +1,4 @@ -from pycromanager import Core, Acquisition, multi_d_acquisition_events, start_headless +from pycromanager import ZMQRemoteMMCoreJ, JavaBackendAcquisition, multi_d_acquisition_events, start_headless import time mm_app_path = 'C:/Program Files/Micro-Manager-2.0' @@ -25,10 +25,10 @@ def img_proc_fn(image, metadata): raise Exception("asdfasdf") return image, metadata -with Acquisition(directory=r"C:\Users\henry\Desktop\datadump", name='PM_test2', - pre_hardware_hook_fn=hook_fn, - # image_process_fn=img_proc_fn, - debug=True, timeout=4000) as acq: +with JavaBackendAcquisition(directory=r"C:\Users\henry\Desktop\datadump", name='PM_test2', + pre_hardware_hook_fn=hook_fn, + # image_process_fn=img_proc_fn, + debug=True, timeout=4000) as acq: acq.acquire(multi_d_acquisition_events(num_time_points=4, time_interval_s=5, z_start = 0, 
z_end = 3, z_step = 1)) acq = None diff --git a/scripts/explore_acq.py b/scripts/explore_acq.py index 6571e2d7..ab540e48 100644 --- a/scripts/explore_acq.py +++ b/scripts/explore_acq.py @@ -1,4 +1,4 @@ -from pycromanager import ExploreAcquisition, Core +from pycromanager import ExploreAcquisition, ZMQRemoteMMCoreJ # core = Core() diff --git a/scripts/external_camera_trigger.py b/scripts/external_camera_trigger.py index b4793fa8..36afb497 100644 --- a/scripts/external_camera_trigger.py +++ b/scripts/external_camera_trigger.py @@ -1,5 +1,5 @@ import numpy as np -from pycromanager import multi_d_acquisition_events, Acquisition +from pycromanager import multi_d_acquisition_events, JavaBackendAcquisition def external_trigger_fn(event): @@ -8,7 +8,7 @@ def external_trigger_fn(event): return event -with Acquisition( +with JavaBackendAcquisition( directory="/Users/henrypinkard/megllandump", name="tcz_acq", post_camera_hook_fn=external_trigger_fn, diff --git a/scripts/generate_ndtiff_test.py b/scripts/generate_ndtiff_test.py index 27635a24..69e2d7cf 100644 --- a/scripts/generate_ndtiff_test.py +++ b/scripts/generate_ndtiff_test.py @@ -1,4 +1,4 @@ -from pycromanager import Acquisition, multi_d_acquisition_events, Core, start_headless, XYTiledAcquisition +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events, ZMQRemoteMMCoreJ, start_headless, XYTiledAcquisition import numpy as np import time @@ -13,7 +13,7 @@ # java_loc = None # start_headless(mm_app_path, config_file, java_loc=java_loc, timeout=5000) -core = Core() +core = ZMQRemoteMMCoreJ() #small images to save data core.set_property("Camera", "OnCameraCCDXSize", 32) @@ -24,8 +24,8 @@ -with Acquisition(directory=save_dir, name="ndtiffv3.0_test", show_display=True, - ) as acq: +with JavaBackendAcquisition(directory=save_dir, name="ndtiffv3.0_test", show_display=True, + ) as acq: # Generate the events for a single z-stack events = multi_d_acquisition_events( num_time_points=5, diff --git 
a/scripts/headless_demo.py b/scripts/headless_demo.py index 3a1977f7..a727f552 100644 --- a/scripts/headless_demo.py +++ b/scripts/headless_demo.py @@ -1,4 +1,4 @@ -from pycromanager import Acquisition, multi_d_acquisition_events, Core, start_headless +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events, ZMQRemoteMMCoreJ, start_headless import numpy as np import time @@ -13,7 +13,7 @@ # java_loc = None start_headless(mm_app_path, config_file, java_loc=java_loc) -core = Core() +core = ZMQRemoteMMCoreJ() core.snap_image() print(core.get_image()) @@ -25,9 +25,9 @@ def image_saved_fn(axes, dataset): pixels = dataset.read_image(**axes) print(np.mean(pixels)) -with Acquisition(directory=save_dir, name="tcz_acq", show_display=True, - image_saved_fn=image_saved_fn - ) as acq: +with JavaBackendAcquisition(directory=save_dir, name="tcz_acq", show_display=True, + image_saved_fn=image_saved_fn + ) as acq: # Generate the events for a single z-stack events = multi_d_acquisition_events( num_time_points=5, diff --git a/scripts/image_processor.py b/scripts/image_processor.py index 78a85d4f..e0b112d9 100644 --- a/scripts/image_processor.py +++ b/scripts/image_processor.py @@ -1,5 +1,5 @@ import numpy as np -from pycromanager import multi_d_acquisition_events, Acquisition +from pycromanager import multi_d_acquisition_events, JavaBackendAcquisition # Version 1: def img_process_fn(image, metadata): @@ -7,7 +7,7 @@ def img_process_fn(image, metadata): # raise Exception() return image, metadata -with Acquisition( +with JavaBackendAcquisition( directory=r"C:\Users\henry\Desktop\datadump", name="tcz_acq", image_process_fn=img_process_fn ) as acq: # Generate the events for a single z-stack diff --git a/scripts/image_processor_divert.py b/scripts/image_processor_divert.py index 065feb4a..f08dc556 100644 --- a/scripts/image_processor_divert.py +++ b/scripts/image_processor_divert.py @@ -1,12 +1,12 @@ import numpy as np -from pycromanager import 
multi_d_acquisition_events, Acquisition +from pycromanager import multi_d_acquisition_events, JavaBackendAcquisition def img_process_fn(image, metadata): print(image) pass # send them somewhere else, not default saving and display -with Acquisition(image_process_fn=img_process_fn) as acq: +with JavaBackendAcquisition(image_process_fn=img_process_fn) as acq: # Generate the events for a single z-stack events = multi_d_acquisition_events( num_time_points=10, diff --git a/scripts/image_processor_multiple.py b/scripts/image_processor_multiple.py index 32fb7033..115ded99 100644 --- a/scripts/image_processor_multiple.py +++ b/scripts/image_processor_multiple.py @@ -1,5 +1,5 @@ import numpy as np -from pycromanager import multi_d_acquisition_events, Acquisition +from pycromanager import multi_d_acquisition_events, JavaBackendAcquisition import copy @@ -23,7 +23,7 @@ def img_process_fn(image, metadata): return [(image, metadata), (image2, md_2)] -with Acquisition( +with JavaBackendAcquisition( directory="/Users/henrypinkard/megllandump", name="tcz_acq", image_process_fn=img_process_fn ) as acq: # Generate the events for a single z-stack diff --git a/scripts/keep_shutter_open.py b/scripts/keep_shutter_open.py index 700014d3..71eebfdd 100644 --- a/scripts/keep_shutter_open.py +++ b/scripts/keep_shutter_open.py @@ -1,8 +1,8 @@ import numpy as np -from pycromanager import Acquisition, multi_d_acquisition_events +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events -with Acquisition("/Users/henrypinkard/megllandump", "l_axis") as acq: +with JavaBackendAcquisition("/Users/henrypinkard/megllandump", "l_axis") as acq: # create one event for the image at each z-slice for time in range(5): z_stack = [] diff --git a/scripts/magellan_focus_surface.py b/scripts/magellan_focus_surface.py index 86e8e0bc..b824a912 100644 --- a/scripts/magellan_focus_surface.py +++ b/scripts/magellan_focus_surface.py @@ -1,4 +1,4 @@ -from pycromanager import _Bridge, Acquisition +from 
pycromanager import _Bridge, JavaBackendAcquisition import numpy as np @@ -10,5 +10,5 @@ def hook_fn(event): # magellan example -acq = Acquisition(magellan_acq_index=0, post_hardware_hook_fn=hook_fn) +acq = JavaBackendAcquisition(magellan_acq_index=0, post_hardware_hook_fn=hook_fn) acq.await_completion() diff --git a/scripts/magellan_surfaces.py b/scripts/magellan_surfaces.py index 72547676..ecb41542 100644 --- a/scripts/magellan_surfaces.py +++ b/scripts/magellan_surfaces.py @@ -1,4 +1,4 @@ -from pycromanager import _Bridge, Acquisition +from pycromanager import _Bridge, JavaBackendAcquisition import numpy as np with _Bridge() as bridge: @@ -59,5 +59,5 @@ def hook_fn(event): # Run the acquisition # magellan example -acq = Acquisition(magellan_acq_index=0, post_hardware_hook_fn=hook_fn) +acq = JavaBackendAcquisition(magellan_acq_index=0, post_hardware_hook_fn=hook_fn) acq.await_completion() diff --git a/scripts/max_intensity_projection.py b/scripts/max_intensity_projection.py index 344a20f1..b8e4179c 100644 --- a/scripts/max_intensity_projection.py +++ b/scripts/max_intensity_projection.py @@ -2,7 +2,7 @@ Acquire a time series of Z-stacks, and use and image processor to make a second channel showing the maximum intensity projection of the z stack """ -from pycromanager import Acquisition, multi_d_acquisition_events +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events import numpy as np @@ -37,5 +37,5 @@ def img_process_fn(image, metadata): save_dir = 'C:/Program Files/Micro-Manager-2.0' save_name = "max_intesnity_acq" -with Acquisition(directory=save_dir, name=save_name, image_process_fn=img_process_fn) as acq: +with JavaBackendAcquisition(directory=save_dir, name=save_name, image_process_fn=img_process_fn) as acq: acq.acquire(events) diff --git a/scripts/memory_leak_test.py b/scripts/memory_leak_test.py index 0c661f14..571ff063 100644 --- a/scripts/memory_leak_test.py +++ b/scripts/memory_leak_test.py @@ -1,6 +1,6 @@ -from pycromanager 
import Core, Studio +from pycromanager import ZMQRemoteMMCoreJ, Studio -core = Core() +core = ZMQRemoteMMCoreJ() studio = Studio() for i in range(20): diff --git a/scripts/multi_d_acq.py b/scripts/multi_d_acq.py index 299452aa..d5411af5 100644 --- a/scripts/multi_d_acq.py +++ b/scripts/multi_d_acq.py @@ -1,7 +1,7 @@ -from pycromanager import Acquisition, multi_d_acquisition_events +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events -with Acquisition(directory=r"/Users/henrypinkard/tmp", name="tcz_acq", debug=False) as acq: +with JavaBackendAcquisition(directory=r"/Users/henrypinkard/tmp", name="tcz_acq", debug=False) as acq: # Generate the events for a single z-stack events = multi_d_acquisition_events( num_time_points=8, diff --git a/scripts/multi_thread_core.py b/scripts/multi_thread_core.py index cee812a5..89863ac3 100644 --- a/scripts/multi_thread_core.py +++ b/scripts/multi_thread_core.py @@ -1,10 +1,10 @@ -from pycromanager import Acquisition, multi_d_acquisition_events, start_headless, Core +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events, start_headless, ZMQRemoteMMCoreJ import threading def snap_image(): while True: - core = Core() + core = ZMQRemoteMMCoreJ() try: core.snap_image() image = core.get_tagged_image() @@ -21,7 +21,7 @@ def snap_image(): # start_headless(mm_app_path, config_file, timeout=10000) # bridge = Bridge(timeout=1000) - core = Core() + core = ZMQRemoteMMCoreJ() print(core.get_version_info()) t = threading.Thread(target=snap_image, args=()) diff --git a/scripts/multiple_acq.py b/scripts/multiple_acq.py index 20a959fd..03989946 100644 --- a/scripts/multiple_acq.py +++ b/scripts/multiple_acq.py @@ -1,5 +1,5 @@ import os -from pycromanager import Core, Acquisition, multi_d_acquisition_events, start_headless +from pycromanager import ZMQRemoteMMCoreJ, JavaBackendAcquisition, multi_d_acquisition_events, start_headless PORT1 = 4827 PORT2 = 5827 @@ -10,8 +10,8 @@ save_path = 
r"C:\Users\henry\Desktop\datadump" -acq1 = Acquisition(directory=save_path, name='acq1', port=PORT1, debug=True) -acq2 = Acquisition(directory=save_path, name='acq2', port=PORT1, debug=True) +acq1 = JavaBackendAcquisition(directory=save_path, name='acq1', port=PORT1, debug=True) +acq2 = JavaBackendAcquisition(directory=save_path, name='acq2', port=PORT1, debug=True) acq1.acquire(events1) acq2.acquire(events2) diff --git a/scripts/napari_frontend.py b/scripts/napari_frontend.py index 53d4cd1f..b6618f41 100644 --- a/scripts/napari_frontend.py +++ b/scripts/napari_frontend.py @@ -3,7 +3,7 @@ In other python environments (i.e. notebook), the relevant calls to napari might be different """ from pycromanager import start_headless -from pycromanager import Acquisition, multi_d_acquisition_events +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events from napari.qt.threading import thread_worker import threading import napari @@ -33,8 +33,8 @@ def image_saved_callback(axes, d): # This function will run an acquisition on a different thread (because calling # napari.run() will block on this thread def run_acq(): - with Acquisition(directory="/Users/henrypinkard/tmp", name="tcz_acq", - image_saved_fn=image_saved_callback, show_display=False) as acq: + with JavaBackendAcquisition(directory="/Users/henrypinkard/tmp", name="tcz_acq", + image_saved_fn=image_saved_callback, show_display=False) as acq: events = multi_d_acquisition_events( num_time_points=10, time_interval_s=5, channel_group="Channel", channels=["DAPI", "FITC"], diff --git a/scripts/napari_simple.py b/scripts/napari_simple.py index fad916d4..546f541f 100644 --- a/scripts/napari_simple.py +++ b/scripts/napari_simple.py @@ -1,4 +1,4 @@ -from pycromanager import Acquisition, multi_d_acquisition_events, start_headless +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events, start_headless import napari # Optional: Launch headless mode, which means Micro-Manager does @@ -8,8 
+8,8 @@ # start_headless(mm_app_path, config_file) -acq = Acquisition(directory=r"C:\Users\henry\Desktop\data", name="tcz_acq", - show_display='napari') +acq = JavaBackendAcquisition(directory=r"C:\Users\henry\Desktop\data", name="tcz_acq", + show_display='napari') events = multi_d_acquisition_events(num_time_points=8, time_interval_s=2, z_start=0, z_end=6, z_step=0.7,) acq.acquire(events) diff --git a/scripts/no_saving_test.py b/scripts/no_saving_test.py index 63b0fd14..6d766e4c 100644 --- a/scripts/no_saving_test.py +++ b/scripts/no_saving_test.py @@ -1,4 +1,4 @@ -from pycromanager import Acquisition, multi_d_acquisition_events +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events def img_process(image, metadata): @@ -6,6 +6,6 @@ def img_process(image, metadata): # TODO: process and save images. -with Acquisition(image_process_fn=img_process) as acq: +with JavaBackendAcquisition(image_process_fn=img_process) as acq: events = multi_d_acquisition_events(num_time_points=2, time_interval_s=0.1) acq.acquire(events) diff --git a/scripts/speed_test.py b/scripts/speed_test.py index 42c45022..65291e77 100644 --- a/scripts/speed_test.py +++ b/scripts/speed_test.py @@ -1,4 +1,4 @@ -from pycromanager import JavaClass, Core +from pycromanager import JavaClass, ZMQRemoteMMCoreJ tester = JavaClass('org.micromanager.acquisition.internal.acqengjcompat.speedtest.SpeedTest') @@ -6,7 +6,7 @@ dir = r'C:\Users\henry\Desktop\data' name = r'speed\test' -core = Core() +core = ZMQRemoteMMCoreJ() num_time_points = 1000 show_viewer = True diff --git a/scripts/storage_callback.py b/scripts/storage_callback.py index e740f1a2..d0c25b71 100644 --- a/scripts/storage_callback.py +++ b/scripts/storage_callback.py @@ -1,4 +1,4 @@ -from pycromanager import Acquisition, multi_d_acquisition_events +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events import numpy as np def image_saved_fn(axes, dataset): @@ -7,8 +7,8 @@ def image_saved_fn(axes, dataset): # 
Do something with image pixels/metadata dir = r'C:\Users\henry\Desktop\data' -with Acquisition(directory=dir, name="tcz_acq", debug=D, show_display=False, - image_saved_fn=image_saved_fn) as acq: +with JavaBackendAcquisition(directory=dir, name="tcz_acq", debug=D, show_display=False, + image_saved_fn=image_saved_fn) as acq: events = multi_d_acquisition_events( num_time_points=5, time_interval_s=0, diff --git a/scripts/string_axes.py b/scripts/string_axes.py index 6aecd02f..6cbe26b0 100644 --- a/scripts/string_axes.py +++ b/scripts/string_axes.py @@ -3,11 +3,11 @@ """ -from pycromanager import Acquisition, multi_d_acquisition_events -from pycromanager.acq_util import multi_d_acquisition_events_new +from pycromanager import JavaBackendAcquisition, multi_d_acquisition_events +from pycromanager.headless import multi_d_acquisition_events_new -with Acquisition(directory="/Users/henrypinkard/tmp", name="NDTiff3.2_monochrome", debug=False) as acq: +with JavaBackendAcquisition(directory="/Users/henrypinkard/tmp", name="NDTiff3.2_monochrome", debug=False) as acq: # Generate the events for a single z-stack events = multi_d_acquisition_events_new( num_time_points=8, diff --git a/scripts/threads_and_bridges.py b/scripts/threads_and_bridges.py index bb1c1031..e7439162 100644 --- a/scripts/threads_and_bridges.py +++ b/scripts/threads_and_bridges.py @@ -1,15 +1,15 @@ -from pycromanager.mm_java_classes import Core, JavaObject, JavaClass +from pycromanager.mm_java_classes import ZMQRemoteMMCoreJ, JavaObject, JavaClass from pycromanager import _Bridge import gc import threading def new_b(): - core = Core() + core = ZMQRemoteMMCoreJ() for i in range(100): # core = Dummy() - core = Core() - core2 = Core() + core = ZMQRemoteMMCoreJ() + core2 = ZMQRemoteMMCoreJ() threading.Thread(target=new_b).start()