From 97ce65f9bdc265b7f3b4a6042ea3f5a74bbd70cc Mon Sep 17 00:00:00 2001
From: Brian Koopman
Date: Mon, 2 Nov 2020 11:13:24 -0500
Subject: [PATCH 01/43] Link read the docs badge to 'develop' version

---
 README.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.rst b/README.rst
index 934067c74..e56be6e32 100644
--- a/README.rst
+++ b/README.rst
@@ -6,8 +6,8 @@ SOCS - Simons Observatory Control System
    :target: https://github.com/simonsobs/socs/actions?query=workflow%3A%22Build+Develop+Images%22
    :alt: GitHub Workflow Status
 
-.. image:: https://readthedocs.org/projects/socs/badge/?version=latest
-   :target: https://socs.readthedocs.io/en/latest/?badge=latest
+.. image:: https://readthedocs.org/projects/socs/badge/?version=develop
+   :target: https://socs.readthedocs.io/en/develop/?badge=develop
    :alt: Documentation Status
 
 .. image:: https://coveralls.io/repos/github/simonsobs/socs/badge.svg?branch=travis

From 1be403b9aa2dd2ece521da274225e2f83ca83c37 Mon Sep 17 00:00:00 2001
From: Brian Koopman
Date: Wed, 4 Nov 2020 20:09:55 -0500
Subject: [PATCH 02/43] Add Synaccess Agent docs to index and alphabetize

---
 docs/index.rst | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/docs/index.rst b/docs/index.rst
index 218efaac3..9f900d3eb 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -38,14 +38,15 @@ Simulator Reference
 Simulators are used to mock software and hardware
 
    agents/bluefors_agent
    agents/cryomech_cpa
-   agents/scpi_psu
    agents/labjack
    agents/lakeshore240
    agents/lakeshore372
+   agents/meinberg_m1000_agent
+   agents/pfeiffer
    agents/pysmurf/index
+   agents/scpi_psu
    agents/smurf_recorder
-   agents/pfeiffer
-   agents/meinberg_m1000_agent
+   agents/synacc
 
 .. toctree::
    :caption: Simulator Reference

From af8873af9847c7ff90a4aeaf933affb494c440c0 Mon Sep 17 00:00:00 2001
From: jlashner
Date: Fri, 20 Nov 2020 15:59:55 -0800
Subject: [PATCH 03/43] Bump ocs docker

---
 Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index 7fd67d5f3..9d7f4d809 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,7 +2,7 @@
 # A container setup with an installation of socs.
 
 # Use the ocs image as a base
-FROM simonsobs/ocs:v0.7.1
+FROM simonsobs/ocs:v0.7.1-7-g700b889-dev
 
 # Copy the current directory contents into the container at /app
 COPY . /app/socs/

From 78f53f08ab406b5d95e9b003c1f84bc49c6b93ea Mon Sep 17 00:00:00 2001
From: jlashner
Date: Tue, 24 Nov 2020 21:44:14 -0800
Subject: [PATCH 04/43] Clamp ds_factor in smurf recorder

---
 socs/agent/smurf_recorder.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/socs/agent/smurf_recorder.py b/socs/agent/smurf_recorder.py
index 4f179e6f5..51d3f1aa8 100644
--- a/socs/agent/smurf_recorder.py
+++ b/socs/agent/smurf_recorder.py
@@ -461,6 +461,9 @@ def read_stream_data(self):
             ds_factor = (frame['data'].sample_rate/core.G3Units.Hz) \
                 // self.target_rate
             ds_factor = max(int(ds_factor), 1)
+            n_samples = frame['data'].n_samples
+            if 1 < n_samples <= ds_factor:
+                ds_factor = n_samples - 1
             times = [
                 t.time / core.G3Units.s
                 for t in frame['data'].times()[::ds_factor]

From bccd738f3605b1fc878c68448101d0bd53845cc1 Mon Sep 17 00:00:00 2001
From: Brian Koopman
Date: Sat, 12 Dec 2020 16:26:07 +0000
Subject: [PATCH 05/43] Bump ocs version to v0.7.1-9-g500447e-dev

This introduces so3g v0.1.0-24-g5645096 and all that comes with it,
including Int64 support in G3VectorInts.
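For a concrete picture of what that enables, here is a hypothetical sketch
(assuming the spt3g ``core`` Python API; the frame and field names are
invented for illustration)::

    from spt3g import core

    frame = core.G3Frame(core.G3FrameType.Scan)
    # Integer vectors holding values past the Int32 range, e.g. sample
    # counters near 2**40, should now serialize without truncation.
    frame['sample_counter'] = core.G3VectorInt([2**40, 2**40 + 1])
    print(list(frame['sample_counter']))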
---
 Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index 9d7f4d809..73b767ef7 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,7 +2,7 @@
 # A container setup with an installation of socs.
 
 # Use the ocs image as a base
-FROM simonsobs/ocs:v0.7.1-7-g700b889-dev
+FROM simonsobs/ocs:v0.7.1-9-g500447e-dev
 
 # Copy the current directory contents into the container at /app
 COPY . /app/socs/

From 81bd87228663f4831bc01a7f9b34d78c5b7840ca Mon Sep 17 00:00:00 2001
From: jlashner
Date: Tue, 15 Dec 2020 21:59:27 -0800
Subject: [PATCH 06/43] bump sodetlib version

---
 agents/pysmurf_controller/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/agents/pysmurf_controller/Dockerfile b/agents/pysmurf_controller/Dockerfile
index a6a850829..cfd397ced 100644
--- a/agents/pysmurf_controller/Dockerfile
+++ b/agents/pysmurf_controller/Dockerfile
@@ -1,4 +1,4 @@
-FROM simonsobs/sodetlib:v0.0.1
+FROM simonsobs/sodetlib:v0.1.0
 
 # Set locale
 ENV LANG C.UTF-8

From 44f663be0c6d115d94e86fb017125799e6bce32a Mon Sep 17 00:00:00 2001
From: JakeSpisak <43190235+JakeSpisak@users.noreply.github.com>
Date: Mon, 11 Jan 2021 06:58:52 -0800
Subject: [PATCH 07/43] Implement labjack ljm functionality to allow high
 sample rates (#129)

* Implement labjack ljm functionality

* Move ljm import to readthedocs block

Also changes docs workflow to set the READTHEDOCS environment variable.

* Changes after review

Co-authored-by: jlashner
---
 .github/workflows/develop.yml                |   2 +-
 .github/workflows/official-docker-images.yml |   2 +-
 .github/workflows/pytest.yml                 |   2 +-
 agents/labjack/Dockerfile                    |   1 -
 agents/labjack/labjack_agent.py              | 152 +++++++++++++------
 agents/labjack/requirements.txt              |   1 +
 docs/agents/labjack.rst                      |  46 +++---
 7 files changed, 137 insertions(+), 69 deletions(-)

diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 57b11b1e4..b83d5de06 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -38,7 +38,7 @@ jobs:
 
       - name: Test documentation build
         run: |
-          docker run --rm socs sh -c "make -C docs/ html"
+          docker run --rm -e READTHEDOCS=True socs sh -c "make -C docs/ html"
 
       # Dockerize
       - name: Build and push development docker image
diff --git a/.github/workflows/official-docker-images.yml b/.github/workflows/official-docker-images.yml
index 5f658601d..12016393e 100644
--- a/.github/workflows/official-docker-images.yml
+++ b/.github/workflows/official-docker-images.yml
@@ -30,7 +30,7 @@ jobs:
 
       - name: Test documentation build
         run: |
-          docker run --rm socs sh -c "make -C docs/ html"
+          docker run --rm -e READTHEDOCS=True socs sh -c "make -C docs/ html"
 
       # Dockerize
       - name: Build and push official docker image
diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml
index 8e60f138b..e2a46a6c3 100644
--- a/.github/workflows/pytest.yml
+++ b/.github/workflows/pytest.yml
@@ -35,4 +35,4 @@ jobs:
 
       - name: Test documentation build
         run: |
-          docker run --rm socs sh -c "make -C docs/ html"
+          docker run --rm -e READTHEDOCS=True socs sh -c "make -C docs/ html"
diff --git a/agents/labjack/Dockerfile b/agents/labjack/Dockerfile
index c4bce4977..82493532c 100644
--- a/agents/labjack/Dockerfile
+++ b/agents/labjack/Dockerfile
@@ -22,7 +22,6 @@ RUN ./labjack_ljm_minimal_2020_03_30_x86_64/labjack_ljm_installer.run -- --no-re
 
 RUN pip3 install --no-cache-dir https://labjack.com/sites/default/files/software/Python_LJM_2019_04_03.zip
 
-
 # Run registry on container startup
 ENTRYPOINT ["dumb-init", "python3", "-u", "labjack_agent.py"]
"labjack_agent.py"] diff --git a/agents/labjack/labjack_agent.py b/agents/labjack/labjack_agent.py index bbcb50f1f..fa5926ac3 100644 --- a/agents/labjack/labjack_agent.py +++ b/agents/labjack/labjack_agent.py @@ -2,7 +2,6 @@ import time import struct import os -from pymodbus.client.sync import ModbusTcpClient import numexpr import yaml import csv @@ -11,6 +10,7 @@ ON_RTD = os.environ.get('READTHEDOCS') == 'True' if not ON_RTD: + from labjack import ljm from ocs import ocs_agent, site_config from ocs.ocs_twisted import TimeoutLock @@ -84,32 +84,38 @@ class LabJackFunctions: def __init__(self): pass - def unit_conversion(self, v, function_info): + def unit_conversion(self, v_array, function_info): """ - Given a voltage and function information from the + Given a voltage array and function information from the labjack_config.yaml file, applies a unit conversion. Returns the converted value and its units. + Args: + v_array (numpy array): The voltages to be converted. + function_info (dict): Specifies the type of function. + If custom, also gives the function. """ - if function_info["user_defined"] == 'False': function = getattr(self, function_info['type']) - return function(v) + return function(v_array) + # Custom function evaluation else: units = function_info['units'] - value = float(numexpr.evaluate(function_info["function"])) - return value, units + new_values = [] + for v in v_array: + new_values.append(float(numexpr.evaluate(function_info["function"]))) + return new_values, units - def MKS390(self, v): + def MKS390(self, v_array): """ Conversion function for the MKS390 Micro-Ion ATM Modular Vaccum Gauge. """ - value = 1.3332*10**(2*v - 11) + value = 1.3332*10**(2*v_array - 11) units = 'mBar' return value, units - def warm_therm(self, v): + def warm_therm(self, v_array): """ Conversion function for SO warm thermometry readout. Voltage is converted to resistance using the LJTick, which @@ -118,7 +124,7 @@ def warm_therm(self, v): for the thermistor model, serial number 10K4D25. """ # LJTick voltage to resistance conversion - R = (2.5-v)*10000/v + R = (2.5-v_array)*10000/v_array # Import the Ohms to Celsius cal curve and apply cubic # interpolation to find the temperature @@ -129,12 +135,17 @@ def warm_therm(self, v): R_cal = np.array([float(RT[1]) for RT in lists[1:]]) T_cal = np.flip(T_cal) R_cal = np.flip(R_cal) - RtoT = interp1d(R_cal, T_cal, kind='cubic') + try: + RtoT = interp1d(R_cal, T_cal, kind='cubic') + values = RtoT(R) + + except ValueError: + print('Temperature outside thermometer range') + values = -1000 + np.zeros(len(R)) - value = float(RtoT(R)) units = 'C' - return value, units + return values, units # LabJack agent class @@ -147,16 +158,16 @@ def __init__(self, agent, ip_address, active_channels, function_file, self.lock = TimeoutLock() self.ip_address = ip_address self.module = None - print(f"Active channels is {active_channels}") + self.ljf = LabJackFunctions() + self.sampling_frequency = sampling_frequency + # Labjack channels to read if active_channels == 'T7-all': - self.sensors = ['Channel_{}'.format(i+1) for i in range(14)] + self.chs = ['AIN{}'.format(i) for i in range(14)] elif active_channels == 'T4-all': - self.sensors = ['Channel_{}'.format(i+1) for i in range(12)] + self.chs = ['AIN{}'.format(i) for i in range(12)] else: - self.sensors = ['Channel_{}'.format(ch) for ch in active_channels] - self.ljf = LabJackFunctions() - self.sampling_frequency = sampling_frequency + self.chs = active_channels # Load dictionary of unit conversion functions from yaml file. 
         # the file is in the $OCS_CONFIG_DIR directory
@@ -167,20 +178,32 @@ def __init__(self, agent, ip_address, active_channels, function_file,
                                           function_file)
         with open(function_file_path, 'r') as stream:
             self.functions = yaml.safe_load(stream)
+        if self.functions is None:
+            self.functions = {}
         print(f"Applying conversion functions: {self.functions}")
 
         self.initialized = False
         self.take_data = False
 
-        # Register feed
+        # Register main feed. Exclude influx due to potentially high scan rate
         agg_params = {
             'frame_length': 60,
+            'exclude_influx': True
         }
-        self.agent.register_feed('Sensors',
+        self.agent.register_feed('sensors',
                                  record=True,
                                  agg_params=agg_params,
                                  buffer_time=1)
 
+        # Register downsampled feed for influx.
+        agg_params_downsampled = {
+            'frame_length': 60
+        }
+        self.agent.register_feed('sensors_downsampled',
+                                 record=True,
+                                 agg_params=agg_params_downsampled,
+                                 buffer_time=1)
+
     # Task functions
     def init_labjack_task(self, session, params=None):
         """
@@ -197,10 +220,13 @@
                 return False, "Could not acquire lock."
 
             session.set_status('starting')
-
-            self.module = ModbusTcpClient(str(self.ip_address))
-
-            print("Initialized labjack module")
+            # Connect with the labjack
+            self.handle = ljm.openS("ANY", "ANY", self.ip_address)
+            info = ljm.getHandleInfo(self.handle)
+            print("\nOpened LabJack of type: %i, Connection type: %i,\n"
+                  "Serial number: %i, IP address: %s, Port: %i" %
+                  (info[0], info[1], info[2],
+                   ljm.numberToIP(info[3]), info[4]))
 
         session.add_message("Labjack initialized")
 
@@ -225,8 +251,13 @@
         if params is None:
             params = {}
 
-        f_sample = params.get('sampling_frequency', self.sampling_frequency)
-        sleep_time = 1/f_sample
+        # Setup streaming parameters. Data is collected and published in
+        # blocks at 1 Hz or the scan rate, whichever is less.
+        scan_rate_input = params.get('sampling_frequency',
+                                     self.sampling_frequency)
+        scans_per_read = max(1, int(scan_rate_input))
+        num_chs = len(self.chs)
+        ch_addrs = ljm.namesToAddresses(num_chs, self.chs)[0]
 
         with self.lock.acquire_timeout(0, job='acq') as acquired:
             if not acquired:
@@ -235,35 +266,62 @@
                 return False, "Could not acquire lock."
 
             session.set_status('running')
-
             self.take_data = True
 
+            # Start the data stream. Use the scan rate returned by the stream,
+            # which should be the same as the input scan rate.
+            scan_rate = ljm.eStreamStart(self.handle, scans_per_read, num_chs,
+                                         ch_addrs, scan_rate_input)
+            print(f"\nStream started with a scan rate of {scan_rate} Hz.")
+
+            cur_time = time.time()
             while self.take_data:
                 data = {
-                    'timestamp': time.time(),
                     'block_name': 'sens',
                     'data': {}
                 }
 
-                for i, sens in enumerate(self.sensors):
-                    rr = self.module.read_input_registers(2*i, 2)
-                    data['data'][sens + 'V'] = data_to_float32(rr.registers)
-
-                    # Apply unit conversion function for this channel
-                    if sens in self.functions.keys():
-                        v = data['data'][sens + 'V']
-                        value, units = \
-                            self.ljf.unit_conversion(v, self.functions[sens])
-                        data['data'][sens + '_' + units] = value
-
-                time.sleep(sleep_time)
+                # Query the labjack
+                raw_output = ljm.eStreamRead(self.handle)
+                output = raw_output[0]
+
+                # Data comes in form ['AIN0_1', 'AIN1_1', 'AIN0_2', ...]
+                for i, ch in enumerate(self.chs):
+                    ch_output = output[i::num_chs]
+                    data['data'][ch + 'V'] = ch_output
 
-                self.agent.publish_to_feed('Sensors', data)
-
-                # Allow this process to be queried to return current data
-                session.data = data
-
-                self.agent.feeds['Sensors'].flush_buffer()
+                    # Apply unit conversion function for this channel
+                    if ch in self.functions.keys():
+                        new_ch_output, units = \
+                            self.ljf.unit_conversion(np.array(ch_output),
+                                                     self.functions[ch])
+                        data['data'][ch + units] = list(new_ch_output)
+
+                # The labjack outputs at exactly the scan rate but doesn't
+                # generate timestamps. So create them here.
+                timestamps = [cur_time+i/scan_rate for i in range(scans_per_read)]
+                cur_time += scans_per_read/scan_rate
+                data['timestamps'] = timestamps
+
+                self.agent.publish_to_feed('sensors', data)
+
+                # Publish to the downsampled data feed only the first
+                # timestamp and data point for each channel.
+                data_downsampled = {
+                    'block_name': 'sens',
+                    'data': {},
+                    'timestamp': timestamps[0]
+                }
+                for key, value in data['data'].items():
+                    data_downsampled['data'][key] = value[0]
+                self.agent.publish_to_feed('sensors_downsampled', data_downsampled)
+                session.data = data_downsampled
+
+            # Flush buffer and stop the data stream
+            self.agent.feeds['sensors'].flush_buffer()
+            self.agent.feeds['sensors_downsampled'].flush_buffer()
+            ljm.eStreamStop(self.handle)
+            print("Data stream stopped")
 
         return True, 'Acquisition exited cleanly.'
diff --git a/agents/labjack/requirements.txt b/agents/labjack/requirements.txt
index 4b5b86f79..6b419d67a 100644
--- a/agents/labjack/requirements.txt
+++ b/agents/labjack/requirements.txt
@@ -1,2 +1,3 @@
 numexpr
 scipy
+labjack-ljm
diff --git a/docs/agents/labjack.rst b/docs/agents/labjack.rst
index 8ffbbedbc..86b3a240e 100644
--- a/docs/agents/labjack.rst
+++ b/docs/agents/labjack.rst
@@ -31,7 +31,7 @@ available arguments::
        'instance-id': 'labjack',
        'arguments':[
          ['--ip-address', '10.10.10.150'],
-         ['--active-channels', ['1', '2', '3']],
+         ['--active-channels', ['AIN0', 'AIN1', 'AIN2']],
          ['--function-file', 'labjack-functions.yaml'],
          ['--mode', 'acq'],
          ['--sampling_frequency', '700'],
@@ -41,36 +41,44 @@
 You should assign your LabJack a static IP, you'll need to know that here.
 The 'active-channels' argument specifies the channels that will be read out.
 It can be a list, 'T7-all', or 'T4-all'. The latter two read out all 14 or 12
 analog channels on the T7 and T4, respectively. 'sampling_frequency'
-is in Hz, and has been tested sucessfully up to about 700 Hz. The 'function-file'
-argument specifies the labjack configuration file, which is located in your
-OCS configuration directory. This allows analog voltage inputs on the labjack
-to be converted to different units. Here is an example labjack configuration
-file::
-
-    Channel_1:
+is in Hz, and has been tested successfully from 0.1 to 5000 Hz. To avoid
+high sample rates potentially clogging up live monitoring, the main feed
+doesn't get published to influxdb. Instead influx gets a separate feed
+downsampled to a maximum of 1 Hz. Both the main and downsampled feeds are
+published to g3 files.
+
+The 'function-file' argument specifies the labjack configuration file, which
+is located in your OCS configuration directory. This allows analog voltage
+inputs on the labjack to be converted to different units. Here is an example
+labjack configuration file::
+
+    AIN0:
         user_defined: 'False'
         type: "MKS390"
 
-    Channel_2:
+    AIN1:
         user_defined: 'False'
         type: 'warm_therm'
 
-    Channel_3:
+    AIN2:
         user_defined: 'True'
         units: 'Ohms'
         function: '(2.5-v)*10000/v'
 
-In this example, Channels 1 and 2 (AIN0 and AIN1 on the labjack) are hooked
-up to the MKS390 pressure `gauge`_ and a `thermistor`_ from the SO-specified
-warm thermometry setup, respectively. Since these are defined functions in the
-LabJackFunctions class, specifying the name of their method is all that is
-needed. Channel 3 shows how to define a custom function. In this case, the user
-specifies the units and the function itself, which takes the input voltage 'v'
-as the only argument.
+In this example, channels AIN0 and AIN1 are hooked up to the MKS390 pressure
+`gauge`_ and a `thermistor`_ from the SO-specified warm thermometry setup,
+respectively. Since these are defined functions in the LabJackFunctions class,
+specifying the name of their method is all that is needed. AIN2 shows how to
+define a custom function. In this case, the user specifies the units and the
+function itself, which takes the input voltage 'v' as the only argument.
 
 .. _gauge: https://www.mksinst.com/f/390-micro-ion-atm-modular-vacuum-gauge
 .. _thermistor: https://docs.rs-online.com/c868/0900766b8142cdef.pdf
 
+.. note::
+    The (lower-case) letter 'v' must be used when writing user-defined
+    functions. No other variable will be parsed correctly.
+
 Docker
 ``````
 The LabJack Agent should be configured to run in a Docker container. An
@@ -78,6 +86,7 @@ example docker-compose service configuration is shown here::
 
   ocs-labjack:
     image: simonsobs/ocs-labjack-agent:latest
+    <<: *log-options
     hostname: ocs-docker
     network_mode: "host"
     volumes:
@@ -92,7 +101,7 @@ Example Client
 --------------
 Since labjack functionality is currently limited to acquiring data, which can
 enabled on startup, users are likely to rarely need a client. This example
-shows the basic acquisition funcionality::
+shows the basic acquisition functionality::
 
     #Initialize the labjack
     from ocs import matched_client
@@ -105,6 +114,7 @@ shows the basic acquisition funcionality::
     print(session)
 
     #Get the current data values 1 second after starting acquistion
+    import time
     time.sleep(1)
     status, message, session = lj.acq.status()
    print(session["data"])

From 015f25e9ad9b93b610971bfe93d6a1c4252df83e Mon Sep 17 00:00:00 2001
From: jseibert575
Date: Tue, 5 Jan 2021 11:15:59 -0800
Subject: [PATCH 08/43] Handle NAN in downsample factor

---
 socs/agent/smurf_recorder.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/socs/agent/smurf_recorder.py b/socs/agent/smurf_recorder.py
index 51d3f1aa8..d4270b570 100644
--- a/socs/agent/smurf_recorder.py
+++ b/socs/agent/smurf_recorder.py
@@ -3,6 +3,7 @@
 import time
 
 import txaio
+import numpy as np
 
 # For logging
 txaio.use_twisted()
@@ -439,7 +440,12 @@ def run(self):
         """
         self.read_frames()
         self.check_for_frame_gap(10)
-        self.read_stream_data()
+        if len(self.monitored_channels) > 0:
+            try:
+                self.read_stream_data()
+            except Exception as e:
+                self.log.warn("Exception thrown when reading stream data:\n{e}", e=e)
+
         if self.frames:
             self.create_new_file()
             self.write_frames_to_file()
@@ -460,6 +466,8 @@ def read_stream_data(self):
                 continue
             ds_factor = (frame['data'].sample_rate/core.G3Units.Hz) \
                 // self.target_rate
+            if np.isnan(ds_factor):
+                continue
             ds_factor = max(int(ds_factor), 1)
             n_samples = frame['data'].n_samples
             if 1 < n_samples <= ds_factor:

From 5c9b01e135eb882d8ddffae951fbd487da256b6d Mon Sep 17 00:00:00 2001
From: jlashner
Date: Mon, 11 Jan 2021 18:52:29 -0600
Subject: [PATCH 09/43] Added dumb-init to pysmurf-controller reqs

---
 agents/pysmurf_controller/requirements.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/agents/pysmurf_controller/requirements.txt b/agents/pysmurf_controller/requirements.txt
index 29966a9a0..ebd282170 100644
--- a/agents/pysmurf_controller/requirements.txt
+++ b/agents/pysmurf_controller/requirements.txt
@@ -1 +1,2 @@
 mysql-connector>=2.1.6
+dumb-init

From 288ddb743b5ccaf2060d1d28b240961181b04faa Mon Sep 17 00:00:00 2001
From: Brian Koopman
Date: Tue, 19 Jan 2021 10:02:31 -0500
Subject: [PATCH 10/43] Update coveralls command for v3.0.0

v3.0.0 of coveralls-python now requires `--service` be specified for things
to work with Github Actions. See [1].
[1] - https://github.com/TheKevJames/coveralls-python/issues/251
---
 .github/workflows/develop.yml | 2 +-
 .github/workflows/pytest.yml  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index b83d5de06..6ed12b26f 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -34,7 +34,7 @@ jobs:
           pip install coveralls
           coverage combine
           coverage report
-          coveralls
+          coveralls --service=github
 
       - name: Test documentation build
         run: |
diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml
index e2a46a6c3..a575d1c15 100644
--- a/.github/workflows/pytest.yml
+++ b/.github/workflows/pytest.yml
@@ -31,7 +31,7 @@ jobs:
           pip install coveralls
           coverage combine
           coverage report
-          coveralls
+          coveralls --service=github
 
       - name: Test documentation build
         run: |

From e48b4bb85b5dd8935c39d6f047c624c709225789 Mon Sep 17 00:00:00 2001
From: jlashner
Date: Tue, 26 Jan 2021 21:48:54 -0500
Subject: [PATCH 11/43] Adds set_status calls to some operations

---
 agents/pysmurf_archiver/pysmurf_archiver_agent.py | 2 ++
 agents/smurf_recorder/smurf_recorder.py           | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/agents/pysmurf_archiver/pysmurf_archiver_agent.py b/agents/pysmurf_archiver/pysmurf_archiver_agent.py
index 3a5025920..7117c3623 100644
--- a/agents/pysmurf_archiver/pysmurf_archiver_agent.py
+++ b/agents/pysmurf_archiver/pysmurf_archiver_agent.py
@@ -222,6 +222,7 @@ def run(self, session, params=None):
             it'll increment the `failed_copy_attempts` counter.
         """
         self.running = True
+        session.set_status('running')
         while self.running:
 
             with get_db_connection(**self.sql_config) as con:
@@ -268,6 +269,7 @@ def run(self, session, params=None):
 
     def stop(self, session, params=None):
         """ Stopper for run process """
+        session.set_status('stopping')
         self.running = False
 
 
diff --git a/agents/smurf_recorder/smurf_recorder.py b/agents/smurf_recorder/smurf_recorder.py
index 1009982be..d04e7a0c2 100644
--- a/agents/smurf_recorder/smurf_recorder.py
+++ b/agents/smurf_recorder/smurf_recorder.py
@@ -136,6 +136,7 @@ def start_record(self, session, params=None):
                                  self.data_dir,
                                  self.stream_id,
                                  target_rate=self.target_rate)
+        session.set_status('running')
         while self.is_streaming:
             recorder.monitored_channels = self.monitored_channels
             recorder.target_rate = self.target_rate
@@ -156,6 +157,7 @@ def stop_record(self, session, params=None):
 
         """
         self.is_streaming = False
+        session.set_status('stopping')
        return True, "Stopping Recording"

From 6df3e3f656a5311320dca5121eeecb28b6695be3 Mon Sep 17 00:00:00 2001
From: jlashner
Date: Wed, 20 Jan 2021 16:11:56 -0600
Subject: [PATCH 12/43] Add some quality of life improvements to smurf-rec

Changes include:

1. Check if socket is open before trying to create G3Streamer to minimize
   log_fatals
2. Change some info statements into debug, and some debug statements to info
3. Discard G3PipelineInfo frames to fix stream-id-less file on startup bug.
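Condensed, the pre-check in item 1 amounts to attempting a plain TCP connect
before constructing a G3Reader (an illustrative sketch, not the exact code in
the diff below; ``port_is_open`` is a made-up name)::

    import socket

    def port_is_open(host, port, timeout=5):
        # Only build a G3Reader when a TCP connection can actually be
        # established, so a closed port never triggers log_fatal.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(timeout)
        try:
            s.connect((host, port))
            return True
        except OSError:
            return False
        finally:
            s.close()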
---
 agents/smurf_recorder/smurf_recorder.py |  1 -
 socs/agent/smurf_recorder.py            | 49 ++++++++++++++++++++-----
 2 files changed, 39 insertions(+), 11 deletions(-)

diff --git a/agents/smurf_recorder/smurf_recorder.py b/agents/smurf_recorder/smurf_recorder.py
index d04e7a0c2..9ffa55e8d 100644
--- a/agents/smurf_recorder/smurf_recorder.py
+++ b/agents/smurf_recorder/smurf_recorder.py
@@ -219,5 +219,4 @@ def make_parser(parser=None):
     agent.register_task('set_target_rate', listener.set_target_rate,
                         blocking=False)
-
     runner.run(agent, auto_reconnect=True)
diff --git a/socs/agent/smurf_recorder.py b/socs/agent/smurf_recorder.py
index d4270b570..f6860e528 100644
--- a/socs/agent/smurf_recorder.py
+++ b/socs/agent/smurf_recorder.py
@@ -4,6 +4,8 @@
 import time
 import txaio
 import numpy as np
+import sys
+import socket
 
 # For logging
 txaio.use_twisted()
@@ -21,6 +23,19 @@ class FlowControl(Enum):
     CLEANSE = 3
 
 
+def check_port(host, port, timeout=10):
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.settimeout(timeout)
+    s.setblocking(1)
+    try:
+        s.connect((host, port))
+        return True
+    except socket.error as error:
+        return False
+    finally:
+        s.close()
+
+
 def _create_dirname(start_time, data_dir, stream_id):
     """Create the file path for .g3 file output.
 
@@ -162,6 +177,8 @@ def __init__(self, file_duration, tcp_addr, data_dir, stream_id,
         # Parameters
         self.time_per_file = file_duration
         self.address = tcp_addr
+        self.host, self.port = self.address[6:].split(':')
+        self.port = int(self.port)
         self.data_dir = data_dir
         self.stream_id = stream_id
         self.log = txaio.make_logger()
@@ -169,6 +186,7 @@
         # Reader/Writer
         self.reader = None
         self.writer = None
+        self.data_received = False
 
         # Attributes
         self.frames = []
@@ -210,12 +228,16 @@ def _establish_reader_connection(self, timeout=5):
 
         """
         reader = None
-        try:
-            reader = core.G3Reader(self.address,
-                                   timeout=timeout)
-            self.log.info("G3Reader connection established")
-        except RuntimeError:
-            self.log.error("G3Reader could not connect.")
+
+        if (check_port(self.host, self.port)):
+            try:
+                reader = core.G3Reader(self.address,
+                                       timeout=0)
+                self.log.debug("G3Reader connection to {addr} established!",
+                               addr=self.address)
+            except RuntimeError:
+                self.log.error("G3Reader could not connect.")
+
 
         # Prevent rapid connection attempts
         if self.last_connection_time is not None:
@@ -275,13 +297,20 @@ def read_frames(self, timeout=5):
 
             # Discard all flow control frames
             self.frames = [x for x in self.frames
                            if 'sostream_flowcontrol' not in x]
-
+            # Discard Pipeline info frame
+            self.frames = [x for x in self.frames
+                           if x.type != core.G3FrameType.PipelineInfo]
+            if self.frames and not self.data_received:
+                self.data_received = True
+                self.log.info("Frames received from {addr}", addr=self.address)
             return
         else:
-            self.log.debug("Could not read frames. Connection " +
-                           "timed out, or G3NetworkSender offline. " +
-                           "Cleaning up...")
+            if self.data_received:
+                self.log.info("Could not read frames. Connection " +
+                              "timed out, or G3NetworkSender offline. " +
" + + "Cleaning up...") self.close_file() + self.data_received = False self.reader = None def check_for_frame_gap(self, gap_size=5): From 4ad9d006f8b8dd77b9be70481b6b7a723b3f83e0 Mon Sep 17 00:00:00 2001 From: jlashner Date: Wed, 27 Jan 2021 20:31:34 -0500 Subject: [PATCH 13/43] Revert timeout change --- socs/agent/smurf_recorder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/socs/agent/smurf_recorder.py b/socs/agent/smurf_recorder.py index f6860e528..00e5553f6 100644 --- a/socs/agent/smurf_recorder.py +++ b/socs/agent/smurf_recorder.py @@ -232,7 +232,7 @@ def _establish_reader_connection(self, timeout=5): if (check_port(self.host, self.port)): try: reader = core.G3Reader(self.address, - timeout=0) + timeout=timeout) self.log.debug("G3Reader connection to {addr} established!", addr=self.address) except RuntimeError: From 9a9af28db6177731b99f034ae34d9eb22dd209e7 Mon Sep 17 00:00:00 2001 From: jlashner Date: Sun, 31 Jan 2021 12:13:55 -0500 Subject: [PATCH 14/43] Addressed brian's pr comments - pep8 - changed socket blocking to True and timeout to 0.0 sec. - docstrings --- socs/agent/smurf_recorder.py | 40 ++++++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/socs/agent/smurf_recorder.py b/socs/agent/smurf_recorder.py index 00e5553f6..94fd102a9 100644 --- a/socs/agent/smurf_recorder.py +++ b/socs/agent/smurf_recorder.py @@ -4,7 +4,6 @@ import time import txaio import numpy as np -import sys import socket # For logging @@ -23,14 +22,29 @@ class FlowControl(Enum): CLEANSE = 3 -def check_port(host, port, timeout=10): +def check_port(addr): + """ + This function checks if socket port is currently open on a host. This can + be used to check if the smurf-streamer has created it's G3NetworkSender + object without attempting to create a G3Reader. This function is + non-blocking and should return immediately. + + Parameters + ---------- + addr: str + Address describing the port to connect to. For example: + ``tcp://localhost:4532`` + """ + host, port = addr.split('//')[-1].split(':') + port = int(port) + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(timeout) - s.setblocking(1) + s.setblocking(True) + s.settimeout(0.0) try: s.connect((host, port)) return True - except socket.error as error: + except socket.error: return False finally: s.close() @@ -133,6 +147,8 @@ class FrameRecorder: G3Reader object to read the frames from the G3NetworkSender. writer : spt3g.core.G3Writer G3Writer for writing the frames to disk. + data_received : bool + Whether data has been received by the current instance of the G3Reader. frames : list List of frames that have been read from the network. Gets cleared after writing to file. 
@@ -177,8 +193,6 @@ def __init__(self, file_duration, tcp_addr, data_dir, stream_id,
         # Parameters
         self.time_per_file = file_duration
         self.address = tcp_addr
-        self.host, self.port = self.address[6:].split(':')
-        self.port = int(self.port)
         self.data_dir = data_dir
         self.stream_id = stream_id
         self.log = txaio.make_logger()
@@ -229,16 +243,15 @@ def _establish_reader_connection(self, timeout=5):
 
         """
         reader = None
 
-        if (check_port(self.host, self.port)):
+        if (check_port(self.address)):
             try:
                 reader = core.G3Reader(self.address,
                                        timeout=timeout)
                 self.log.debug("G3Reader connection to {addr} established!",
-                              addr=self.address)
+                               addr=self.address)
             except RuntimeError:
                 self.log.error("G3Reader could not connect.")
 
-
         # Prevent rapid connection attempts
         if self.last_connection_time is not None:
             t_diff = time.time() - self.last_connection_time
@@ -302,13 +315,14 @@ def read_frames(self, timeout=5):
                            if x.type != core.G3FrameType.PipelineInfo]
             if self.frames and not self.data_received:
                 self.data_received = True
-                self.log.info("Frames received from {addr}", addr=self.address)
+                self.log.info("Started receiving frames from {addr}",
+                              addr=self.address)
             return
         else:
             if self.data_received:
                 self.log.info("Could not read frames. Connection " +
-                              "timed out, or G3NetworkSender offline. " +
-                              "Cleaning up...")
+                              "timed out, or G3NetworkSender offline. " +
+                              "Cleaning up...")
             self.close_file()
             self.data_received = False
            self.reader = None

From b1fed73cf9a72432d6d7b4d7a77342102d78c89d Mon Sep 17 00:00:00 2001
From: Jake Spisak
Date: Mon, 8 Feb 2021 16:22:16 -0800
Subject: [PATCH 15/43] Catch labjack stream error

---
 agents/labjack/labjack_agent.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/agents/labjack/labjack_agent.py b/agents/labjack/labjack_agent.py
index fa5926ac3..2b1ec6730 100644
--- a/agents/labjack/labjack_agent.py
+++ b/agents/labjack/labjack_agent.py
@@ -11,6 +11,7 @@
 ON_RTD = os.environ.get('READTHEDOCS') == 'True'
 if not ON_RTD:
     from labjack import ljm
+    from labjack.ljm.ljm import LJMError
     from ocs import ocs_agent, site_config
     from ocs.ocs_twisted import TimeoutLock
 
@@ -270,8 +271,14 @@
 
             # Start the data stream. Use the scan rate returned by the stream,
             # which should be the same as the input scan rate.
-            scan_rate = ljm.eStreamStart(self.handle, scans_per_read, num_chs,
-                                         ch_addrs, scan_rate_input)
+            try:
+                scan_rate = ljm.eStreamStart(self.handle, scans_per_read, num_chs,
+                                             ch_addrs, scan_rate_input)
+            except LJMError:  # in case the stream is running
+                print("Stopping previous stream")
+                ljm.eStreamStop(self.handle)
+                scan_rate = ljm.eStreamStart(self.handle, scans_per_read, num_chs,
+                                             ch_addrs, scan_rate_input)
             print(f"\nStream started with a scan rate of {scan_rate} Hz.")
 
             cur_time = time.time()

From 48e29e8c1a13f80e97791e057325da2c118d76bb Mon Sep 17 00:00:00 2001
From: Jack Lashner
Date: Thu, 11 Feb 2021 14:01:02 -0800
Subject: [PATCH 16/43] Fix bug that prevents npy files from being archived

---
 agents/pysmurf_monitor/pysmurf_monitor.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/agents/pysmurf_monitor/pysmurf_monitor.py b/agents/pysmurf_monitor/pysmurf_monitor.py
index b7c6ee433..fae15be44 100644
--- a/agents/pysmurf_monitor/pysmurf_monitor.py
+++ b/agents/pysmurf_monitor/pysmurf_monitor.py
@@ -121,7 +121,7 @@ def datagramReceived(self, _data, addr):
                     'instance_id': instance,
                     'copied': 0,
                     'failed_copy_attempts': 0,
-                    'md5sum': get_md5sum(d['path']),
+                    'md5sum': get_md5sum(path),
                     'socs_version': socs.__version__,
                 }

From 77d52dd58f64cc7c42475cc8c2124271a71468cc Mon Sep 17 00:00:00 2001
From: Brian Koopman
Date: Sat, 13 Feb 2021 09:29:45 -0500
Subject: [PATCH 17/43] Update to Ubuntu 20.04 based ocs image

---
 Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index 73b767ef7..bf1d6a449 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,7 +2,7 @@
 # A container setup with an installation of socs.
 
 # Use the ocs image as a base
-FROM simonsobs/ocs:v0.7.1-9-g500447e-dev
+FROM simonsobs/ocs:v0.7.1-17-g1162576-dev
 
 # Copy the current directory contents into the container at /app
 COPY . /app/socs/

From 66bef107782e1dda0be5863b50c11c395f4a9f2d Mon Sep 17 00:00:00 2001
From: Michael Randall <59715680+mjrand@users.noreply.github.com>
Date: Thu, 18 Feb 2021 09:05:05 -0800
Subject: [PATCH 18/43] New Tektronix3021C AWG agent. (#99)

* Created Tektronix AWG agent

Fixed tektronix agent via Brian's comments

Made changes to tektronix agent via Brian's code

Fixed docs building issues

Delete test.py

* Fix docstring on TektronixAWGAgent class

* Added cmd to tektronix fieldnames

Co-authored-by: Brian Koopman
Co-authored-by: jlashner
---
 agents/tektronix3021c/Dockerfile         |  18 ++
 agents/tektronix3021c/tektronix_agent.py | 211 +++++++++++++++++++++++
 docs/agents/tektronix3021c.rst           |  85 +++++++++
 docs/conf.py                             |   6 +-
 docs/index.rst                           |   1 +
 socs/agent/tektronix3021c_driver.py      |  72 ++++++++
 6 files changed, 391 insertions(+), 2 deletions(-)
 create mode 100644 agents/tektronix3021c/Dockerfile
 create mode 100644 agents/tektronix3021c/tektronix_agent.py
 create mode 100644 docs/agents/tektronix3021c.rst
 create mode 100644 socs/agent/tektronix3021c_driver.py

diff --git a/agents/tektronix3021c/Dockerfile b/agents/tektronix3021c/Dockerfile
new file mode 100644
index 000000000..efd5143cb
--- /dev/null
+++ b/agents/tektronix3021c/Dockerfile
@@ -0,0 +1,18 @@
+# SOCS AWG Agent
+# socs Agent container for interacting with AWG over GPIB to Ethernet
+# converters.
+
+# Use socs base image
+FROM socs:latest
+
+# Set the working directory to registry directory
+WORKDIR /app/socs/agents/tektronix3021c/
+
+## Copy this agent into the app/agents directory
+COPY . .
+
+# Run agent on container startup
+ENTRYPOINT ["python3", "-u", "tektronix_agent.py"]
+
+CMD ["--site-hub=ws://sisock-crossbar:8001/ws", \
+     "--site-http=http://sisock-crossbar:8001/call"]
diff --git a/agents/tektronix3021c/tektronix_agent.py b/agents/tektronix3021c/tektronix_agent.py
new file mode 100644
index 000000000..74ea4ef88
--- /dev/null
+++ b/agents/tektronix3021c/tektronix_agent.py
@@ -0,0 +1,211 @@
+"""Michael Randall
+mrandall@ucsd.edu"""
+
+import time
+import os
+import socket
+import argparse
+
+from socs.agent.tektronix3021c_driver import tektronixInterface
+
+on_rtd = os.environ.get('READTHEDOCS') == 'True'
+if not on_rtd:
+    from ocs import ocs_agent, site_config
+    from ocs.ocs_twisted import TimeoutLock
+
+
+class TektronixAWGAgent:
+    """Tektronix3021c Agent.
+
+    Args:
+        ip_address (string): the IP address of the gpib to ethernet
+            controller connected to the function generator.
+
+        gpib_slot (int): the gpib address currently set
+            on the function generator.
+
+    """
+    def __init__(self, agent, ip_address, gpib_slot):
+        self.agent = agent
+        self.log = agent.log
+        self.lock = TimeoutLock()
+
+        self.job = None
+
+        self.ip_address = ip_address
+        self.gpib_slot = gpib_slot
+        self.monitor = False
+
+        self.awg = None
+        # Registers data feeds
+        agg_params = {
+            'frame_length': 60,
+        }
+        self.agent.register_feed('awg',
+                                 record=True,
+                                 agg_params=agg_params)
+
+    def init_awg(self, session, params=None):
+        """ Task to connect to Tektronix AWG """
+
+        with self.lock.acquire_timeout(0) as acquired:
+            if not acquired:
+                return False, "Could not acquire lock"
+
+            try:
+                self.awg = tektronixInterface(self.ip_address, self.gpib_slot)
+                self.idn = self.awg.identify()
+
+            except socket.timeout as e:
+                self.log.error("""Tektronix AWG
+                               timed out during connect -> {}""".format(e))
+                return False, "Timeout"
+
+            self.log.info("Connected to AWG: {}".format(self.idn))
+
+        return True, 'Initialized AWG.'
+
+    def set_frequency(self, session, params=None):
+        """
+        Sets frequency of function generator:
+
+        Args:
+            frequency (float): Frequency to set in Hz.
+                Must be between 0 and 25,000,000.
+        """
+
+        with self.lock.acquire_timeout(1) as acquired:
+            if acquired:
+                freq = params.get("frequency")
+
+                try:
+                    float(freq)
+
+                except ValueError as e:
+                    return False, """Frequency must
+                        be a float or int -> {}""".format(e)
+
+                except TypeError as e:
+                    return False, """Frequency must
+                        not be of NoneType -> {}""".format(e)
+
+                if 0 < freq < 25E6:
+                    self.awg.setFreq(freq)
+
+                    data = {'timestamp': time.time(),
+                            'block_name': "AWG_frequency_cmd",
+                            'data': {'AWG_frequency_cmd': freq}
+                            }
+                    self.agent.publish_to_feed('awg', data)
+
+                else:
+                    return False, """Invalid input:
+                        Frequency must be between 0 and 25,000,000 Hz"""
+
+            else:
+                return False, "Could not acquire lock"
+
+        return True, 'Set frequency {} Hz'.format(params)
+
+    def set_amplitude(self, session, params=None):
+        """
+        Sets the peak-to-peak amplitude of the function generator:
+
+        Args:
+            amplitude (float): Peak to Peak voltage to set.
+                Must be between 0 and 10.
+        """
+        with self.lock.acquire_timeout(1) as acquired:
+            if acquired:
+                amp = params.get('amplitude')
+                try:
+                    float(amp)
+
+                except ValueError as e:
+                    return False, """Amplitude must be
+                        a float or int -> {}""".format(e)
+
+                except TypeError as e:
+                    return False, """Amplitude must not be
+                        of NoneType -> {}""".format(e)
+
+                if 0 < amp < 10:
+                    self.awg.setAmp(amp)
+
+                    data = {'timestamp': time.time(),
+                            'block_name': "AWG_amplitude_cmd",
+                            'data': {'AWG_amplitude_cmd': amp}
+                            }
+                    self.agent.publish_to_feed('awg', data)
+
+                else:
+                    return False, """Amplitude must be
+                        between 0 and 10 Volts peak to peak"""
+
+            else:
+                return False, "Could not acquire lock"
+
+        return True, 'Set amplitude to {} Vpp'.format(params)
+
+    def set_output(self, session, params=None):
+        """
+        Task to turn channel on or off.
+
+        Args:
+            state (bool): True for on, False for off.
+        """
+        with self.lock.acquire_timeout(1) as acquired:
+            if acquired:
+                state = params.get("state")
+
+                try:
+                    bool(state)
+
+                except ValueError as e:
+                    return False, "State must be a boolean -> {}".format(e)
+
+                except TypeError as e:
+                    return False, """State must not
+                        be of NoneType -> {}""".format(e)
+
+                self.awg.setOutput(state)
+
+                data = {'timestamp': time.time(),
+                        'block_name': "AWG_output_cmd",
+                        'data': {'AWG_output_cmd': int(state)}
+                        }
+                self.agent.publish_to_feed('awg', data)
+
+            else:
+                return False, "Could not acquire lock"
+
+        return True, 'Set Output to {}.'.format(params)
+
+
+def make_parser(parser=None):
+    if parser is None:
+        parser = argparse.ArgumentParser()
+
+    pgroup = parser.add_argument_group('Agent Options')
+    pgroup.add_argument('--ip-address', type=str,
+                        help="IP address of tektronix device")
+    pgroup.add_argument('--gpib-slot', type=int,
+                        help="GPIB slot of tektronix device")
+    return parser
+
+
+if __name__ == '__main__':
+
+    parser = make_parser()
+    args = site_config.parse_args(agent_class="Tektronix AWG", parser=parser)
+
+    agent, runner = ocs_agent.init_site_agent(args)
+
+    p = TektronixAWGAgent(agent, args.ip_address, args.gpib_slot)
+
+    agent.register_task('init', p.init_awg, startup=True)
+    agent.register_task('set_frequency', p.set_frequency)
+    agent.register_task('set_amplitude', p.set_amplitude)
+    agent.register_task('set_output', p.set_output)
+
+    runner.run(agent, auto_reconnect=True)
diff --git a/docs/agents/tektronix3021c.rst b/docs/agents/tektronix3021c.rst
new file mode 100644
index 000000000..8783ae317
--- /dev/null
+++ b/docs/agents/tektronix3021c.rst
@@ -0,0 +1,85 @@
+.. highlight:: rst
+
+.. _tektronix3021c:
+
+===================
+Tektronix AWG Agent
+===================
+
+This agent uses Standard Commands for Programmable Instruments (SCPI).
+It works for many function generators, including the Tektronix3021c.
+It connects to the function generator over ethernet, and allows
+users to set frequency, peak to peak voltage, and turn the AWG on/off.
+
+.. argparse::
+    :filename: ../agents/tektronix3021c/tektronix_agent.py
+    :func: make_parser
+    :prog: python3 tektronix_agent.py
+
+
+Configuration File Examples
+---------------------------
+Below are configuration examples for the ocs config file and for running the
+Agent in a docker container.
+
+ocs-config
+``````````
+To configure the Tektronix AWG Agent we need to add a block to our ocs
+configuration file. Here is an example configuration block using all of
+the available arguments::
+
+  {'agent-class': 'TektronixAWGAgent',
+   'instance-id': 'tektronix',
+   'arguments': [
+      ['--ip-address', '10.10.10.5'],
+      ['--gpib-slot', '1']
+      ]},
+
+Most function generators (including the Tektronix 3021c)
+have GPIB ports rather than ethernet ports. Therefore a GPIB-to-ethernet
+converter is required, and the gpib slot must be specified in the ocs
+configuration file. The IP address is then associated with the converter.
+
+Docker
+``````
+The Tektronix AWG Agent should be configured to run in a Docker container.
+An example docker-compose service configuration is shown here::
+
+  ocs-tektronix:
+    image: simonsobs/ocs-tektronix-agent:latest
+    hostname: ocs-docker
+    volumes:
+      - ${OCS_CONFIG_DIR}:/config:ro
+    command:
+      - "--instance-id=tektronix"
+
+Example Client
+--------------
+Below is an example client demonstrating full agent functionality.
+Note that all tasks can be run even while the data acquisition process
+is running::
+
+    from ocs.matched_client import MatchedClient
+
+    #Initialize the function generator
+    tek = MatchedClient('tektronix', args=[])
+    tek.init.start()
+    tek.init.wait()
+
+    #Set AWG frequency
+    tek.set_frequency.start(frequency=200)
+    tek.set_frequency.wait()
+
+    #Set AWG peak to peak voltage
+    tek.set_amplitude.start(amplitude=5)
+    tek.set_amplitude.wait()
+
+    #Set AWG on/off
+    tek.set_output.start(state=True)
+    tek.set_output.wait()
+
+Agent API
+---------
+
+.. autoclass:: agents.tektronix3021c.tektronix_agent.TektronixAWGAgent
+    :members: set_frequency, set_amplitude, set_output
diff --git a/docs/conf.py b/docs/conf.py
index fbf93f5ed..febb8d695 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -72,6 +72,10 @@
 # The name of the Pygments (syntax highlighting) style to use.
 pygments_style = 'sphinx'
 
+autodoc_mock_imports = ['spt3g', 'so3g', 'labjack']
+from unittest import mock
+for m in autodoc_mock_imports:
+    sys.modules[m] = mock.Mock()
 
 # -- Options for HTML output -------------------------------------------------
 
@@ -171,5 +175,3 @@
 
 # If true, `todo` and `todoList` produce output, else they produce nothing.
 todo_include_todos = True
-
-
diff --git a/docs/index.rst b/docs/index.rst
index 9f900d3eb..47ca02f6d 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -47,6 +47,7 @@ Simulator Reference
    agents/scpi_psu
    agents/smurf_recorder
    agents/synacc
+   agents/tektronix3021c
 
 .. toctree::
    :caption: Simulator Reference
diff --git a/socs/agent/tektronix3021c_driver.py b/socs/agent/tektronix3021c_driver.py
new file mode 100644
index 000000000..e0dce908f
--- /dev/null
+++ b/socs/agent/tektronix3021c_driver.py
@@ -0,0 +1,72 @@
+"""Michael Randall
+   mrandall@ucsd.edu"""
+
+from socs.agent import prologixInterface
+
+
+class tektronixInterface:
+
+    def __init__(self, ip_address, gpibAddr, verbose=True):
+        self.pro = prologixInterface.prologixInterface(ip=ip_address)
+
+        self.verbose = verbose
+        self.gpibAddr = gpibAddr
+
+    def connGpib(self):
+        self.pro.write('++addr ' + str(self.gpibAddr))
+
+    def write(self, msg):
+        self.connGpib()
+        self.pro.write(msg)
+
+    def read(self):
+        return self.pro.read()
+
+    def identify(self):
+        self.write('*idn?')
+        return self.read()
+
+    def setFreq(self, freq):
+        # gpib_connect(awg_gpib_addr)
+        self.write('SOUR:FREQ {:.3f}\n'.format(freq))
+
+        """
+        if self.verbose:
+            self.p.send('SOUR:FREQ?\n')
+            freq_set = float(self.p.recv(128).rstrip())
+
+            print('freq: {:.3e} Hz').format(freq_set)
+        """
+
+    def setAmp(self, amp):
+        # self.gpib_connect(self.awg_gpib_address)
+        self.write('SOUR:VOLT {:.3f}\n'.format(amp))
+
+        """
+        if self.verbose:
+            self.p.send('SOUR:VOLT?\n')
+            amp_set = float(self.p.recv(128).rstrip())
+
+            print('set amp: {:.3e} V'.format(amp_set))
+        """
+
+    def setOutput(self, state):
+        # self.gpib_connect(self.awg_gpib_address)
+        self.write('OUTP:STAT {:.0f}\n'.format(state))
+
+        """
+        if self.verbose:
+            self.p.send('OUTP:STAT?\n')
+            state_set = float(self.p.recv(128).rstrip())
+
+            print('output state: {:.0f}'.format(state_set))
+        """

From 135df3bc08c7c9745d9291bc1d72e6c29feff7eb Mon Sep 17 00:00:00 2001
From: jlashner
Date: Thu, 18 Feb 2021 12:21:31 -0500
Subject: [PATCH 19/43] Add tektronix3021 to docker-compose

---
 docker-compose.yml | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/docker-compose.yml b/docker-compose.yml
index 622d32934..13f98ca35 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -109,6 +109,13 @@ services:
     image: "ocs-meinberg-m1000-agent"
     build: ./agents/meinberg_m1000/
 
+  # --------------------------------------------------------------------------
+  # Tektronix3021c
+  # --------------------------------------------------------------------------
+  ocs-tektronix3021c-agent:
+    image: "ocs-tektronix3021c-agent"
+    build: ./agents/tektronix3021c/
+
   # --------------------------------------------------------------------------
   # SOCS Simulators
   # --------------------------------------------------------------------------

From 9c990b66e29b6da00e029ac563894d47df081650 Mon Sep 17 00:00:00 2001
From: Max Silva-Feaver
Date: Fri, 19 Feb 2021 12:58:31 -0800
Subject: [PATCH 20/43] Created smurf crate monitor agent (#139)

* Created smurf crate monitor agent

* Fixed typo in rst file crashing docs build test

* Removed argparse usage in docs.

* Removed emacs buffer files

* Swapped tabs for spaces and added check for exit condition during
  acquisition wait time

* Addressed syntax issues, print statements, doc errors, from Jack and Matthew

* Added agent reference to index.rst file

* Added Dockerfile to the main docker-compose.yaml

* Fixed parser build issues, links to ssh setup, and other comments

* Fixed bad comments and entrypoint

* Removed unused methods from class, fixed logging, added parser fn for docs.
* Moved log statement to __main__

Co-authored-by: jseibert575
---
 agents/smurf_crate_monitor/Dockerfile |  17 ++
 .../smurf_crate_monitor.py            | 287 ++++++++++++++++++
 docker-compose.yml                    |  10 +-
 docs/agents/smurf_crate_monitor.rst   | 102 +++++++
 docs/index.rst                        |   1 +
 5 files changed, 415 insertions(+), 2 deletions(-)
 create mode 100644 agents/smurf_crate_monitor/Dockerfile
 create mode 100644 agents/smurf_crate_monitor/smurf_crate_monitor.py
 create mode 100644 docs/agents/smurf_crate_monitor.rst

diff --git a/agents/smurf_crate_monitor/Dockerfile b/agents/smurf_crate_monitor/Dockerfile
new file mode 100644
index 000000000..8e696038e
--- /dev/null
+++ b/agents/smurf_crate_monitor/Dockerfile
@@ -0,0 +1,17 @@
+# OCS SMuRF Crate Agent
+# ocs Agent container for running the smurf crate monitor.
+
+# Use ocs base image
+FROM socs:latest
+
+# Set the working directory to registry directory
+WORKDIR /app/socs/agents/smurf_crate_monitor/
+
+COPY . .
+
+# Run registry on container startup
+ENTRYPOINT ["dumb-init", "python3", "-u", "smurf_crate_monitor.py"]
+
+# Sensible defaults for setup with sisock
+CMD ["--site-hub=ws://sisock-crossbar:8001/ws", \
+     "--site-http=http://sisock-crossbar:8001/call"]
diff --git a/agents/smurf_crate_monitor/smurf_crate_monitor.py b/agents/smurf_crate_monitor/smurf_crate_monitor.py
new file mode 100644
index 000000000..d80cc4ee6
--- /dev/null
+++ b/agents/smurf_crate_monitor/smurf_crate_monitor.py
@@ -0,0 +1,287 @@
+from ocs import ocs_agent, site_config
+import time
+import argparse
+import numpy as np
+import subprocess
+import txaio
+txaio.use_twisted()
+
+
+def get_sensors(shm_addr):
+    """
+    Runs a command on the shelf manager that returns a list of all
+    of the available sensors to stdout. Uses subprocess module to
+    read stdout and identify the ipmb address and sensor id for all
+    sensors which are Threshold type as opposed to discrete type,
+    which are alarms.
+    Args:
+        shm_addr (str):
+            Address used to connect to shelf manager ex. root@192.168.1.2
+    Returns:
+        ipmbs (str list):
+            List of Intelligent Platform Management Bus (IPMB) addresses
+        sensids (str list):
+            List of sensor identification names, same length as ipmbs list.
+    """
+    # SSH to shelf manager
+    cmd = ['ssh', f'{shm_addr}\n']
+    # Send command to shelf manager
+    cmd += ['clia', 'sensordata\n']
+    # Initialize output data
+    ipmbs = []
+    sensids = []
+    masksens = []
+    check_sense = False
+
+    # Send command to ssh and run command on shelf
+    ssh = subprocess.Popen(cmd,
+                           shell=False,
+                           stdout=subprocess.PIPE,
+                           stderr=subprocess.PIPE)
+    # Readback shelfmanager standard out
+    result = ssh.stdout.readlines()
+    # Parse readback data line by line unless empty
+    if result == []:
+        error = ssh.stderr.readlines()
+        LOG.error("ERROR: %s" % error)
+    else:
+        for r in result:
+            if ': LUN' in r.decode('utf-8'):
+                check_sense = True
+                ipmbs.append(r.decode('utf-8').split(': LUN')[0])
+                sname = r.decode('utf-8').split('(')[-1].split(')')[0]
+                sensids.append(sname)
+                continue
+            if check_sense:
+                if 'Threshold' in r.decode('utf-8'):
+                    masksens.append(True)
+                if 'Discrete' in r.decode('utf-8'):
+                    masksens.append(False)
+                check_sense = False
+    ipmbs = np.asarray(ipmbs)
+    sensids = np.asarray(sensids)
+    masksens = np.asarray(masksens)
+    return ipmbs[masksens], sensids[masksens]
+
+
+def get_channel_names(ipmbs):
+    """
+    Converts ipmb addresses to human readable names based on the
+    definitions of ipmb addresses in the ATCA manuals.
+    Args:
+        ipmbs (str list):
+            List of Intelligent Platform Management Bus (IPMB) addresses
+    Returns:
+        chan_names (str list):
+            List of human readable names for each IPMB address.
+    """
+    chan_names = np.zeros(len(ipmbs)).astype(str)
+    for i, ipmb in enumerate(ipmbs):
+        if ipmb == '20':
+            chan_names[i] = 'shelf'
+            continue
+        if ipmb == 'fe':
+            chan_names[i] = 'pwr_mgmt'
+            continue
+        slot = int('0x'+ipmb, 16)//2-64
+        if slot == 1:
+            chan_names[i] = 'switch'
+            continue
+        chan_names[i] = f'slot{slot}'
+    return chan_names
+
+
+def get_data_dict(shm_addr, ipmbs, sensids, chan_names,
+                  crate_id):
+    """
+    Given a list of ipmb addresses, sensor ids, and channel names,
+    the shelf manager is queried and the current sensor values for
+    the provided list of sensors are read. The values are then
+    output in a dictionary in the format needed to publish to
+    influxdb.
+    Args:
+        shm_addr (str):
+            Address used to connect to shelf manager ex. root@192.168.1.2
+        ipmbs (str list):
+            List of Intelligent Platform Management Bus (IPMB) addresses.
+        sensids (str list):
+            List of sensor identification names, same length as ipmbs list.
+        chan_names (str list):
+            List of human readable names for each IPMB address.
+        crate_id (str):
+            String to identify crate number in feed names, ex: crate_1
+    Returns:
+        data_dict (dict):
+            Dict with structure, {data : value} collects the output
+            of all of the sensors passed into the function. Ensures the
+            keys match the influxdb feedname requirements
+    """
+    data_dict = {}
+    cmd = ['ssh', f'{shm_addr}\n', 'clia', 'sensordata\n']
+    ssh = subprocess.Popen(cmd,
+                           shell=False,
+                           stdout=subprocess.PIPE,
+                           stderr=subprocess.PIPE)
+    result = ssh.stdout.readlines()
+    if result == []:
+        error = ssh.stderr.readlines()
+        LOG.error("ERROR: %s" % error)
+    else:
+        for ipmb, sensid, chan_name in zip(ipmbs, sensids, chan_names):
+            sense_chan = False
+            for r in result:
+                if ipmb in r.decode('utf-8'):
+                    if sensid in r.decode('utf-8'):
+                        sense_chan = True
+                        continue
+                if sense_chan:
+                    if 'Processed data:' in r.decode('utf-8'):
+                        sid = sensid.strip('"')
+                        sid = sid.replace(" ", "_")
+                        sid = sid.replace(":", "")
+                        sid = sid.replace("+", "")
+                        sid = sid.replace(".", "p")
+                        line = r.strip().decode("utf-8")
+                        if line.split(':')[-1].split(' ')[0] == '':
+                            val = float(line.split(':')[-1].split(' ')[1])
+                        else:
+                            val = float(line.split(':')[-1].split(' ')[0])
+                        data_dict[f'{crate_id}_{chan_name}_{sid}'] = val
+                        sense_chan = False
+    return data_dict
+
+
+class SmurfCrateMonitor:
+    def __init__(self, agent, crate_id, shm_addr):
+        self.agent = agent
+        self.log = agent.log
+        self.shm_addr = shm_addr
+        self.crate_id = crate_id
+        # Register feed
+        agg_params = {
+            'frame_length': 10*60
+        }
+        self.log.info('registering')
+        self.agent.register_feed('smurf_sensors',
+                                 record=True,
+                                 agg_params=agg_params,
+                                 buffer_time=0.)
+
+    def init_data_stream(self, shm_addr):
+        """
+        Wrapper for get_sensors and get_channel_names which generates
+        the list of sensors to use in datastreaming.
+        Args:
+            shm_addr (str):
+                Address used to connect to shelf manager ex. root@192.168.1.2
+        Return:
+            ipmbs (str list):
+                List of Intelligent Platform Management Bus (IPMB) addresses.
+            sensids (str list):
+                List of sensor identification names, same length as ipmbs list.
+            chan_names (str list):
+                List of human readable names for each IPMB address.
+        """
+        ipmbs, sensids = get_sensors(shm_addr)
+        chan_names = get_channel_names(ipmbs)
+        return ipmbs, sensids, chan_names
+
+    def init_crate(self, session, params=None):
+        """
+        Run at the startup of the docker to check that you can successfully
+        ssh to the crate and run a command. If it runs successfully then
+        you should see the home directory of the shelf manager printed to
+        the docker logs and the data acquisition process to start, if not
+        you will see an error in the logs and acquisition won't start.
+        """
+        self.log.info(self.shm_addr)
+        cmd = ['ssh', f'{self.shm_addr}\n', 'pwd\n']
+        self.log.info("command run: {c}", c=cmd)
+        ssh = subprocess.Popen(cmd,
+                               shell=False,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE)
+        result = ssh.stdout.readlines()
+        self.log.info(result[0])
+        if result == []:
+            error = ssh.stderr.readlines()
+            self.log.error(f"ERROR: {error}")
+            return False, 'Crate failed to initialize'
+        if result[0].decode("utf-8") == '/etc/home/root\n':
+            self.log.info('Successfully ssh-d into shelf')
+            self.agent.start('acq')
+        return True, 'Crate Initialized'
+
+    def start_acq(self, session, params=None):
+        """
+        Starts acquiring data, hardcoded for one data point every 30
+        seconds because we intend for this to be very low rate data.
+        """
+        self.log.info('Started acquisition')
+        shm_addr = self.shm_addr
+        ipmbs, sensids, chan_names = self.init_data_stream(shm_addr=shm_addr)
+        self.log.info('Got sensor names')
+        self.take_data = True
+        while self.take_data:
+            for _ in range(30):
+                if not self.take_data:
+                    break
+                time.sleep(1)
+            datadict = get_data_dict(shm_addr=self.shm_addr,
+                                     ipmbs=ipmbs,
+                                     sensids=sensids,
+                                     chan_names=chan_names,
+                                     crate_id=self.crate_id)
+            data = {
+                'timestamp': time.time(),
+                'block_name': f'smurf_{self.crate_id}',
+                'data': datadict
+            }
+            self.agent.publish_to_feed('smurf_sensors', data)
+        return True, 'Acquisition exited cleanly'
+
+    def stop_acq(self, session, params=None):
+        """
+        Stops acquiring data if the docker is stopped.
+        """
+        if self.take_data:
+            self.take_data = False
+            return True, 'requested to stop taking data.'
+        else:
+            return False, 'acq is not currently running'
+
+
+def make_parser(parser=None):
+    """
+    Build the argument parser for the Agent. Allows sphinx to automatically
+    build documentation based on this function.
+    """
+    if parser is None:
+        parser = argparse.ArgumentParser()
+    # Add options specific to this agent.
+    pgroup = parser.add_argument_group("Agent Options")
+    pgroup.add_argument('--shm-addr',
+                        help='Shelf manager address i.e. root@192.168.1.2')
+    pgroup.add_argument('--crate-id',
+                        help='Crate id used for block_name')
+    return parser
+
+
+if __name__ == '__main__':
+    LOG = txaio.make_logger()
+    parser = make_parser()
+    args = site_config.parse_args(agent_class='CrateAgent',
+                                  parser=parser)
+    startup = True
+    agent, runner = ocs_agent.init_site_agent(args)
+    shm_addr = args.shm_addr
+    crate_id = args.crate_id
+
+    smurfcrate = SmurfCrateMonitor(agent, crate_id, shm_addr)
+
+    agent.register_task('init_crate', smurfcrate.init_crate,
+                        startup=startup)
+    agent.register_process('acq', smurfcrate.start_acq,
+                           smurfcrate.stop_acq)
+
+    runner.run(agent, auto_reconnect=True)
diff --git a/docker-compose.yml b/docker-compose.yml
index 13f98ca35..09de0eb71 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -109,6 +109,13 @@ services:
     image: "ocs-meinberg-m1000-agent"
     build: ./agents/meinberg_m1000/
 
+  # -------------------------------------------------------------------------
+  # SMuRF Crate Monitor
+  # -------------------------------------------------------------------------
+  ocs-smurf-crate-monitor:
+    image: "ocs-smurf-crate-monitor"
+    build: ./agents/smurf_crate_monitor/
+
   # --------------------------------------------------------------------------
   # Tektronix3021c
   # --------------------------------------------------------------------------
@@ -132,5 +139,4 @@
   # --------------------------------------------------------------------------
   ocs-lakeshore240-simulator:
     image: "ocs-lakeshore240-simulator"
-    build: ./simulators/lakeshore240/
-
+    build: ./simulators/lakeshore240/
diff --git a/docs/agents/smurf_crate_monitor.rst b/docs/agents/smurf_crate_monitor.rst
new file mode 100644
index 000000000..933a8d090
--- /dev/null
+++ b/docs/agents/smurf_crate_monitor.rst
@@ -0,0 +1,102 @@
+.. highlight:: rst
+
+.. _smurf_crate_monitor:
+
+=========================
+Smurf Crate Monitor Agent
+=========================
+
+The SMuRF readout system uses Advanced Telecommunications Computing Architecture
+(ATCA) crates for powering and communicating between boards and the site
+networking and timing infrastructure. These crates have a small computer on
+board, called a shelf manager, which monitors all of the sensors in the crate,
+including ammeters and voltmeters for the power into the crates and into each
+front and rear module of each active slot used in the crate. There are also
+tachometers on each of the crate fans and various thermometers within the crate
+and on each of the boards plugged into the crate, all of which the shelf manager
+monitors. There are multiple crate manufacturers, but the shelf managers all
+share the same set of programming/communication tools, called Pigeon Point
+communication, so this agent should work across multiple crate manufacturers.
+This agent connects to a shell terminal of a crate shelf manager over ssh
+through the python subprocess package and then runs the command
+'clia sensordata', parsing its output to identify all of the available sensors,
+which it then streams and publishes.
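+
+For reference, the relevant lines of a single (hypothetical, abbreviated)
+sensor record in the 'clia sensordata' output look something like::
+
+    20: LUN: 0, Sensor # 2 ("Fan Tach 1")
+        Type: Threshold (0x01), "Fan" (0x04)
+        Processed data: 3000.000000 RPM
+
+The agent matches on the IPMB address and the quoted sensor id, then reads the
+value from the following 'Processed data:' line.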
+
+.. argparse::
+    :filename: ../agents/smurf_crate_monitor/smurf_crate_monitor.py
+    :func: make_parser
+    :prog: python3 smurf_crate_monitor.py
+
+Configuration File Examples
+---------------------------
+Below are configuration examples for the ocs config file and for running the
+Agent in a docker container.
+
+ocs-config
+``````````
+To configure the SMuRF Crate Monitor Agent we need to add a CrateAgent entry
+to our site configuration file. Here is an example configuration block using
+all of the available arguments::
+
+      {'agent-class': 'CrateAgent',
+       'instance-id': 'crate1-monitor',
+       'arguments': [
+         ['--shm-addr', 'root@192.168.1.2'],
+         ['--crate-id', 'crate1'],
+         ]},
+
+Both arguments are required to run. The 'shm-addr' argument should always
+use root as the user; the ip address will depend on the setup of the shelf
+manager at your site. The '192.168.1.2' address is the default address set
+up by following the 'smurfsetup' instructions on the Simons wiki for SO
+testing institutions. You should make sure that you can ssh from the
+computer the docker container will run on to the shelf manager directly.
+Additionally, in order to connect through the docker container you will
+need to set up ssh keys with the ocs-user following these steps:
+
+1. Make sure ocs-user has an ssh key generated. See
+   http://simonsobservatory.wikidot.com/daq:smurf-ssh-permissions for more info
+
+2. Switch to the ocs user using 'sudo su ocs'
+
+3. 'ssh' into the smurf-crate and add ssh host-verification when prompted
+
+4. Copy the ocs-user ssh key using 'ssh-copy-id'
+
+You also need to add the ocs-base anchor and mount the home directory of
+the ocs-user in your 'docker-compose' file; see below for an example.
+
+The second argument, 'crate-id', is just an identifier for your feed names
+to distinguish between identical sensors on different crates.
+
+Docker
+``````
+The SMuRF Crate Agent should be configured to run in a Docker container. An
+example docker-compose service configuration is shown here::
+
+  ocs-smurf-crate-monitor:
+    <<: *ocs-base
+    image: ocs-smurf-crate-monitor:latest
+    hostname: adaq1-docker
+    network_mode: "host"
+    volumes:
+      - ${OCS_CONFIG_DIR}:/config
+      - /home/ocs:/home/ocs
+    command:
+      - "--instance-id=crate1-monitor"
+
+An example of the 'ocs-base' anchor is shown here::
+
+  x-ocs-base: &ocs-base
+    hostname: adaq1-docker
+    user: "9000"
+    environment:
+      LOGLEVEL: debug
+    volumes:
+      - ${OCS_CONFIG_DIR}:/config
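+
+Example Client
+``````````````
+Once the agent is running, a minimal (hypothetical) client script to
+initialize the crate, which also kicks off the 'acq' process on success,
+might look like::
+
+    from ocs.matched_client import MatchedClient
+
+    crate = MatchedClient('crate1-monitor')
+    crate.init_crate.start()
+    crate.init_crate.wait()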
+
+Agent API
+---------
+
+.. autoclass:: agents.smurf_crate_monitor.smurf_crate_monitor.SmurfCrateMonitor
+    :members: init_crate, start_acq
diff --git a/docs/index.rst b/docs/index.rst
index 47ca02f6d..8521ade03 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -45,6 +45,7 @@ Simulator Reference Simulators are used to mock software and hardware
    agents/pfeiffer
    agents/pysmurf/index
    agents/scpi_psu
+   agents/smurf_crate_monitor
    agents/smurf_recorder
    agents/synacc
    agents/tektronix3021c

From 8128c64e1fb8d4702d8deeaabd531bcd7735d043 Mon Sep 17 00:00:00 2001
From: Brian Koopman
Date: Fri, 19 Feb 2021 17:43:39 -0500
Subject: [PATCH 21/43] Protect ocs import on rtd in crate manager agent

---
 agents/smurf_crate_monitor/smurf_crate_monitor.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/agents/smurf_crate_monitor/smurf_crate_monitor.py b/agents/smurf_crate_monitor/smurf_crate_monitor.py
index d80cc4ee6..7adddbdd2 100644
--- a/agents/smurf_crate_monitor/smurf_crate_monitor.py
+++ b/agents/smurf_crate_monitor/smurf_crate_monitor.py
@@ -1,4 +1,4 @@
-from ocs import ocs_agent, site_config
+import os
 import time
 import argparse
 import numpy as np
@@ -6,6 +6,10 @@
 import txaio
 txaio.use_twisted()
 
+ON_RTD = os.environ.get('READTHEDOCS') == 'True'
+if not ON_RTD:
+    from ocs import ocs_agent, site_config
+
 
 def get_sensors(shm_addr):
     """

From 2d703602eed22681db87ccc7f345b9c2fcbf4620 Mon Sep 17 00:00:00 2001
From: Jack Lashner
Date: Wed, 3 Mar 2021 12:12:16 -0800
Subject: [PATCH 22/43] Remove check_port test (#146)

Co-authored-by: jseibert575
---
 socs/agent/smurf_recorder.py | 43 ++++++------------------------------
 1 file changed, 7 insertions(+), 36 deletions(-)

diff --git a/socs/agent/smurf_recorder.py b/socs/agent/smurf_recorder.py
index 94fd102a9..e2e2e6b7a 100644
--- a/socs/agent/smurf_recorder.py
+++ b/socs/agent/smurf_recorder.py
@@ -22,34 +22,6 @@ class FlowControl(Enum):
     CLEANSE = 3
 
 
-def check_port(addr):
-    """
-    This function checks if socket port is currently open on a host. This can
-    be used to check if the smurf-streamer has created it's G3NetworkSender
-    object without attempting to create a G3Reader. This function is
-    non-blocking and should return immediately.
-
-    Parameters
-    ----------
-    addr: str
-        Address describing the port to connect to. For example:
-        ``tcp://localhost:4532``
-    """
-    host, port = addr.split('//')[-1].split(':')
-    port = int(port)
-
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    s.setblocking(True)
-    s.settimeout(0.0)
-    try:
-        s.connect((host, port))
-        return True
-    except socket.error:
-        return False
-    finally:
-        s.close()
-
-
 def _create_dirname(start_time, data_dir, stream_id):
     """Create the file path for .g3 file output.
@@ -243,14 +215,13 @@ def _establish_reader_connection(self, timeout=5): """ reader = None - if (check_port(self.address)): - try: - reader = core.G3Reader(self.address, - timeout=timeout) - self.log.debug("G3Reader connection to {addr} established!", - addr=self.address) - except RuntimeError: - self.log.error("G3Reader could not connect.") + try: + reader = core.G3Reader(self.address, + timeout=timeout) + self.log.debug("G3Reader connection to {addr} established!", + addr=self.address) + except RuntimeError: + self.log.error("G3Reader could not connect.") # Prevent rapid connection attempts if self.last_connection_time is not None: From 39ea8966577652dc836d53b09e8c496a556a6c77 Mon Sep 17 00:00:00 2001 From: Brian Koopman Date: Thu, 18 Feb 2021 11:24:54 -0500 Subject: [PATCH 23/43] Create GpibInterface class for GPIB Agents Both the Tektronix and SCPI PSU interface code use this Prologix interface and were replicating some of the same code. This makes a GpibInterface class they can both inherit from. --- socs/agent/prologixInterface.py | 48 ++++++++++++++------- socs/agent/scpi_psu_driver.py | 49 +++++++--------------- socs/agent/tektronix3021c_driver.py | 65 ++--------------------------- 3 files changed, 54 insertions(+), 108 deletions(-) diff --git a/socs/agent/prologixInterface.py b/socs/agent/prologixInterface.py index 32d1265ca..2de776187 100644 --- a/socs/agent/prologixInterface.py +++ b/socs/agent/prologixInterface.py @@ -1,37 +1,57 @@ -import socket as socket +import socket DEFAULT_ESCAPE = 'xYzZyX' -class prologixInterface: +class GpibInterface: + def __init__(self, ip_address, gpibAddr): + self.pro = PrologixInterface(ip=ip_address) + self.gpibAddr = gpibAddr - def __init__(self, ip, escapeString=DEFAULT_ESCAPE): + def connGpib(self): + self.pro.write('++addr ' + str(self.gpibAddr)) + + def write(self, msg): + self.connGpib() + self.pro.write(msg) + + def read(self): + return self.pro.read() + + def identify(self): + self.write('*idn?') + return self.read() + + +class PrologixInterface: + def __init__(self, ip, escape_string=DEFAULT_ESCAPE): self.ip = ip - self.escapeString = escapeString - #self.gpibAddr = gpibAddr + self.escape_string = escape_string + # self.gpibAddr = gpibAddr + self.sock = None self.connSocket() self.configure() def connSocket(self): - self.pro = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.pro.connect((self.ip, 1234)) - self.pro.settimeout(5) + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock.connect((self.ip, 1234)) + self.sock.settimeout(5) def configure(self): self.write('++mode 1\n') self.write('++auto 1\n') - #self.write('++addr ' + str(self.gpibAddr)) + # self.write('++addr ' + str(self.gpibAddr)) def write(self, msg): message = msg + '\n' - self.pro.send(message.encode()) + self.sock.send(message.encode()) -# def writeGpib(self, gpibAddr, msg): -# self.write('++addr ' + str(gpibAddr)) -# self.write(msg) + # def writeGpib(self, gpibAddr, msg): + # self.write('++addr ' + str(gpibAddr)) + # self.write(msg) def read(self): - return self.pro.recv(128).decode().rstrip('\n').rstrip('\r') + return self.sock.recv(128).decode().rstrip('\n').rstrip('\r') def identify(self): self.write('++ver') diff --git a/socs/agent/scpi_psu_driver.py b/socs/agent/scpi_psu_driver.py index f0bf30b1b..5e9bc076c 100644 --- a/socs/agent/scpi_psu_driver.py +++ b/socs/agent/scpi_psu_driver.py @@ -1,41 +1,24 @@ # Tucker Elleflot -from socs.agent import prologixInterface +from socs.agent.prologixInterface import GpibInterface -class psuInterface: 
- - def __init__(self, ip_address, gpibAddr, verbose=True): - self.pro = prologixInterface.prologixInterface(ip=ip_address) - - self.gpibAddr = gpibAddr +class psuInterface(GpibInterface): + def __init__(self, ip_address, gpibAddr, verbose=False): + super().__init__(ip_address, gpibAddr) self.verbose = verbose - def connGpib(self): - self.pro.write('++addr ' + str(self.gpibAddr)) - - def write(self, msg): - self.connGpib() - self.pro.write(msg) - - def read(self): - return self.pro.read() - - def identify(self): - self.write('*idn?') - return self.read() - def enable(self, ch): ''' - Enables output for channel (1,2,3) but does not turn it on. + Enables output for channel (1,2,3) but does not turn it on. Depending on state of power supply, it might need to be called - before the output is set. + before the output is set. ''' self.setChan(ch) self.write('OUTP:ENAB ON') - + def disable(self, ch): ''' - disabled output from a channel (1,2,3). once called, enable must be + disabled output from a channel (1,2,3). once called, enable must be called to turn on the channel again ''' self.write('OUTP:ENAB OFF') @@ -49,14 +32,14 @@ def setOutput(self, ch, out): ch - channel (1,2,3) to set status out - ON: True|1|'ON' OFF: False|0|'OFF' - Calls enable to ensure a channel can be turned on. We might want to + Calls enable to ensure a channel can be turned on. We might want to make them separate (and let us use disable as a safety feature) but for now I am thinking we just want to thing to turn on when we tell it to turn on. ''' self.setChan(ch) self.enable(ch) - if type(out)==str: + if type(out) == str: self.write('CHAN:OUTP '+out) elif out: self.write('CHAN:OUTP ON') @@ -75,16 +58,16 @@ def getOutput(self, ch): def setVolt(self, ch, volt): self.setChan(ch) self.write('volt ' + str(volt)) - #if self.verbose: - # voltage = self.getVolt(ch) - #print "CH " + str(ch) + " is set to " + str(voltage) " V" + if self.verbose: + voltage = self.getVolt(ch) + print("CH " + str(ch) + " is set to " + str(voltage) + " V") def setCurr(self, ch, curr): self.setChan(ch) self.write('curr ' + str(curr)) - #if self.verbose: - # current = self.getCurr(ch) - #print "CH " + str(ch) + " is set to " + str(current) " A" + if self.verbose: + current = self.getCurr(ch) + print("CH " + str(ch) + " is set to " + str(current) + " A") def getVolt(self, ch): self.setChan(ch) diff --git a/socs/agent/tektronix3021c_driver.py b/socs/agent/tektronix3021c_driver.py index e0dce908f..3be65f095 100644 --- a/socs/agent/tektronix3021c_driver.py +++ b/socs/agent/tektronix3021c_driver.py @@ -1,72 +1,15 @@ """Michael Randall mrandall@ucsd.edu""" -from socs.agent import prologixInterface +from socs.agent.prologixInterface import GpibInterface -class tektronixInterface: - - def __init__(self, ip_address, gpibAddr, verbose=True): - self.pro = prologixInterface.prologixInterface(ip=ip_address) - - self.verbose = verbose - self.gpibAddr = gpibAddr - - def connGpib(self): - self.pro.write('++addr ' + str(self.gpibAddr)) - - - def write(self, msg): - self.connGpib() - self.pro.write(msg) - - - def read(self): - return self.pro.read() - - - def identify(self): - self.write('*idn?') - return self.read() - - +class tektronixInterface(GpibInterface): def setFreq(self, freq): - #gpib_connect(awg_gpib_addr) self.write('SOUR:FREQ {:.3f}\n'.format(freq)) - - """ - if self.verbose: - self.p.send('SOUR:FREQ?\n') - freq_set = float(self.p.recv(128).rstrip()) - - print('freq: {:.3e} Hz').format(freq_set) - """ - + def setAmp(self, amp): - 
#self.gpib_connect(self.awg_gpib_address) self.write('SOUR:VOLT {:.3f}\n'.format(amp)) - - """ - if self.verbose: - self.p.send('SOUR:VOLT?\n') - amp_set = float(self.p.recv(128).rstrip()) - - print('set amp: {:.3e} V'.format(amp_set)) - """ - + def setOutput(self, state): - #self.gpib_connect(self.awg_gpib_address) self.write('OUTP:STAT {:.0f}\n'.format(state)) - - """ - if self.verbose: - self.p.send('OUTP:STAT?\n') - state_set = float(self.p.recv(128).rstrip()) - - print('output state: {:.0f}'.format(state_set)) - """ - - - - - From be7e64606f8e40f0694e6d7ccd492e3f44a85de5 Mon Sep 17 00:00:00 2001 From: Brian Koopman Date: Thu, 18 Feb 2021 11:46:38 -0500 Subject: [PATCH 24/43] Base GpibInterface on PrologixInterface class --- socs/agent/prologixInterface.py | 40 ++++++++++++--------------------- 1 file changed, 14 insertions(+), 26 deletions(-) diff --git a/socs/agent/prologixInterface.py b/socs/agent/prologixInterface.py index 2de776187..68e6081fb 100644 --- a/socs/agent/prologixInterface.py +++ b/socs/agent/prologixInterface.py @@ -3,31 +3,10 @@ DEFAULT_ESCAPE = 'xYzZyX' -class GpibInterface: - def __init__(self, ip_address, gpibAddr): - self.pro = PrologixInterface(ip=ip_address) - self.gpibAddr = gpibAddr - - def connGpib(self): - self.pro.write('++addr ' + str(self.gpibAddr)) - - def write(self, msg): - self.connGpib() - self.pro.write(msg) - - def read(self): - return self.pro.read() - - def identify(self): - self.write('*idn?') - return self.read() - - class PrologixInterface: def __init__(self, ip, escape_string=DEFAULT_ESCAPE): self.ip = ip self.escape_string = escape_string - # self.gpibAddr = gpibAddr self.sock = None self.connSocket() self.configure() @@ -40,19 +19,28 @@ def connSocket(self): def configure(self): self.write('++mode 1\n') self.write('++auto 1\n') - # self.write('++addr ' + str(self.gpibAddr)) def write(self, msg): message = msg + '\n' self.sock.send(message.encode()) - # def writeGpib(self, gpibAddr, msg): - # self.write('++addr ' + str(gpibAddr)) - # self.write(msg) - def read(self): return self.sock.recv(128).decode().rstrip('\n').rstrip('\r') def identify(self): self.write('++ver') return self.read() + + +class GpibInterface(PrologixInterface): + def __init__(self, ip_address, gpibAddr): + super().__init__(ip_address) + self.gpibAddr = gpibAddr + + def write(self, msg): + self.sock.write('++addr ' + str(self.gpibAddr)) + super().write(msg) + + def identify(self): + self.write('*idn?') + return self.read() From 18b9684a7893c72ad0bb59a6da4fbd7147487a44 Mon Sep 17 00:00:00 2001 From: Brian Koopman Date: Thu, 18 Feb 2021 11:50:48 -0500 Subject: [PATCH 25/43] Fix write call in GpibInterface --- socs/agent/prologixInterface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/socs/agent/prologixInterface.py b/socs/agent/prologixInterface.py index 68e6081fb..239a78361 100644 --- a/socs/agent/prologixInterface.py +++ b/socs/agent/prologixInterface.py @@ -38,7 +38,7 @@ def __init__(self, ip_address, gpibAddr): self.gpibAddr = gpibAddr def write(self, msg): - self.sock.write('++addr ' + str(self.gpibAddr)) + super().write('++addr ' + str(self.gpibAddr)) super().write(msg) def identify(self): From d840fe95a63d76ba5f82dd811c3ba91a85fc2e0c Mon Sep 17 00:00:00 2001 From: Brian Koopman Date: Thu, 18 Feb 2021 12:00:34 -0500 Subject: [PATCH 26/43] Change some letter casing in module and method names --- socs/agent/{prologixInterface.py => prologix_interface.py} | 4 ++-- socs/agent/scpi_psu_driver.py | 2 +- socs/agent/tektronix3021c_driver.py | 2 +- 3 files 
changed, 4 insertions(+), 4 deletions(-) rename socs/agent/{prologixInterface.py => prologix_interface.py} (95%) diff --git a/socs/agent/prologixInterface.py b/socs/agent/prologix_interface.py similarity index 95% rename from socs/agent/prologixInterface.py rename to socs/agent/prologix_interface.py index 239a78361..57bf1bd54 100644 --- a/socs/agent/prologixInterface.py +++ b/socs/agent/prologix_interface.py @@ -8,10 +8,10 @@ def __init__(self, ip, escape_string=DEFAULT_ESCAPE): self.ip = ip self.escape_string = escape_string self.sock = None - self.connSocket() + self.conn_socket() self.configure() - def connSocket(self): + def conn_socket(self): self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.connect((self.ip, 1234)) self.sock.settimeout(5) diff --git a/socs/agent/scpi_psu_driver.py b/socs/agent/scpi_psu_driver.py index 5e9bc076c..5e5561df3 100644 --- a/socs/agent/scpi_psu_driver.py +++ b/socs/agent/scpi_psu_driver.py @@ -1,5 +1,5 @@ # Tucker Elleflot -from socs.agent.prologixInterface import GpibInterface +from socs.agent.prologix_interface import GpibInterface class psuInterface(GpibInterface): diff --git a/socs/agent/tektronix3021c_driver.py b/socs/agent/tektronix3021c_driver.py index 3be65f095..04803ffc1 100644 --- a/socs/agent/tektronix3021c_driver.py +++ b/socs/agent/tektronix3021c_driver.py @@ -1,7 +1,7 @@ """Michael Randall mrandall@ucsd.edu""" -from socs.agent.prologixInterface import GpibInterface +from socs.agent.prologix_interface import GpibInterface class tektronixInterface(GpibInterface): From afdc9517cffa3a7db3817d1a065bee590c49267f Mon Sep 17 00:00:00 2001 From: Brian Koopman Date: Thu, 18 Feb 2021 12:22:44 -0500 Subject: [PATCH 27/43] Fix type check --- socs/agent/scpi_psu_driver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/socs/agent/scpi_psu_driver.py b/socs/agent/scpi_psu_driver.py index 5e5561df3..cadacaad2 100644 --- a/socs/agent/scpi_psu_driver.py +++ b/socs/agent/scpi_psu_driver.py @@ -39,7 +39,7 @@ def setOutput(self, ch, out): ''' self.setChan(ch) self.enable(ch) - if type(out) == str: + if isinstance(out, str): self.write('CHAN:OUTP '+out) elif out: self.write('CHAN:OUTP ON') From 7c7597ad1c308f54c0fb6202cd15d5a3f18b6271 Mon Sep 17 00:00:00 2001 From: Brian Koopman Date: Thu, 18 Feb 2021 12:27:44 -0500 Subject: [PATCH 28/43] Rename GpibInterface to GPIBInterface --- socs/agent/prologix_interface.py | 2 +- socs/agent/scpi_psu_driver.py | 4 ++-- socs/agent/tektronix3021c_driver.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/socs/agent/prologix_interface.py b/socs/agent/prologix_interface.py index 57bf1bd54..8d7780f6d 100644 --- a/socs/agent/prologix_interface.py +++ b/socs/agent/prologix_interface.py @@ -32,7 +32,7 @@ def identify(self): return self.read() -class GpibInterface(PrologixInterface): +class GPIBInterface(PrologixInterface): def __init__(self, ip_address, gpibAddr): super().__init__(ip_address) self.gpibAddr = gpibAddr diff --git a/socs/agent/scpi_psu_driver.py b/socs/agent/scpi_psu_driver.py index cadacaad2..3cde9b1aa 100644 --- a/socs/agent/scpi_psu_driver.py +++ b/socs/agent/scpi_psu_driver.py @@ -1,8 +1,8 @@ # Tucker Elleflot -from socs.agent.prologix_interface import GpibInterface +from socs.agent.prologix_interface import GPIBInterface -class psuInterface(GpibInterface): +class psuInterface(GPIBInterface): def __init__(self, ip_address, gpibAddr, verbose=False): super().__init__(ip_address, gpibAddr) self.verbose = verbose diff --git a/socs/agent/tektronix3021c_driver.py 
b/socs/agent/tektronix3021c_driver.py index 04803ffc1..8758a30de 100644 --- a/socs/agent/tektronix3021c_driver.py +++ b/socs/agent/tektronix3021c_driver.py @@ -1,10 +1,10 @@ """Michael Randall mrandall@ucsd.edu""" -from socs.agent.prologix_interface import GpibInterface +from socs.agent.prologix_interface import GPIBInterface -class tektronixInterface(GpibInterface): +class tektronixInterface(GPIBInterface): def setFreq(self, freq): self.write('SOUR:FREQ {:.3f}\n'.format(freq)) From 1909a78da2b478957ee95ad001690743eeeb2245 Mon Sep 17 00:00:00 2001 From: Brian Koopman Date: Thu, 18 Feb 2021 15:05:24 -0500 Subject: [PATCH 29/43] Revert "Rename GpibInterface to GPIBInterface" This reverts commit c401527ffc1a6eca0c48abe4c3e6a9eebfe52380. --- socs/agent/prologix_interface.py | 2 +- socs/agent/scpi_psu_driver.py | 4 ++-- socs/agent/tektronix3021c_driver.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/socs/agent/prologix_interface.py b/socs/agent/prologix_interface.py index 8d7780f6d..57bf1bd54 100644 --- a/socs/agent/prologix_interface.py +++ b/socs/agent/prologix_interface.py @@ -32,7 +32,7 @@ def identify(self): return self.read() -class GPIBInterface(PrologixInterface): +class GpibInterface(PrologixInterface): def __init__(self, ip_address, gpibAddr): super().__init__(ip_address) self.gpibAddr = gpibAddr diff --git a/socs/agent/scpi_psu_driver.py b/socs/agent/scpi_psu_driver.py index 3cde9b1aa..cadacaad2 100644 --- a/socs/agent/scpi_psu_driver.py +++ b/socs/agent/scpi_psu_driver.py @@ -1,8 +1,8 @@ # Tucker Elleflot -from socs.agent.prologix_interface import GPIBInterface +from socs.agent.prologix_interface import GpibInterface -class psuInterface(GPIBInterface): +class psuInterface(GpibInterface): def __init__(self, ip_address, gpibAddr, verbose=False): super().__init__(ip_address, gpibAddr) self.verbose = verbose diff --git a/socs/agent/tektronix3021c_driver.py b/socs/agent/tektronix3021c_driver.py index 8758a30de..04803ffc1 100644 --- a/socs/agent/tektronix3021c_driver.py +++ b/socs/agent/tektronix3021c_driver.py @@ -1,10 +1,10 @@ """Michael Randall mrandall@ucsd.edu""" -from socs.agent.prologix_interface import GPIBInterface +from socs.agent.prologix_interface import GpibInterface -class tektronixInterface(GPIBInterface): +class tektronixInterface(GpibInterface): def setFreq(self, freq): self.write('SOUR:FREQ {:.3f}\n'.format(freq)) From e86934b8d26acf03e1209f3e4b0626e84a7e7fac Mon Sep 17 00:00:00 2001 From: Brian Koopman Date: Thu, 18 Feb 2021 15:11:22 -0500 Subject: [PATCH 30/43] Rename Tektronix driver classes and methods per PEP08 recs --- agents/tektronix3021c/tektronix_agent.py | 10 +++++----- socs/agent/tektronix3021c_driver.py | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/agents/tektronix3021c/tektronix_agent.py b/agents/tektronix3021c/tektronix_agent.py index 74ea4ef88..7a360e0c4 100644 --- a/agents/tektronix3021c/tektronix_agent.py +++ b/agents/tektronix3021c/tektronix_agent.py @@ -6,7 +6,7 @@ import socket import argparse -from socs.agent.tektronix3021c_driver import tektronixInterface +from socs.agent.tektronix3021c_driver import TektronixInterface on_rtd = os.environ.get('READTHEDOCS') == 'True' if not on_rtd: @@ -53,7 +53,7 @@ def init_awg(self, session, params=None): return False, "Could not acquire lock" try: - self.awg = tektronixInterface(self.ip_address, self.gpib_slot) + self.awg = TektronixInterface(self.ip_address, self.gpib_slot) self.idn = self.awg.identify() except socket.timeout as e: @@ -90,7 +90,7 @@ def 
set_frequency(self, session, params=None): not be of NoneType -> {}""".format(e) if 0 < freq < 25E6: - self.awg.setFreq(freq) + self.awg.set_freq(freq) data = {'timestamp': time.time(), 'block_name': "AWG_frequency_cmd", @@ -130,7 +130,7 @@ def set_amplitude(self, session, params=None): of NoneType -> {}""".format(e) if 0 < amp < 10: - self.awg.setAmp(amp) + self.awg.set_amp(amp) data = {'timestamp': time.time(), 'block_name': "AWG_amplitude_cmd", @@ -168,7 +168,7 @@ def set_output(self, session, params=None): return False, """State must not be of NoneType -> {}""".format(e) - self.awg.setOutput(state) + self.awg.set_output(state) data = {'timestamp': time.time(), 'block_name': "AWG_output_cmd", diff --git a/socs/agent/tektronix3021c_driver.py b/socs/agent/tektronix3021c_driver.py index 04803ffc1..c3f2c47fe 100644 --- a/socs/agent/tektronix3021c_driver.py +++ b/socs/agent/tektronix3021c_driver.py @@ -4,12 +4,12 @@ from socs.agent.prologix_interface import GpibInterface -class tektronixInterface(GpibInterface): - def setFreq(self, freq): +class TektronixInterface(GpibInterface): + def set_freq(self, freq): self.write('SOUR:FREQ {:.3f}\n'.format(freq)) - def setAmp(self, amp): + def set_amp(self, amp): self.write('SOUR:VOLT {:.3f}\n'.format(amp)) - def setOutput(self, state): + def set_output(self, state): self.write('OUTP:STAT {:.0f}\n'.format(state)) From 79d7996006cc37ecf652346ceea9c7ad1dd1b34b Mon Sep 17 00:00:00 2001 From: Brian Koopman Date: Thu, 18 Feb 2021 15:25:41 -0500 Subject: [PATCH 31/43] Rename PSU driver classes and methods per PEP08 recs --- agents/scpi_psu/scpi_psu_agent.py | 14 ++++++------- socs/agent/scpi_psu_driver.py | 34 +++++++++++++++---------------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/agents/scpi_psu/scpi_psu_agent.py b/agents/scpi_psu/scpi_psu_agent.py index 46c5eda9a..4a8cc5f46 100644 --- a/agents/scpi_psu/scpi_psu_agent.py +++ b/agents/scpi_psu/scpi_psu_agent.py @@ -2,7 +2,7 @@ import os import socket import argparse -from socs.agent.scpi_psu_driver import psuInterface +from socs.agent.scpi_psu_driver import PsuInterface on_rtd = os.environ.get('READTHEDOCS') == 'True' if not on_rtd: @@ -40,7 +40,7 @@ def init_psu(self, session, params=None): return False, "Could not acquire lock" try: - self.psu = psuInterface(self.ip_address, self.gpib_slot) + self.psu = PsuInterface(self.ip_address, self.gpib_slot) self.idn = self.psu.identify() except socket.timeout as e: self.log.error("PSU timed out during connect") @@ -75,8 +75,8 @@ def monitor_output(self, session, params=None): } for chan in [1, 2, 3]: - data['data']["Voltage_{}".format(chan)] = self.psu.getVolt(chan) - data['data']["Current_{}".format(chan)] = self.psu.getCurr(chan) + data['data']["Voltage_{}".format(chan)] = self.psu.get_volt(chan) + data['data']["Current_{}".format(chan)] = self.psu.get_curr(chan) # self.log.info(str(data)) # print(data) @@ -107,7 +107,7 @@ def set_voltage(self, session, params=None): with self.lock.acquire_timeout(1) as acquired: if acquired: - self.psu.setVolt(params['channel'], params['volts']) + self.psu.set_volt(params['channel'], params['volts']) else: return False, "Could not acquire lock" @@ -123,7 +123,7 @@ def set_current(self, session, params=None): """ with self.lock.acquire_timeout(1) as acquired: if acquired: - self.psu.setCurr(params['channel'], params['current']) + self.psu.set_curr(params['channel'], params['current']) else: return False, "Could not acquire lock" @@ -139,7 +139,7 @@ def set_output(self, session, params=None): """ with 
self.lock.acquire_timeout(1) as acquired: if acquired: - self.psu.setOutput(params['channel'], params['state']) + self.psu.set_output(params['channel'], params['state']) else: return False, "Could not acquire lock" diff --git a/socs/agent/scpi_psu_driver.py b/socs/agent/scpi_psu_driver.py index cadacaad2..3e3c19e18 100644 --- a/socs/agent/scpi_psu_driver.py +++ b/socs/agent/scpi_psu_driver.py @@ -2,7 +2,7 @@ from socs.agent.prologix_interface import GpibInterface -class psuInterface(GpibInterface): +class PsuInterface(GpibInterface): def __init__(self, ip_address, gpibAddr, verbose=False): super().__init__(ip_address, gpibAddr) self.verbose = verbose @@ -13,7 +13,7 @@ def enable(self, ch): Depending on state of power supply, it might need to be called before the output is set. ''' - self.setChan(ch) + self.set_chan(ch) self.write('OUTP:ENAB ON') def disable(self, ch): @@ -23,10 +23,10 @@ def disable(self, ch): ''' self.write('OUTP:ENAB OFF') - def setChan(self, ch): + def set_chan(self, ch): self.write('inst:nsel ' + str(ch)) - def setOutput(self, ch, out): + def set_output(self, ch, out): ''' set status of power supply channel ch - channel (1,2,3) to set status @@ -37,7 +37,7 @@ def setOutput(self, ch, out): for now I am thinking we just want to thing to turn on when we tell it to turn on. ''' - self.setChan(ch) + self.set_chan(ch) self.enable(ch) if isinstance(out, str): self.write('CHAN:OUTP '+out) @@ -46,37 +46,37 @@ def setOutput(self, ch, out): else: self.write('CHAN:OUTP OFF') - def getOutput(self, ch): + def get_output(self, ch): ''' check if the output of a channel (1,2,3) is on (True) or off (False) ''' - self.setChan(ch) + self.set_chan(ch) self.write('CHAN:OUTP:STAT?') out = bool(float(self.read())) return out - def setVolt(self, ch, volt): - self.setChan(ch) + def set_volt(self, ch, volt): + self.set_chan(ch) self.write('volt ' + str(volt)) if self.verbose: - voltage = self.getVolt(ch) + voltage = self.get_volt(ch) print("CH " + str(ch) + " is set to " + str(voltage) + " V") - def setCurr(self, ch, curr): - self.setChan(ch) + def set_curr(self, ch, curr): + self.set_chan(ch) self.write('curr ' + str(curr)) if self.verbose: - current = self.getCurr(ch) + current = self.get_curr(ch) print("CH " + str(ch) + " is set to " + str(current) + " A") - def getVolt(self, ch): - self.setChan(ch) + def get_volt(self, ch): + self.set_chan(ch) self.write('MEAS:VOLT? CH' + str(ch)) voltage = float(self.read()) return voltage - def getCurr(self, ch): - self.setChan(ch) + def get_curr(self, ch): + self.set_chan(ch) self.write('MEAS:CURR? 
CH' + str(ch)) current = float(self.read()) return current From 57aaa66cc145de09ff86350bf790c7e8b9f606d5 Mon Sep 17 00:00:00 2001 From: Brian Koopman Date: Tue, 2 Mar 2021 09:49:51 -0500 Subject: [PATCH 32/43] Combine GpibInterface and PrologixInterface classes --- socs/agent/prologix_interface.py | 24 +++++++----------------- socs/agent/scpi_psu_driver.py | 8 ++++---- socs/agent/tektronix3021c_driver.py | 4 ++-- 3 files changed, 13 insertions(+), 23 deletions(-) diff --git a/socs/agent/prologix_interface.py b/socs/agent/prologix_interface.py index 57bf1bd54..76a0e869b 100644 --- a/socs/agent/prologix_interface.py +++ b/socs/agent/prologix_interface.py @@ -1,24 +1,24 @@ import socket -DEFAULT_ESCAPE = 'xYzZyX' - class PrologixInterface: - def __init__(self, ip, escape_string=DEFAULT_ESCAPE): - self.ip = ip - self.escape_string = escape_string + def __init__(self, ip_address, gpibAddr, **kwargs): + self.ip_address = ip_address + self.gpibAddr = gpibAddr self.sock = None self.conn_socket() self.configure() + super().__init__(**kwargs) def conn_socket(self): self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.sock.connect((self.ip, 1234)) + self.sock.connect((self.ip_address, 1234)) self.sock.settimeout(5) def configure(self): self.write('++mode 1\n') self.write('++auto 1\n') + self.write('++addr ' + str(self.gpibAddr)) def write(self, msg): message = msg + '\n' @@ -27,20 +27,10 @@ def write(self, msg): def read(self): return self.sock.recv(128).decode().rstrip('\n').rstrip('\r') - def identify(self): + def version(self): self.write('++ver') return self.read() - -class GpibInterface(PrologixInterface): - def __init__(self, ip_address, gpibAddr): - super().__init__(ip_address) - self.gpibAddr = gpibAddr - - def write(self, msg): - super().write('++addr ' + str(self.gpibAddr)) - super().write(msg) - def identify(self): self.write('*idn?') return self.read() diff --git a/socs/agent/scpi_psu_driver.py b/socs/agent/scpi_psu_driver.py index 3e3c19e18..44a555f72 100644 --- a/socs/agent/scpi_psu_driver.py +++ b/socs/agent/scpi_psu_driver.py @@ -1,11 +1,11 @@ # Tucker Elleflot -from socs.agent.prologix_interface import GpibInterface +from socs.agent.prologix_interface import PrologixInterface -class PsuInterface(GpibInterface): - def __init__(self, ip_address, gpibAddr, verbose=False): - super().__init__(ip_address, gpibAddr) +class PsuInterface(PrologixInterface): + def __init__(self, ip_address, gpibAddr, verbose=False, **kwargs): self.verbose = verbose + super().__init__(ip_address, gpibAddr, **kwargs) def enable(self, ch): ''' diff --git a/socs/agent/tektronix3021c_driver.py b/socs/agent/tektronix3021c_driver.py index c3f2c47fe..2ad971099 100644 --- a/socs/agent/tektronix3021c_driver.py +++ b/socs/agent/tektronix3021c_driver.py @@ -1,10 +1,10 @@ """Michael Randall mrandall@ucsd.edu""" -from socs.agent.prologix_interface import GpibInterface +from socs.agent.prologix_interface import PrologixInterface -class TektronixInterface(GpibInterface): +class TektronixInterface(PrologixInterface): def set_freq(self, freq): self.write('SOUR:FREQ {:.3f}\n'.format(freq)) From cddc953ef73b47bf2925a7cb9f5e445dec0ac44a Mon Sep 17 00:00:00 2001 From: Jake Spisak Date: Mon, 8 Mar 2021 17:46:52 -0800 Subject: [PATCH 33/43] Add proper logging --- agents/labjack/labjack_agent.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/agents/labjack/labjack_agent.py b/agents/labjack/labjack_agent.py index 2b1ec6730..38d810ae8 100644 --- a/agents/labjack/labjack_agent.py +++ 
b/agents/labjack/labjack_agent.py @@ -7,6 +7,8 @@ import csv from scipy.interpolate import interp1d import numpy as np +import txaio +txaio.use_twisted() ON_RTD = os.environ.get('READTHEDOCS') == 'True' if not ON_RTD: @@ -83,6 +85,7 @@ class LabJackFunctions: Labjack helper class to provide unit conversion from analog input voltage """ def __init__(self): + self.log = txaio.make_logger() pass def unit_conversion(self, v_array, function_info): @@ -141,7 +144,7 @@ def warm_therm(self, v_array): values = RtoT(R) except ValueError: - print('Temperature outside thermometer range') + self.log.error('Temperature outside thermometer range') values = -1000 + np.zeros(len(R)) units = 'C' @@ -181,7 +184,7 @@ def __init__(self, agent, ip_address, active_channels, function_file, self.functions = yaml.safe_load(stream) if self.functions is None: self.functions = {} - print(f"Applying conversion functions: {self.functions}") + self.log.info(f"Applying conversion functions: {self.functions}") self.initialized = False self.take_data = False @@ -224,7 +227,7 @@ def init_labjack_task(self, session, params=None): # Connect with the labjack self.handle = ljm.openS("ANY", "ANY", self.ip_address) info = ljm.getHandleInfo(self.handle) - print("\nOpened LabJack of type: %i, Connection type: %i,\n" + self.log.info("\nOpened LabJack of type: %i, Connection type: %i,\n" "Serial number: %i, IP address: %s, Port: %i" % (info[0], info[1], info[2], ljm.numberToIP(info[3]), info[4])) @@ -274,12 +277,13 @@ def start_acq(self, session, params=None): try: scan_rate = ljm.eStreamStart(self.handle, scans_per_read, num_chs, ch_addrs, scan_rate_input) - except LJMError: #in case the stream is running - print("Stopping previous stream") + except LJMError as e: #in case the stream is running + self.log.error(e) + self.log.error("Stopping previous stream and starting new one") ljm.eStreamStop(self.handle) scan_rate = ljm.eStreamStart(self.handle, scans_per_read, num_chs, ch_addrs, scan_rate_input) - print(f"\nStream started with a scan rate of {scan_rate} Hz.") + self.log.info(f"\nStream started with a scan rate of {scan_rate} Hz.") cur_time = time.time() while self.take_data: @@ -328,7 +332,7 @@ def start_acq(self, session, params=None): self.agent.feeds['sensors'].flush_buffer() self.agent.feeds['sensors_downsampled'].flush_buffer() ljm.eStreamStop(self.handle) - print("Data stream stopped") + self.log.info("Data stream stopped") return True, 'Acquisition exited cleanly.' 
From d0478e63e5aa1b1e22b652b4d8798950394eaf9f Mon Sep 17 00:00:00 2001 From: Brian Koopman Date: Tue, 9 Mar 2021 09:34:42 -0500 Subject: [PATCH 34/43] Add env-setable loglevel and fix small PEP08 recommendations --- agents/labjack/labjack_agent.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/agents/labjack/labjack_agent.py b/agents/labjack/labjack_agent.py index 38d810ae8..9c2e8597e 100644 --- a/agents/labjack/labjack_agent.py +++ b/agents/labjack/labjack_agent.py @@ -86,7 +86,6 @@ class LabJackFunctions: """ def __init__(self): self.log = txaio.make_logger() - pass def unit_conversion(self, v_array, function_info): """ @@ -228,9 +227,9 @@ def init_labjack_task(self, session, params=None): self.handle = ljm.openS("ANY", "ANY", self.ip_address) info = ljm.getHandleInfo(self.handle) self.log.info("\nOpened LabJack of type: %i, Connection type: %i,\n" - "Serial number: %i, IP address: %s, Port: %i" % - (info[0], info[1], info[2], - ljm.numberToIP(info[3]), info[4])) + "Serial number: %i, IP address: %s, Port: %i" % + (info[0], info[1], info[2], + ljm.numberToIP(info[3]), info[4])) session.add_message("Labjack initialized") @@ -277,10 +276,10 @@ def start_acq(self, session, params=None): try: scan_rate = ljm.eStreamStart(self.handle, scans_per_read, num_chs, ch_addrs, scan_rate_input) - except LJMError as e: #in case the stream is running + except LJMError as e: # in case the stream is running self.log.error(e) self.log.error("Stopping previous stream and starting new one") - ljm.eStreamStop(self.handle) + ljm.eStreamStop(self.handle) scan_rate = ljm.eStreamStart(self.handle, scans_per_read, num_chs, ch_addrs, scan_rate_input) self.log.info(f"\nStream started with a scan rate of {scan_rate} Hz.") @@ -366,6 +365,9 @@ def make_parser(parser=None): if __name__ == '__main__': + # Start logging + txaio.start_logging(level=os.environ.get("LOGLEVEL", "info")) + site_parser = site_config.add_arguments() parser = make_parser(site_parser) From 10bba6456256e150cbeec8c22a5b951fc2d1fd56 Mon Sep 17 00:00:00 2001 From: Brian Koopman Date: Thu, 25 Mar 2021 10:58:34 -0400 Subject: [PATCH 35/43] Remove mention of sisock data feed servers from 372 docs (#148) Remove outdated information from Agent docs Mostly related to: * sisock * Docker image locations * depends_on definitions --- docs/agents/bluefors_agent.rst | 3 +- docs/agents/lakeshore240.rst | 56 +++++++++++----------- docs/agents/lakeshore372.rst | 19 +------- docs/agents/pysmurf/pysmurf-archiver.rst | 2 - docs/agents/pysmurf/pysmurf-controller.rst | 2 - docs/agents/pysmurf/pysmurf-monitor.rst | 4 +- docs/simulators/ls240_simulator.rst | 6 +-- 7 files changed, 35 insertions(+), 57 deletions(-) diff --git a/docs/agents/bluefors_agent.rst b/docs/agents/bluefors_agent.rst index 4a2fdf89d..131d2351b 100644 --- a/docs/agents/bluefors_agent.rst +++ b/docs/agents/bluefors_agent.rst @@ -87,7 +87,6 @@ outline is: - Install ocs and socs - Configure your ocs-config file and perform the associated setup - Start the Bluefors agent and command it to acquire data via an OCS client -- Create a sisock-data-feed-server container for live monitoring Configuration File Examples --------------------------- @@ -111,7 +110,7 @@ Docker Example docker-compose configuration:: ocs-bluefors: - image: grumpy.physics.yale.edu/ocs-bluefors-agent:latest + image: simonsobs/ocs-bluefors-agent:latest hostname: ocs-docker volumes: - ${OCS_CONFIG_DIR}:/config:ro diff --git a/docs/agents/lakeshore240.rst b/docs/agents/lakeshore240.rst index 
bbedd3aaf..bb0bfb05f 100644 --- a/docs/agents/lakeshore240.rst +++ b/docs/agents/lakeshore240.rst @@ -66,10 +66,31 @@ configuration block that will automatically start data acquisition:: Each device requires configuration under 'agent-instances'. See the OCS site configs documentation for more details. -The following tasks are registered for the LS240 agent. +Docker Configuration +-------------------- -.. autoclass:: agents.lakeshore240.LS240_agent.LS240_Agent - :members: init_lakeshore_task, set_values, upload_cal_curve, acq, start_acq +The Lakeshore 240 Agent can (and probably should) be configured to run in a +Docker container. An example configuration is:: + + ocs-LSA24MA: + image: simonsobs/ocs-lakeshore240-agent:latest + devices: + - "/dev/LSA24MA:/dev/LSA24MA" + hostname: nuc-docker + volumes: + - ${OCS_CONFIG_DIR}:/config:ro + command: + - "--instance-id=LSA24MA" + - "--site-hub=ws://crossbar:8001/ws" + - "--site-http=http://crossbar:8001/call" + +The serial number will need to be updated in your configuration. The hostname +should also match your configured host in your OCS configuration file. The +site-hub and site-http need to point to your crossbar server, as described in +the OCS documentation. + +Initial Setup +------------- Out of the box, the Lakeshore 240 channels are not enabled or configured to correctly measure thermometers. To enable, you can use the ``set_values`` task @@ -91,28 +112,9 @@ set channel 1 of a lakeshore module to read a diode:: ls_client.set_values.start(channel=1, name="CHWP_01", **diode_params) ls_client.set_values.wait() +Agent API +--------- +The following tasks are registered for the LS240 agent. -Docker Configuration --------------------- - -The Lakeshore 240 Agent can (and probably should) be configured to run in a -Docker container. An example configuration is:: - - ocs-LSA24MA: - image: grumpy.physics.yale.edu/ocs-lakeshore240-agent:latest - depends_on: - - "crossbar" - devices: - - "/dev/LSA24MA:/dev/LSA24MA" - hostname: nuc-docker - volumes: - - ${OCS_CONFIG_DIR}:/config:ro - command: - - "--instance-id=LSA24MA" - - "--site-hub=ws://crossbar:8001/ws" - - "--site-http=http://crossbar:8001/call" - -The serial number will need to be updated in your configuration. The hostname -should also match your configured host in your OCS configuration file. The -site-hub and site-http need to point to your crossbar server, as described in -the OCS documentation. +.. autoclass:: agents.lakeshore240.LS240_agent.LS240_Agent + :members: init_lakeshore_task, set_values, upload_cal_curve, acq, start_acq diff --git a/docs/agents/lakeshore372.rst b/docs/agents/lakeshore372.rst index c97ebb6f2..dc5281ee2 100644 --- a/docs/agents/lakeshore372.rst +++ b/docs/agents/lakeshore372.rst @@ -38,7 +38,7 @@ The Lakeshore 372 Agent should be configured to run in a Docker container. An example configuration is:: ocs-LSA22YE: - image: grumpy.physics.yale.edu/ocs-lakeshore372-agent:latest + image: simonsobs/ocs-lakeshore372-agent:latest hostname: ocs-docker network_mode: "host" volumes: @@ -54,23 +54,6 @@ example configuration is:: system. In this example the crossbar server is running on localhost, ``127.0.0.1``, but on your network this may be different. -To view the 372 temperatures data feed in the live monitor an accompanying -data-feed server will need to be run. 
An example of this configuration is:: - - sisock-LSA22YE: - image: grumpy.physics.yale.edu/sisock-data-feed-server:latest - environment: - TARGET: LSA22YE # match to instance-id of agent to monitor, used for data feed subscription - NAME: 'LSA22YE' # will appear in sisock a front of field name - DESCRIPTION: "LS372 with two ROXes for calibration." - FEED: "temperatures" - logging: - options: - max-size: "20m" - max-file: "10" - -For additional configuration see the sisock data-feed-server documentation. - .. note:: The serial numbers here will need to be updated for your device. diff --git a/docs/agents/pysmurf/pysmurf-archiver.rst b/docs/agents/pysmurf/pysmurf-archiver.rst index f6d334985..ebac09d60 100644 --- a/docs/agents/pysmurf/pysmurf-archiver.rst +++ b/docs/agents/pysmurf/pysmurf-archiver.rst @@ -58,8 +58,6 @@ The docker-compose entry is similar to that of the pysmurf-monitor. For example: - ${OCS_CONFIG_DIR}:/config - /home/ocs:/home/ocs - /data:/data - depends_on: - - "crossbar" Archived Path -------------- diff --git a/docs/agents/pysmurf/pysmurf-controller.rst b/docs/agents/pysmurf/pysmurf-controller.rst index 3c830d593..670fee24f 100644 --- a/docs/agents/pysmurf/pysmurf-controller.rst +++ b/docs/agents/pysmurf/pysmurf-controller.rst @@ -85,8 +85,6 @@ named ``ocs-pysmurf-monitor`` might look something like:: - ${OCS_CONFIG_DIR}:/config - /data:/data - /path/to/dev/pysmurf/:/usr/local/src/pysmurf - depends_on: - - "crossbar" diff --git a/docs/agents/pysmurf/pysmurf-monitor.rst b/docs/agents/pysmurf/pysmurf-monitor.rst index 60c9c83a3..6181ea0c5 100644 --- a/docs/agents/pysmurf/pysmurf-monitor.rst +++ b/docs/agents/pysmurf/pysmurf-monitor.rst @@ -138,8 +138,6 @@ An example docker-compose entry might look like:: volumes: - ${OCS_CONFIG_DIR}:/config - /data:/data - depends_on: - - "crossbar" Where DB_HOST, DB, DB_USER, and DB_PW are set in the ``.env`` file in the same dir as -the docker-compose file. \ No newline at end of file +the docker-compose file. diff --git a/docs/simulators/ls240_simulator.rst b/docs/simulators/ls240_simulator.rst index 3e4399c89..92cd55c3f 100644 --- a/docs/simulators/ls240_simulator.rst +++ b/docs/simulators/ls240_simulator.rst @@ -48,9 +48,9 @@ docker-compose service configuration is shown here:: image: simonsobs/ocs-lakeshore240-simulator:latest hostname: ocs-docker -It is helpful to have other live monitor components such as Grafana and either -the sisock quick look components or an InfluxDB container for quickly -visualizing whether the 240 Agent is getting data from the simulator. +It is helpful to have other live monitor components such as Grafana and an +InfluxDB container for quickly visualizing whether the 240 Agent is getting +data from the simulator. Running Outside of Docker ------------------------- From 61711a2578ef92fb011856a11a6a97e4fecd7bd3 Mon Sep 17 00:00:00 2001 From: harukinishino <33713578+harukinishino@users.noreply.github.com> Date: Tue, 30 Mar 2021 00:09:36 +0900 Subject: [PATCH 36/43] New CHWP encoder readout agent (#80) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Initial commit for the CHWP BeagleboneBlack agent * Bug fix for CHWP agent, to run even w/o encoder signal input * another bug fix for the chwp agent * another bug fix for chwp * Modifications for the latest CHWP beaglebone software. Also, some cleanups * style changes suggensted by pylint * Modifications for CHWP encoder OCS agent to address most of the comments after the PR. 
Still needs testing at the actual setup:
* This is still using computer system time for timestamps, but not using the same timestamps for arrays of data. Timestamps are approximately calculated using system time and clock counter values. This is convenient for looking at the data in grafana, when we have some issues in IRIG decoding or IRIG itself.
* Added irig_time, unix timestamps decoded from IRIG
* Added irig_info, to save raw IRIG bit info
* Merged multiple data feeds into one. Currently, publish_to_feed happens several times within a few seconds.
* Moved flush_buffer out of the loop
* Added some docstrings
* Clarified the version of beaglebone codes that can work with this agent using sha256sum. Maybe there is a better way to do this…

Some other bug fixes/improvements:
* Changed the data length for the encoder data in a packet, accordingly due to a change on the beaglebone side to avoid packet fragmentations
* Reduced some standard output messages
* Fixed the bug which was dropping some data unintentionally
* Separate the part to publish encoder counter values from the IRIG publishing part, to take encoder data even without IRIG.
* Approximate HWP frequency calculation uses the expected beaglebone clock frequency, not the decoded IRIG info.
* Added the estimation of beaglebone clock frequency
* Temporary(?) modifications for not publishing data feeds in uint64_t
* encoder counter data subsampling for influxdb-publisher
* For int64 support
* modification for adding a command-line argument for hwp bbb port number
* added chwp agent in socs/docker-compose.yml. minimal document in docs/agents/chwp_encoder.rst
* a little bit of error handling of invalid IRIG-B data
* Fix small typos and add chwp to index
* Protect ocs import on RTD build
* Replace mix of tabs with spaces
* Swap print statements for txaio logging
* Add dumb-init and replace sisock in Dockerfile

Co-authored-by: Charles Hill
Co-authored-by: Brian Koopman
---
 agents/chwp/Dockerfile       |  19 +
 agents/chwp/hwpbbb_agent.py  | 669 +++++++++++++++++++++++++++++++++++
 docker-compose.yml           |  14 +-
 docs/agents/chwp_encoder.rst |  72 ++++
 docs/index.rst               |   1 +
 5 files changed, 768 insertions(+), 7 deletions(-)
 create mode 100644 agents/chwp/Dockerfile
 create mode 100644 agents/chwp/hwpbbb_agent.py
 create mode 100644 docs/agents/chwp_encoder.rst

diff --git a/agents/chwp/Dockerfile b/agents/chwp/Dockerfile
new file mode 100644
index 000000000..ef1312a4e
--- /dev/null
+++ b/agents/chwp/Dockerfile
@@ -0,0 +1,19 @@
+# CHWP Agent
+
+# Use socs base image
+FROM socs:latest
+
+# Set the working directory to the agent directory
+WORKDIR /app/socs/agents/chwp/
+
+# Copy this agent into the app/agents directory
+COPY . .
+
+# Run agent on container startup
+ENTRYPOINT ["dumb-init", "python3", "-u", "hwpbbb_agent.py"]
+
+# Sensible default arguments
+CMD ["--site-hub=ws://crossbar:8001/ws", \
+     "--site-http=http://crossbar:8001/call"]
+
+EXPOSE 8080/udp
diff --git a/agents/chwp/hwpbbb_agent.py b/agents/chwp/hwpbbb_agent.py
new file mode 100644
index 000000000..bfa4ac8df
--- /dev/null
+++ b/agents/chwp/hwpbbb_agent.py
@@ -0,0 +1,669 @@
+"""OCS agent module to read the data from beagleboneblack for CHWP encoder
+
+Note
+----
+    This is confirmed to work with the following versions of beagleboneblack software:
+    - currently hwpdaq branch in spt3g_software_sa repository: 731ff39
+      (sha256sum)
+    - Encoder1.bin: c8281525bdd0efae66aede7cffc3520ab719cfd67f6c2d7fd01509a4289a9d32
+    - Encoder2.bin: a6ed9d89e9cf26036bf1da9e7e2098da85bbfa6eb08d0caef1e3c40877dd5077
+    - IRIG1.bin: 7bc37b30a1759eb792f0db176bcd6080f9c3c7ec78ba2e1614166b2031416091
+    - IRIG2.bin: d206dd075f73c32684d8319c9ed19f019cc705a9f253725de085eb511b8c0a12
+
+Data feeds
+----------
+HWPEncoder:
+    (HWPEncoder_counter_sub)
+    counter_sub: subsampled counter values [::NUM_SUBSAMPLE]
+    counter_index_sub: subsampled index counter values
+
+    (HWPEncoder_freq)
+    approx_hwp_freq: approximate estimate of hwp rotation frequency
+    diff_counter_mean: mean of diff(counter)
+    diff_index_mean: mean of diff(counter_index)
+    diff_counter_std: std of diff(counter)
+    diff_index_std: std of diff(counter_index)
+
+    (HWPEncoder_quad)
+    quad: quadrature data
+
+    (HWPEncoder_irig)
+    irig_time: decoded time in seconds since the unix epoch
+    rising_edge_cont: BBB clock count values
+        for the IRIG on-time reference marker rising edge
+    irig_sec: seconds decoded from IRIG-B
+    irig_min: minutes decoded from IRIG-B
+    irig_hour: hours decoded from IRIG-B
+    irig_day: days decoded from IRIG-B
+    irig_year: years decoded from IRIG-B
+    bbb_clock_freq: BBB clock frequency estimate using IRIG-B
+
+    (HWPEncoder_irig_raw)
+    irig_synch_pulse_clock_time: reference marker time in sec
+    irig_synch_pulse_clock_counts: clock counts for reference markers
+    irig_info: IRIG bit info
+
+HWPEncoder_full: separated feed for full-sample HWP encoder data,
to be included in influxdb database + (HWPEncoder_counter) + counter: BBB counter values for encoder signal edges + counter_index: index numbers for detected edges by BBB +""" + +import socket +import struct +import time +import calendar +from collections import deque +import select +import numpy as np +import txaio +txaio.use_twisted() + +## Required by OCS +ON_RTD = os.environ.get('READTHEDOCS') == 'True' +if not ON_RTD: + from ocs import ocs_agent, site_config + from ocs.ocs_twisted import TimeoutLock + +## These three values (COUNTER_INFO_LENGTH, COUNTER_PACKET_SIZE, IRIG_PACKET_SIZE) +## should be consistent with the software on beaglebone. +# The number of datapoints in every encoder packet from the Beaglebone +COUNTER_INFO_LENGTH = 120 +# The size of the encoder packet from the beaglebone +# (header + 3*COUNTER_INFO_LENGTH datapoint information + 1 quadrature readout) +COUNTER_PACKET_SIZE = 4 + 4 * COUNTER_INFO_LENGTH+8 * COUNTER_INFO_LENGTH + 4 +# The size of the IRIG packet from the Beaglebone +IRIG_PACKET_SIZE = 132 + +# The slit scaler value for rough HWP rotating frequency +NUM_SLITS = 570 +# Number of encoder counter samples to publish at once +NUM_ENCODER_TO_PUBLISH = 4200 +# Seconds to publish encoder data even before reaching NUM_ENCODER_TO_PUBLISH +SEC_ENCODER_TO_PUBLISH = 10 +# Subsampling facot for the encoder counter data to influxdb +NUM_SUBSAMPLE = 500 + +### Definitions of utility functions ### + +def de_irig(val, base_shift=0): + """Converts the IRIG signal into sec/min/hours/day/year depending on the parameters + + Parameters + ---------- + val : int + raw IRIG bit info of each 100msec chunk + base_shift : int, optional + number of bit shifts. This should be 0 except for seccods + + Returns + ------- + int + Either of sec/min/hourds/day/year + + """ + return (((val >> (0+base_shift)) & 1) + + ((val >> (1+base_shift)) & 1) * 2 + + ((val >> (2+base_shift)) & 1) * 4 + + ((val >> (3+base_shift)) & 1) * 8 + + ((val >> (5+base_shift)) & 1) * 10 + + ((val >> (6+base_shift)) & 1) * 20 + + ((val >> (7+base_shift)) & 1) * 40 + + ((val >> (8+base_shift)) & 1) * 80) + +def count2time(counts, t_offset=0.): + """Quick etimation of time using Beagleboneblack clock counts + + Parameters + ---------- + counts : list of int + Beagleboneblack clock counter value + t_offset : int, optional + time offset in seconds + + Returns + ------- + list of float + Estimated time in seconds assuming the Beagleboneblack clock frequency is 200 MHz. 
+        Without specifying t_offset, output is just the difference
+        from the first sample in the input list
+
+    """
+    t_array = np.array(counts, dtype=float) - counts[0]
+    # Assuming BBB clock is 200MHz
+    t_array *= 5.e-9
+    t_array += t_offset
+
+    return t_array.tolist()
+
+class EncoderParser:
+    """Class which will parse the incoming packets from the BeagleboneBlack and store the data
+
+    Attributes
+    ----------
+    counter_queue : deque object
+        deque to store the encoder counter data
+    irig_queue : deque object
+        deque to store the IRIG data
+    is_start : int
+        Used for procedures that only run when data collection begins
+        Initialized to be 1, until the first IRIG parsing happens and set to 0
+    start_time : list of int
+        Will hold the time at which data collection started [hours, mins, secs]
+    current_time : int
+        Current unix timestamp in seconds parsed from IRIG
+    sock : socket.socket
+        a UDP socket to connect to the Beagleboneblack
+    data : bytes
+        Buffer which will hold the raw data from the Beaglebone before it is parsed
+    read_chunk_size : int
+        Maximum data size to receive UDP packets in bytes
+
+    Parameters
+    ----------
+    beaglebone_port : int, optional
+        Port number to receive UDP packets from Beagleboneblack
+        This must be the same as the localPort in the Beaglebone code
+    read_chunk_size : int, optional
+        Maximum data size to receive UDP packets in bytes
+        This value shouldn't need to change
+
+    """
+    def __init__(self, beaglebone_port=8080, read_chunk_size=8196):
+        # Creates two queues to hold the data from the encoder and IRIG
+        # (quadrature readouts are stored alongside the encoder counter data)
+        self.counter_queue = deque()
+        self.irig_queue = deque()
+
+        # Used for procedures that only run when data collection begins
+        self.is_start = 1
+        # Will hold the time at which data collection started [hours, mins, secs]
+        self.start_time = [0, 0, 0]
+        # Will be continually updated with the unix time in seconds
+        self.current_time = 0
+
+        # Creates a UDP socket to connect to the Beaglebone
+        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        # Binds the socket to a specific ip address and port
+        # The ip address can be blank for accepting any UDP packet to the port
+        self.sock.bind(('', beaglebone_port))
+        #self.sock.setblocking(0)
+
+        # Bytes buffer which will hold the raw data from the Beaglebone before it is parsed
+        self.data = b''
+        self.read_chunk_size = read_chunk_size
+
+        self.log = txaio.make_logger()
+
+    def pretty_print_irig_info(self, irig_info, edge, print_out=False):
+        """Takes the IRIG information, prints it to the screen, sets the current time,
+        and returns the current time
+
+        Parameters
+        ----------
+        irig_info : list of int
+            IRIG bit info
+        edge : int
+            Clock count of rising edge of a reference marker bit
+        print_out : bool, optional
+            Set True to print out the parsed timestamp
+
+        Returns
+        -------
+        current_time : int
+            Current unix timestamp in seconds parsed from IRIG
+
+        """
+        # Calls de_irig() to get the sec/min/hour of the IRIG packet
+        secs = de_irig(irig_info[0], 1)
+        mins = de_irig(irig_info[1], 0)
+        hours = de_irig(irig_info[2], 0)
+        day = de_irig(irig_info[3], 0) \
+            + de_irig(irig_info[4], 0) * 100
+        year = de_irig(irig_info[5], 0)
+
+        # If it is the first time that the function is called then set self.start_time
+        # to the current time
+        if self.is_start == 1:
+            self.start_time = [hours, mins, secs]
+            self.is_start = 0
+
+        if print_out:
+            # Find the sec/min/hour digit difference from the start time
+            dsecs = secs - self.start_time[2]
+            dmins = mins - self.start_time[1]
+            dhours = hours - self.start_time[0]
+
+            # Corrections to make sure that dsecs/dmins/dhours are all positive
+            if dhours < 0:
+                dhours = dhours + 24
+
+            if (dmins < 0) or ((dmins == 0) and (dsecs < 0)):
+                dmins = dmins + 60
+                dhours = dhours - 1
+
+            if dsecs < 0:
+                dsecs = dsecs + 60
+                dmins = dmins - 1
+
+            # Print UTC time, run time, and current clock count of the beaglebone
+            print('Current Time:', ('%d:%d:%d'%(hours, mins, secs)), \
+                  'Run Time', ('%d:%d:%d'%(dhours, dmins, dsecs)), \
+                  'Clock Count', edge)
+
+        # Set the current time in seconds (changed to seconds from unix epoch)
+        #self.current_time = secs + mins*60 + hours*3600
+        try:
+            st_time = time.strptime("%d %d %d:%d:%d"%(year, day, hours, mins, secs), \
+                                    "%y %j %H:%M:%S")
+            self.current_time = calendar.timegm(st_time)
+        except ValueError:
+            self.log.error(f'Invalid IRIG-B timestamp: {year} {day} {hours} {mins} {secs}')
+            self.current_time = -1
+
+        return self.current_time
+
+    def check_data_length(self, start_index, size_of_read):
+        """Checks to make sure that self.data is at least the expected size.
+        Returns False if the data is too short, True otherwise.
+
+        Parameters
+        ----------
+        start_index : int
+            first index of the data to read
+        size_of_read : int
+            data size to read in bytes
+
+        Returns
+        -------
+        bool
+            False if the current data size is smaller than the data size suggested by the header info
+
+        """
+        if start_index + size_of_read > len(self.data):
+            self.data = self.data[start_index:]
+            return False
+
+        return True
+
+    def grab_and_parse_data(self):
+        """Grabs self.data, determines what packet it corresponds to, and parses the data.
+        This is a while loop to look for an appropriate header in a packet from beaglebone.
+        Then, the data will be passed to an appropriate parsing method
+        and stored in either counter_queue or irig_queue.
+        The detailed structure of the queues can be found in parse_counter_info/parse_irig_info.
+
+        If an unexpected data length is found, this will output some messages:
+        Error 0: data length is shorter than the header size (4 bytes)
+        Error 1: data length is shorter than the encoder counter info
+                 even though the encoder packet header is found.
+        Error 2: data length is shorter than the IRIG info
+                 even though the IRIG packet header is found.
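+
+        Each packet begins with a 4-byte header word identifying its type;
+        besides the encoder packet header, the loop below recognizes 0xcafe
+        (IRIG), 0xe12a (error), and 0x1234 (timeout).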
+        """
+        while True:
+            # If there is data from the socket attached to the beaglebone then
+            #   ready[0] = true
+            # If not then continue checking for 2 seconds and if there is still no data
+            #   ready[0] = false
+            ready = select.select([self.sock], [], [], 2)
+            if ready[0]:
+                # Add the data from the socket attached to the beaglebone
+                # to the self.data buffer
+                data = self.sock.recv(self.read_chunk_size)
+                if len(self.data) > 0:
+                    self.data += data
+                else:
+                    self.data = data
+
+                while True:
+                    # Check to make sure that there is at least 1 int in the packet
+                    # The first int in every packet should be the header
+                    if not self.check_data_length(0, 4):
+                        self.log.error('Error 0')
+                        break
+
+                    header = self.data[0:4]
+                    # Convert a structure value from the beaglebone (header) to an int
+                    header = struct.unpack('<I', header)[0]
+
+                    # Encoder
+                    if header == 0x1eaf:
+                        # Make sure the data is the correct length for an Encoder Packet
+                        if not self.check_data_length(0, COUNTER_PACKET_SIZE):
+                            self.log.error('Error 1')
+                            break
+                        # Call the method self.parse_counter_info() to parse the Encoder Packet
+                        self.parse_counter_info(self.data[4 : COUNTER_PACKET_SIZE])
+                        if len(self.data) >= COUNTER_PACKET_SIZE:
+                            self.data = self.data[COUNTER_PACKET_SIZE:]
+
+                    # IRIG
+                    elif header == 0xcafe:
+                        # Make sure the data is the correct length for an IRIG Packet
+                        if not self.check_data_length(0, IRIG_PACKET_SIZE):
+                            self.log.error('Error 2')
+                            break
+                        # Call the method self.parse_irig_info() to parse the IRIG Packet
+                        self.parse_irig_info(self.data[4 : IRIG_PACKET_SIZE])
+                        if len(self.data) >= IRIG_PACKET_SIZE:
+                            self.data = self.data[IRIG_PACKET_SIZE:]
+
+                    # Error
+                    # An Error Packet will be sent if there is a timing error in the
+                    # synchronization pulses of the IRIG packet
+                    # If you see 'Packet Error' check to make sure the IRIG is functioning as
+                    # intended and that all the connections are made correctly
+                    elif header == 0xe12a:
+                        self.log.error('Packet Error')
+                        # Clear self.data
+                        self.data = b''
+                    elif header == 0x1234:
+                        self.log.error('Received timeout packet.')
+                        # Clear self.data
+                        self.data = b''
+                    else:
+                        self.log.error('Bad header')
+                        # Clear self.data
+                        self.data = b''
+
+                    if len(self.data) == 0:
+                        break
+                break
+
+            # If there is no data from the beaglebone 'Looking for data ...' will print
+            # If you see this make sure that the beaglebone has been set up properly
+            # print('Looking for data ...')
+
+    def parse_counter_info(self, data):
+        """Method to parse the Encoder Packet and put it into counter_queue
+
+        Parameters
+        ----------
+        data : bytes
+            bytes containing the encoder counter info
+
+        Note:
+        'data' structure:
+        (Please note that '150' below might be replaced by COUNTER_INFO_LENGTH)
+        [0] Readout from the quadrature
+        [1-150] clock counts of 150 data points
+        [151-300] corresponding clock overflow of the 150 data points (each overflow count
+                  is equal to 2^16 clock counts)
+        [301-450] corresponding absolute number of the 150 data points ((1, 2, 3, etc ...)
+                  or (150, 151, 152, etc ...) or (301, 302, 303, etc ...) etc ...)
+
+        counter_queue structure:
+        counter_queue = [[64 bit clock counts],
+                         [clock count indices incremented by every edge],
+                         quadrature,
+                         current system time]
+        """
+
+        # Convert the Encoder Packet structure into a numpy array
+        derter = np.array(struct.unpack('<' + 'I' + 'III'*COUNTER_INFO_LENGTH, data))
+
+        # self.quad_queue.append(derter[0].item())  # merged to counter_queue
+        self.counter_queue.append((derter[1:COUNTER_INFO_LENGTH+1] \
+                                   + (derter[COUNTER_INFO_LENGTH+1:2*COUNTER_INFO_LENGTH+1] << 32), \
+                                   derter[2*COUNTER_INFO_LENGTH+1:3*COUNTER_INFO_LENGTH+1], \
+                                   derter[0].item(), time.time()))
+
+    def parse_irig_info(self, data):
+        """Method to parse the IRIG Packet and put it into the irig_queue
+
+        Parameters
+        ----------
+        data : bytes
+            bytes containing the IRIG info
+
+        Note
+        ----
+        'data' structure:
+        [0] clock count of the IRIG Packet which the UTC time corresponds to
+        [1] overflow count of initial rising edge
+        [2] binary encoding of the second data
+        [3] binary encoding of the minute data
+        [4] binary encoding of the hour data
+        [5-11] additional IRIG information which we do not use
+        [12-21] synchronization pulse clock counts
+        [22-31] overflow count at each synchronization pulse
+
+        irig_queue structure:
+        irig_queue = [Packet clock count,
+                      Packet UTC time in sec,
+                      [binary encoded IRIG data],
+                      [synch pulses clock counts],
+                      current system time]
+
+        """
+
+        # Convert the IRIG Packet structure into a numpy array
+        unpacked_data = struct.unpack(' 0 and irig_time > 0:
+                bbb_clock_freq = float(rising_edge_count - self.rising_edge_count) \
+                                 / (irig_time - self.irig_time)
+            else:
+                bbb_clock_freq = 0.
+            data['data']['bbb_clock_freq'] = bbb_clock_freq
+
+            self.agent.publish_to_feed('HWPEncoder', data)
+            self.rising_edge_count = rising_edge_count
+            self.irig_time = irig_time
+
+            # saving clock counts for every reference edge and every irig bit info
+            data = {'timestamps':[], 'block_name':'HWPEncoder_irig_raw', 'data':{}}
+            # 0.09: time difference in seconds b/w reference marker and
+            #       the first index marker
+            data['timestamps'] = sys_time + 0.09 + np.arange(10) * 0.1
+            data['data']['irig_synch_pulse_clock_time'] = list(irig_time + 0.09 + \
+                                                               np.arange(10) * 0.1)
+            data['data']['irig_synch_pulse_clock_counts'] = synch_pulse_clock_counts
+            data['data']['irig_info'] = list(irig_info)
+            self.agent.publish_to_feed('HWPEncoder', data)
+
+            ## Reducing the packet size, less frequent publishing
+            # Encoder data; packet coming rate = 570*2*2/150/4 ~ 4Hz packet at 2 Hz rotation
+            while len(self.parser.counter_queue):
+                counter_data = self.parser.counter_queue.popleft()
+
+                counter_list += counter_data[0].tolist()
+                counter_index_list += counter_data[1].tolist()
+
+                quad_data = counter_data[2]
+                sys_time = counter_data[3]
+
+                received_time_list.append(sys_time)
+                quad_list.append(quad_data)
+                quad_counter_list.append(counter_data[0][0])
+            ct = time.time()
+            if len(counter_list) >= NUM_ENCODER_TO_PUBLISH \
+               or (len(counter_list) \
+                   and (ct - time_encoder_published) > SEC_ENCODER_TO_PUBLISH):
+                # Publishing quadrature data first
+                data = {'timestamps':[], 'block_name':'HWPEncoder_quad', 'data':{}}
+                data['timestamps'] = received_time_list
+                data['data']['quad'] = quad_list
+                self.agent.publish_to_feed('HWPEncoder', data)
+
+                # Publishing counter data
+                # (full sampled data will not be recorded in influxdb)
+                data = {'timestamps':[], 'block_name':'HWPEncoder_counter', 'data':{}}
+                data['data']['counter'] = counter_list
+                data['data']['counter_index'] = counter_index_list
+
+                data['timestamps'] = count2time(counter_list, received_time_list[0])
+                self.agent.publish_to_feed('HWPEncoder_full', data)
+
+                ## Subsampled data for influxdb display
+                data_subsampled = {'block_name':'HWPEncoder_counter_sub', 'data':{}}
+                data_subsampled['timestamps'] = np.array(data['timestamps'])\
+                                                [::NUM_SUBSAMPLE].tolist()
+                data_subsampled['data']['counter_sub'] = np.array(counter_list)\
+                                                         [::NUM_SUBSAMPLE].tolist()
+                data_subsampled['data']['counter_index_sub'] = np.array(counter_index_list)\
+                                                               [::NUM_SUBSAMPLE].tolist()
+                self.agent.publish_to_feed('HWPEncoder', data_subsampled)
+
+                # For rough estimation of HWP rotation frequency
+                data = {'timestamp': received_time_list[0],
+                        'block_name':'HWPEncoder_freq', 'data':{}}
+                dclock_counter = counter_list[-1] - counter_list[0]
+                dindex_counter = counter_index_list[-1] - counter_index_list[0]
+                # Assuming Beagleboneblack clock is 200 MHz
+                pulse_rate = dindex_counter * 2.e8 / dclock_counter
+                hwp_freq = pulse_rate / 2. / NUM_SLITS
+
+                diff_counter = np.diff(counter_list)
+                diff_index = np.diff(counter_index_list)
+
+                self.log.info(f'pulse_rate {pulse_rate} {hwp_freq}')
+                data['data']['approx_hwp_freq'] = hwp_freq
+                data['data']['diff_counter_mean'] = np.mean(diff_counter)
+                data['data']['diff_index_mean'] = np.mean(diff_index)
+                data['data']['diff_counter_std'] = np.std(diff_counter)
+                data['data']['diff_index_std'] = np.std(diff_index)
+                self.agent.publish_to_feed('HWPEncoder', data)
+
+                # Initialize lists
+                counter_list = []
+                counter_index_list = []
+                quad_list = []
+                quad_counter_list = []
+                received_time_list = []
+
+                time_encoder_published = ct
+
+        self.agent.feeds['HWPEncoder'].flush_buffer()
+        return True, 'Acquisition exited cleanly.'
+
+    def stop_acq(self, session, params=None):
+        """
+        Stops the data acquisition.
+        """
+        if self.take_data:
+            self.take_data = False
+            return True, 'requested to stop taking data.'
+
+        return False, 'acq is not currently running.'
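+
+# A standalone sketch of the rotation-frequency arithmetic published in the
+# 'HWPEncoder_freq' block above. This helper is illustrative only and is not
+# called by the agent; it assumes the 200 MHz BBB clock and the NUM_SLITS
+# constant defined at the top of this module.
+def approx_hwp_freq_sketch(counter_list, counter_index_list):
+    # Clock ticks and encoder edges elapsed over one published chunk
+    dclock_counter = counter_list[-1] - counter_list[0]
+    dindex_counter = counter_index_list[-1] - counter_index_list[0]
+    # Edge rate in Hz, assuming the BBB counts at 200 MHz
+    pulse_rate = dindex_counter * 2.e8 / dclock_counter
+    # Same scaling as used in the acq loop (factor 2 and NUM_SLITS per revolution)
+    return pulse_rate / 2. / NUM_SLITS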
+
+# Portion of the code that runs
+if __name__ == '__main__':
+    parser = site_config.add_arguments()
+    pgroup = parser.add_argument_group('Agent Options')
+    pgroup.add_argument('--port', default=8080)
+    args = parser.parse_args()
+
+    site_config.reparse_args(args, 'HWPBBBAgent')
+    agent, runner = ocs_agent.init_site_agent(args)
+    hwp_bbb_agent = HWPBBBAgent(agent, port=args.port)
+    agent.register_process('acq', hwp_bbb_agent.start_acq, hwp_bbb_agent.stop_acq, startup=True)
+
+    runner.run(agent, auto_reconnect=True)
diff --git a/docker-compose.yml b/docker-compose.yml
index 09de0eb71..64cd482f7 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -124,19 +124,19 @@ services:
     build: ./agents/tektronix3021c/
 
   # --------------------------------------------------------------------------
-  # SOCS Simulators
+  # CHWP Encoder BBB agent
   # --------------------------------------------------------------------------
+  ocs-hwpbbb-agent:
+    image: "ocs-hwpbbb-agent"
+    build: ./agents/chwp/
 
   # --------------------------------------------------------------------------
-  # Lakeshore240 Simulator
+  # SOCS Simulators
   # --------------------------------------------------------------------------
-  ocs-lakeshore240-simulator:
-    image: "ocs-lakeshore240-simulator"
-    build: ./simulators/lakeshore240/
-
+  # --------------------------------------------------------------------------
   # Lakeshore240 Simulator
   # --------------------------------------------------------------------------
   ocs-lakeshore240-simulator:
     image: "ocs-lakeshore240-simulator"
-    build: ./simulators/lakeshore240/
+    build: ./simulators/lakeshore240/
diff --git a/docs/agents/chwp_encoder.rst b/docs/agents/chwp_encoder.rst
new file mode 100644
index 000000000..02ce3fc24
--- /dev/null
+++ b/docs/agents/chwp_encoder.rst
@@ -0,0 +1,72 @@
+.. highlight:: rst
+
+.. _chwp_encoder:
+
+======================
+CHWP Encoder BBB Agent
+======================
+
+The optical encoder signals of the CHWP are captured by Beaglebone Black (BBB)
+boards with the IRIG-B timing reference.
+This agent receives and decodes UDP packets from BBB and publishes the data
+feeds.
+
+Configuration File Examples
+---------------------------
+Below are useful configuration examples for the relevant OCS files and for
+running the agent in a docker container.
+
+ocs-config
+``````````
+To configure the CHWP encoder BBB agent we need to add a HWPBBBAgent
+block to our ocs configuration file. Here is an example configuration block
+using all of the available arguments::
+
+  {'agent-class': 'HWPBBBAgent',
+   'instance-id': 'HBA0',
+   'arguments': [
+     ['--port', '8080'],
+   ]},
+  {'agent-class': 'HWPBBBAgent',
+   'instance-id': 'HBA1',
+   'arguments': [
+     ['--port', '8081'],
+   ]}
+
+This example runs two agents because we usually have a pair of BBBs, one for
+each of the A and B phases of the optical encoder, for redundancy.
+Multiple BBBs on the same network are distinguished by port numbers.
+You should assign a port for each BBB, consistent with the setting on the
+BBB side.
+
+Docker
+``````
+The CHWP BBB agent can be run via a Docker container. The following is an
+example of what to insert into your institution's docker-compose file.
+This again is an example to run multiple agents::
+
+  ocs-hwpbbb-agent-HBA0:
+    image: simonsobs/ocs-hwpbbb-agent:latest
+    ports:
+      - "8080:8080/udp"
+    hostname: ocs-docker
+    volumes:
+      - ${OCS_CONFIG_DIR}:/config:ro
+    command:
+      - "--instance-id=HBA0"
+      - "--site-hub=ws://crossbar:8001/ws"
+      - "--site-http=http://crossbar:8001/call"
+
+  ocs-hwpbbb-agent-HBA1:
+    image: simonsobs/ocs-hwpbbb-agent:latest
+    ports:
+      - "8081:8081/udp"
+    hostname: ocs-docker
+    volumes:
+      - ${OCS_CONFIG_DIR}:/config:ro
+    command:
+      - "--instance-id=HBA1"
+      - "--site-hub=ws://crossbar:8001/ws"
+      - "--site-http=http://crossbar:8001/call"
+
+
diff --git a/docs/index.rst b/docs/index.rst
index 8521ade03..7318e51af 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -37,6 +37,7 @@ Simulator Reference Simulators are used to mock software and hardware
 
    agents/bluefors_agent
+   agents/chwp_encoder
    agents/cryomech_cpa
    agents/labjack
    agents/lakeshore240
From 46558e06098686ce9f05b32dee21033f5399abcc Mon Sep 17 00:00:00 2001
From: Brian Koopman
Date: Tue, 23 Mar 2021 19:31:43 +0000
Subject: [PATCH 37/43] Add stream-id argument to smurf stream simulator agent

---
 agents/smurf_stream_simulator/smurf_stream_simulator.py | 6 ++++--
 docs/simulators/smurf_stream_simulator.rst              | 5 ++++-
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/agents/smurf_stream_simulator/smurf_stream_simulator.py b/agents/smurf_stream_simulator/smurf_stream_simulator.py
index 1e9b7f593..d9fd2b3d0 100644
--- a/agents/smurf_stream_simulator/smurf_stream_simulator.py
+++ b/agents/smurf_stream_simulator/smurf_stream_simulator.py
@@ -220,7 +220,6 @@ def _send_start_flowcontrol_frame(self):
             f['sostream_id'] = self.stream_id
             self.writer.Process(f)
 
-
     def _send_end_flowcontrol_frame(self):
         """Send END flowcontrol frame."""
         if self.writer is not None:
@@ -347,6 +346,8 @@ def make_parser(parser=None):
                         help="Port to listen on.")
     pgroup.add_argument("--num-chans", default=528,
                         help="Number of detector channels to simulate.")
+    pgroup.add_argument("--stream-id", default="stream_sim",
+                        help="Stream ID for the simulator.")
 
     return parser
 
@@ -364,7 +365,8 @@ def make_parser(parser=None):
     agent, runner = ocs_agent.init_site_agent(args)
     sim = SmurfStreamSimulator(agent, target_host=args.target_host,
                                port=int(args.port),
-                               num_chans=int(args.num_chans))
+                               num_chans=int(args.num_chans),
+                               stream_id=args.stream_id)
 
     agent.register_process('stream', sim.start_background_streamer,
                            sim.stop_background_streamer,
diff --git a/docs/simulators/smurf_stream_simulator.rst b/docs/simulators/smurf_stream_simulator.rst
index d8e859fd2..b725257f5 100644
--- a/docs/simulators/smurf_stream_simulator.rst
+++ b/docs/simulators/smurf_stream_simulator.rst
@@ -32,7 +32,8 @@ using all of the available arguments::
      'instance-id': 'smurf-stream',
      'arguments': [['--auto-start', True],
                    ['--port', '50000'],
-                   ['--num_chans', '528']]},
+                   ['--num_chans', '528'],
+                   ['--stream-id', 'stream_sim']]},
 
 Docker
 ``````
@@ -42,6 +43,8 @@ docker-compose service configuration is shown here::
    smurf-stream-sim:
      image: simonsobs/smurf-stream-sim
      hostname: ocs-docker
+     ports:
+       - "50000:50000"
      volumes:
        - ${OCS_CONFIG_DIR}:/config:ro
 
From 12d7bc43edec6d5cc33c0d230257c4a8dd8f24ad Mon Sep 17 00:00:00 2001
From: Katie Harrington
Date: Tue, 6 Apr 2021 10:10:18 -0500
Subject: [PATCH 38/43] Add XY Stage Agent for LATRt Stages (#153)

* Add XY Stage controller for LATRt XY Stages (movement still to be tested)

* fix locking bugs in movement. make data start automatically

* Add XY Stage controller for LATRt XY Stages (movement still to be tested)

* fix locking bugs in movement. make data start automatically

* actually reset lock release time

* use Pacemaker for nicer data!

* remove random sleep

* fixes based on pull request comments

* documentation debugging fixes

* removing underscores from name

* update ocs arg parsing to new version

* remove underscores

* Make Agent class name

Co-authored-by: kmharrington
Co-authored-by: Brian Koopman
---
 agents/ocs_plugin_so.py           |   1 +
 agents/xy_stage/xy_latrt_agent.py | 249 ++++++++++++++++++++++++++++++
 docs/agents/latrt_xy_stage.rst    |  72 +++++++++
 docs/index.rst                    |   1 +
 4 files changed, 323 insertions(+)
 create mode 100644 agents/xy_stage/xy_latrt_agent.py
 create mode 100644 docs/agents/latrt_xy_stage.rst

diff --git a/agents/ocs_plugin_so.py b/agents/ocs_plugin_so.py
index b08da8d2c..166059571 100644
--- a/agents/ocs_plugin_so.py
+++ b/agents/ocs_plugin_so.py
@@ -16,5 +16,6 @@
         ('BlueforsAgent', 'bluefors/bluefors_log_tracker.py'),
         ('HWPSimulatorAgent', 'hwp_sim/hwp_simulator_agent.py'),
         ('CryomechCPAAgent', 'cryomech_cpa/cryomech_cpa_agent.py'),
+        ('LATRtXYStageAgent', 'xy_stage/xy_latrt_agent.py'),
 ]:
     ocs.site_config.register_agent_class(n, os.path.join(root, f))
diff --git a/agents/xy_stage/xy_latrt_agent.py b/agents/xy_stage/xy_latrt_agent.py
new file mode 100644
index 000000000..a3100d930
--- /dev/null
+++ b/agents/xy_stage/xy_latrt_agent.py
@@ -0,0 +1,249 @@
+import os
+import argparse
+import time
+import txaio
+
+
+ON_RTD = os.environ.get('READTHEDOCS') == 'True'
+if not ON_RTD:
+    from ocs import ocs_agent, site_config
+    from ocs.ocs_twisted import TimeoutLock, Pacemaker
+
+    ## yes I shouldn't have named that module agent
+    from xy_agent.xy_connect import XY_Stage
+
+class LATRtXYStageAgent:
+    """
+    Agent for connecting to the LATRt XY Stages.
+
+    Args:
+        ip_addr: IP address where RPi server is running
+        port: Port the RPi Server is listening on
+        mode: 'acq': Start data acquisition on initialize
+        samp: default sampling frequency in Hz
+    """
+
+    def __init__(self, agent, ip_addr, port, mode=None, samp=2):
+
+        self.ip_addr = ip_addr
+        self.port = port
+
+        self.xy_stage = None
+        self.initialized = False
+        self.take_data = False
+        self.is_moving = False
+
+        self.agent = agent
+        self.log = agent.log
+        self.lock = TimeoutLock()
+
+        if mode == 'acq':
+            self.auto_acq = True
+        else:
+            self.auto_acq = False
+        self.sampling_frequency = float(samp)
+
+        ### register the position feeds
+        agg_params = {
+            'frame_length' : 10*60, #[sec]
+        }
+
+        self.agent.register_feed('positions',
+                                 record = True,
+                                 agg_params = agg_params,
+                                 buffer_time = 0)
+
+    def init_xy_stage_task(self, session, params=None):
+        """init_xy_stage_task(params=None)
+
+        Perform first time setup for communication with XY stages.
+
+        Args:
+            params (dict): Parameters dictionary for passing parameters to
+                task.
+        """
+
+        if params is None:
+            params = {}
+
+        self.log.debug("Trying to acquire lock")
+        with self.lock.acquire_timeout(timeout=0, job='init') as acquired:
+            # Locking mechanism stops code from proceeding if no lock acquired
+            if not acquired:
+                self.log.warn("Could not start init because {} is already running".format(self.lock.job))
+                return False, "Could not acquire lock."
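+            # acquire_timeout(timeout=0) is non-blocking: if any other
+            # operation currently holds the lock, this init attempt errors
+            # out immediately instead of queueing behind it.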
+            # Run the function you want to run
+            self.log.debug("Lock Acquired Connecting to Stages")
+
+            self.xy_stage = XY_Stage(self.ip_addr, self.port)
+            self.xy_stage.init_stages()
+            print("XY Stages Initialized")
+
+        # This part is for the record and to allow future calls to proceed,
+        # so does not require the lock
+        self.initialized = True
+        if self.auto_acq:
+            self.agent.start('acq')
+        return True, 'XY Stages Initialized.'
+
+    def move_x_cm(self, session, params):
+        """
+        params:
+            dict: { 'distance': float, 'velocity':float < 1.2}
+        """
+
+        with self.lock.acquire_timeout(timeout=3, job='move_x_cm') as acquired:
+            if not acquired:
+                self.log.warn(f"Could not start x move because lock held by {self.lock.job}")
+                return False, "Could not acquire lock"
+            self.xy_stage.move_x_cm(params.get('distance', 0), params.get('velocity', 1))
+
+        time.sleep(1)
+        while True:
+            ## data acquisition updates the moving field if it is running
+            if not self.take_data:
+                with self.lock.acquire_timeout(timeout=3, job='move_x_cm') as acquired:
+                    if not acquired:
+                        self.log.warn(f"Could not check because lock held by {self.lock.job}")
+                        return False, "Could not acquire lock"
+                    self.is_moving = self.xy_stage.moving
+
+            if not self.is_moving:
+                break
+        return True, "X Move Complete"
+
+    def move_y_cm(self, session, params):
+        """
+        params:
+            dict: { 'distance': float, 'velocity':float < 1.2}
+        """
+
+        with self.lock.acquire_timeout(timeout=3, job='move_y_cm') as acquired:
+            if not acquired:
+                self.log.warn(f"Could not start y move because lock held by {self.lock.job}")
+                return False, "could not acquire lock"
+            self.xy_stage.move_y_cm(params.get('distance', 0), params.get('velocity', 1))
+
+        time.sleep(1)
+        while True:
+            ## data acquisition updates the moving field if it is running
+            if not self.take_data:
+                with self.lock.acquire_timeout(timeout=3, job='move_y_cm') as acquired:
+                    if not acquired:
+                        self.log.warn(f"Could not check for move because lock held by {self.lock.job}")
+                        return False, "could not acquire lock"
+                    self.is_moving = self.xy_stage.moving
+            if not self.is_moving:
+                break
+        return True, "Y Move Complete"
+
+
+    def set_position(self, session, params):
+        """
+        params:
+            dict: {'position': (float, float)}
+        """
+        with self.lock.acquire_timeout(timeout=3, job='set_position') as acquired:
+            if not acquired:
+                self.log.warn(f"Could not set position because lock held by {self.lock.job}")
+                return False, "Could not acquire lock"
+
+            self.xy_stage.position = params['position']
+        return True, "Position Updated"
+
+    def start_acq(self, session, params=None):
+        """
+        params:
+            dict: {'sampling_frequency': float, sampling rate in Hz}
+        """
+        if params is None:
+            params = {}
+
+
+        f_sample = params.get('sampling_frequency', self.sampling_frequency)
+        pm = Pacemaker(f_sample, quantize=True)
+
+        if not self.initialized or self.xy_stage is None:
+            raise Exception("Connection to XY Stages not initialized")
+
+        with self.lock.acquire_timeout(timeout=0, job='acq') as acquired:
+            if not acquired:
+                self.log.warn("Could not start acq because {} is already running".format(self.lock.job))
+                return False, "Could not acquire lock."
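+
+            # This Process holds the lock for its whole lifetime; the loop
+            # below calls release_and_acquire() about once a second so that
+            # queued move/set Tasks get a chance at the hardware.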
+
+            self.log.info(f"Starting Data Acquisition for XY Stages at {f_sample} Hz")
+            session.set_status('running')
+            self.take_data = True
+            last_release = time.time()
+
+            while self.take_data:
+                if time.time() - last_release > 1.:
+                    if not self.lock.release_and_acquire(timeout=10):
+                        self.log.warn(f"Could not re-acquire lock now held by {self.lock.job}.")
+                        return False, "could not re-acquire lock"
+                    last_release = time.time()
+                pm.sleep()
+
+                data = {'timestamp':time.time(), 'block_name':'positions','data':{}}
+                pos = self.xy_stage.position
+                self.is_moving = self.xy_stage.moving
+
+                data['data']['x'] = pos[0]
+                data['data']['y'] = pos[1]
+
+                self.agent.publish_to_feed('positions',data)
+
+        return True, 'Acquisition exited cleanly.'
+
+    def stop_acq(self, session, params=None):
+        """
+        params:
+            dict: {}
+        """
+        if self.take_data:
+            self.take_data = False
+            return True, 'requested to stop taking data.'
+        else:
+            return False, 'acq is not currently running.'
+
+def make_parser(parser=None):
+    """Build the argument parser for the Agent. Allows sphinx to automatically
+    build documentation based on this function.
+    """
+    if parser is None:
+        parser = argparse.ArgumentParser()
+
+    # Add options specific to this agent.
+    pgroup = parser.add_argument_group('Agent Options')
+    pgroup.add_argument('--ip-address')
+    pgroup.add_argument('--port')
+    pgroup.add_argument('--mode')
+    pgroup.add_argument('--sampling_frequency')
+    return parser
+
+
+if __name__ == '__main__':
+    # For logging
+    txaio.use_twisted()
+    LOG = txaio.make_logger()
+
+    # Start logging
+    txaio.start_logging(level=os.environ.get("LOGLEVEL", "info"))
+
+    parser = make_parser()
+
+    # Interpret options in the context of site_config.
+    args = site_config.parse_args(agent_class='LATRtXYStageAgent', parser=parser)
+
+
+    agent, runner = ocs_agent.init_site_agent(args)
+
+    xy_agent = LATRtXYStageAgent(agent, args.ip_address, args.port, args.mode, args.sampling_frequency)
+
+    agent.register_task('init_xy_stage', xy_agent.init_xy_stage_task)
+    agent.register_task('move_x_cm', xy_agent.move_x_cm)
+    agent.register_task('move_y_cm', xy_agent.move_y_cm)
+    agent.register_task('set_position', xy_agent.set_position)
+
+    agent.register_process('acq', xy_agent.start_acq, xy_agent.stop_acq)
+
+    runner.run(agent, auto_reconnect=True)
diff --git a/docs/agents/latrt_xy_stage.rst b/docs/agents/latrt_xy_stage.rst
new file mode 100644
index 000000000..d9e1ee3c7
--- /dev/null
+++ b/docs/agents/latrt_xy_stage.rst
@@ -0,0 +1,72 @@
+.. highlight:: rst
+
+.. _latrt_xy_stage:
+
+=====================
+LATRt XY Stage Agent
+=====================
+
+This agent is used to communicate with the XY Stages used in the LATRt lab.
+These stages are run off a Raspberry Pi connected to some custom electronics
+boards for communicating with the stages.
+
+Since control of these stages needs to be accessible inside and outside OCS,
+their drivers are shared `here
+`_.
+
+.. argparse::
+    :filename: ../agents/xy_stage/xy_latrt_agent.py
+    :func: make_parser
+    :prog: python3 xy_latrt_agent.py
+
+
+Configuration File Examples
+---------------------------
+Below are configuration examples for the ocs config file and for running the
+Agent in a docker container.
+
+ocs-config
+``````````
+To configure the LATRt XY Stage Agent we need to add a block to our ocs
+configuration file. Here is an example configuration block using all of
+the available arguments::
+
+  {'agent-class': 'LATRtXYStageAgent',
+   'instance-id': 'XYWing',
+   'arguments': [
+     ['--ip-address', '192.168.10.15'],
+     ['--port', 3010],
+     ['--mode', 'acq'],
+     ['--sampling_frequency', '2'],
+   ]},
+
+Example Client
+--------------
+Below is an example client demonstrating full agent functionality.
+Note that all tasks can be run even while the data acquisition process
+is running::
+
+    from ocs.matched_client import MatchedClient
+
+    #Initialize the Stages
+    xy_agent = MatchedClient('XYWing', args=[])
+    xy_agent.init_xy_stage.start()
+    xy_agent.init_xy_stage.wait()
+
+    #Move in X
+    xy_agent.move_x_cm.start(distance=6, velocity=1)
+    xy_agent.move_x_cm.wait()
+
+    #Move in Y
+    xy_agent.move_y_cm.start(distance=6, velocity=1)
+    xy_agent.move_y_cm.wait()
+
+    #Get instantaneous position
+    status, message, session = xy_agent.acq.status()
+    print(session['data']['data'])
+
+Agent API
+---------
+
+.. autoclass:: agents.xy_stage.xy_latrt_agent.LATRtXYStageAgent
+    :members: init_xy_stage_task, move_x_cm, move_y_cm, set_position, start_acq, stop_acq
diff --git a/docs/index.rst b/docs/index.rst
index 7318e51af..a3e19215b 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -42,6 +42,7 @@ Simulator Reference Simulators are used to mock software and hardware
    agents/labjack
    agents/lakeshore240
    agents/lakeshore372
+   agents/latrt_xy_stage
    agents/meinberg_m1000_agent
    agents/pfeiffer
    agents/pysmurf/index
From 63cf9e2b5eded416c9eeb3e923c3f90ab1568ec8 Mon Sep 17 00:00:00 2001
From: Brian Koopman
Date: Tue, 4 May 2021 12:35:47 -0400
Subject: [PATCH 39/43] Lakeshore 370 Agent (#161)

* Create LS370 Agent with Dockerfile and associated LS370 driver

* Cast heater data as floats

* Create Lakeshore370 Agent docs page based on 372 page

* Protect ocs imports on readthedocs

* Fix some flake8 warnings

* Add Lakeshore 370 agent to docker-compose

* Add 370 Agent to ocs_plugin file

* Mark private methods in Channel and Heater classes

* Update documentation, including session.data examples

Co-authored-by: Zachary Atkins
---
 agents/lakeshore370/Dockerfile     |   18 +
 agents/lakeshore370/LS370_agent.py |  795 +++++++++++
 agents/ocs_plugin_so.py            |    1 +
 docker-compose.yml                 |    7 +
 docs/agents/lakeshore370.rst       |   97 ++
 docs/index.rst                     |    1 +
 socs/Lakeshore/Lakeshore370.py     | 1776 ++++++++++++++++++++++
 7 files changed, 2695 insertions(+)
 create mode 100644 agents/lakeshore370/Dockerfile
 create mode 100644 agents/lakeshore370/LS370_agent.py
 create mode 100644 docs/agents/lakeshore370.rst
 create mode 100644 socs/Lakeshore/Lakeshore370.py

diff --git a/agents/lakeshore370/Dockerfile b/agents/lakeshore370/Dockerfile
new file mode 100644
index 000000000..abfc3397d
--- /dev/null
+++ b/agents/lakeshore370/Dockerfile
@@ -0,0 +1,18 @@
+# SOCS Lakeshore 370 Agent
+# socs Agent container for interacting with a Lakeshore 370.
+
+# Use socs base image
+FROM socs:latest
+
+# Set the working directory to registry directory
+WORKDIR /app/socs/agents/lakeshore370/
+
+# Copy this agent into the app/agents directory
+COPY . .
+
+# Run registry on container startup
+ENTRYPOINT ["dumb-init", "python3", "-u", "LS370_agent.py"]
+
+# Sensible default arguments
+CMD ["--site-hub=ws://sisock-crossbar:8001/ws", \
+     "--site-http=http://sisock-crossbar:8001/call"]
diff --git a/agents/lakeshore370/LS370_agent.py b/agents/lakeshore370/LS370_agent.py
new file mode 100644
index 000000000..023e530ab
--- /dev/null
+++ b/agents/lakeshore370/LS370_agent.py
@@ -0,0 +1,795 @@
+import os
+import random
+import argparse
+import time
+import numpy as np
+import txaio
+import threading
+from contextlib import contextmanager
+
+from socs.Lakeshore.Lakeshore370 import LS370
+
+ON_RTD = os.environ.get('READTHEDOCS') == 'True'
+if not ON_RTD:
+    from ocs import ocs_agent, site_config
+    from ocs.ocs_twisted import TimeoutLock
+
+
+class YieldingLock:
+    """A lock protected by a lock. This braided arrangement guarantees
+    that a thread waiting on the lock will get priority over a thread
+    that has just released the lock and wants to reacquire it.
+
+    The typical use case is a Process that wants to hold the lock as
+    much as possible, but occasionally release the lock (without
+    sleeping for long) so another thread can access a resource. The
+    method release_and_acquire() is provided to make this a one-liner.
+
+    """
+    def __init__(self, default_timeout=None):
+        self.job = None
+        self._next = threading.Lock()
+        self._active = threading.Lock()
+        self._default_timeout = default_timeout
+
+    def acquire(self, timeout=None, job=None):
+        if timeout is None:
+            timeout = self._default_timeout
+        if timeout is None or timeout == 0.:
+            kw = {'blocking': False}
+        else:
+            kw = {'blocking': True, 'timeout': timeout}
+        result = False
+        if self._next.acquire(**kw):
+            if self._active.acquire(**kw):
+                self.job = job
+                result = True
+            self._next.release()
+        return result
+
+    def release(self):
+        self.job = None
+        return self._active.release()
+
+    def release_and_acquire(self, timeout=None):
+        job = self.job
+        self.release()
+        return self.acquire(timeout=timeout, job=job)
+
+    @contextmanager
+    def acquire_timeout(self, timeout=None, job='unnamed'):
+        result = self.acquire(timeout=timeout, job=job)
+        if result:
+            try:
+                yield result
+            finally:
+                self.release()
+        else:
+            yield result
+
+
+class LS370_Agent:
+    """Agent to connect to a single Lakeshore 370 device.
+
+    Args:
+        name (str): Name of the device, typically its serial number.
+        port (str): Serial port for the 370 device, e.g. '/dev/ttyUSB2'
+        fake_data (bool, optional): generates random numbers without connecting
+            to LS if True.
+        dwell_time_delay (int, optional): Amount of time, in seconds, to
+            delay data collection after switching channels. Note this time
+            should not include the change pause time, which is automatically
+            accounted for. Will automatically be reduced to dwell_time - 1
+            second if it is set longer than a channel's dwell time. This
+            ensures at least one second of data collection at the end of a scan.
+
+    """
+    def __init__(self, agent, name, port, fake_data=False, dwell_time_delay=0):
+
+        # self._acq_proc_lock is held for the duration of the acq Process.
+        # Tasks that require acq to not be running, at all, should use
+        # this lock.
+        self._acq_proc_lock = TimeoutLock()
+
+        # self._lock is held by the acq Process only when accessing
+        # the hardware but released occasionally so that (short) Tasks
+        # may run. Use a YieldingLock to guarantee that a waiting
+        # Task gets activated preferentially, even if the acq thread
+        # immediately tries to reacquire.
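+        # default_timeout=5 (seconds) bounds how long a short Task will
+        # block here waiting for the acq loop to yield before giving up.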
+        self._lock = YieldingLock(default_timeout=5)
+
+        self.name = name
+        self.port = port
+        self.fake_data = fake_data
+        self.dwell_time_delay = dwell_time_delay
+        self.module = None
+        self.thermometers = []
+
+        self.log = agent.log
+        self.initialized = False
+        self.take_data = False
+
+        self.agent = agent
+        # Registers temperature feeds
+        agg_params = {
+            'frame_length': 10*60  # [sec]
+        }
+        self.agent.register_feed('temperatures',
+                                 record=True,
+                                 agg_params=agg_params,
+                                 buffer_time=1)
+
+    def init_lakeshore_task(self, session, params=None):
+        """init_lakeshore_task(params=None)
+
+        Perform first time setup of the Lakeshore 370 communication.
+
+        Args:
+            params (dict): Parameters dictionary for passing parameters to
+                task.
+
+        Parameters:
+            auto_acquire (bool, optional): Default is False. Starts data
+                acquisition after initialization if True.
+            force (bool, optional): Force re-initialize the lakeshore if True.
+
+        """
+
+        if params is None:
+            params = {}
+
+        if self.initialized and not params.get('force', False):
+            self.log.info("Lakeshore already initialized. Returning...")
+            return True, "Already initialized"
+
+        with self._lock.acquire_timeout(job='init') as acquired1, \
+             self._acq_proc_lock.acquire_timeout(timeout=0., job='init') \
+             as acquired2:
+            if not acquired1:
+                self.log.warn(f"Could not start init because "
+                              f"{self._lock.job} is already running")
+                return False, "Could not acquire lock"
+            if not acquired2:
+                self.log.warn(f"Could not start init because "
+                              f"{self._acq_proc_lock.job} is already running")
+                return False, "Could not acquire lock"
+
+            session.set_status('running')
+
+            if self.fake_data:
+                self.res = random.randrange(1, 1000)
+                session.add_message("No initialization since faking data")
+                self.thermometers = ["thermA", "thermB"]
+            else:
+                self.module = LS370(self.port)
+                print("Initialized Lakeshore module: {!s}".format(self.module))
+                session.add_message("Lakeshore initialized with ID: %s" % self.module.id)
+
+                self.thermometers = [channel.name for channel in self.module.channels]
+
+            self.initialized = True
+
+        # Start data acquisition if requested
+        if params.get('auto_acquire', False):
+            self.agent.start('acq')
+
+        return True, 'Lakeshore module initialized.'
+
+    def start_acq(self, session, params=None):
+        """acq(params=None)
+
+        Method to start data acquisition process.
+
+        """
+
+        with self._acq_proc_lock.acquire_timeout(timeout=0, job='acq') \
+             as acq_acquired, \
+             self._lock.acquire_timeout(job='acq') as acquired:
+            if not acq_acquired:
+                self.log.warn(f"Could not start Process because "
+                              f"{self._acq_proc_lock.job} is already running")
+                return False, "Could not acquire lock"
+            if not acquired:
+                self.log.warn(f"Could not start Process because "
+                              f"{self._lock.job} is holding the lock")
+                return False, "Could not acquire lock"
+
+            session.set_status('running')
+            self.log.info("Starting data acquisition for {}".format(self.agent.agent_address))
+            previous_channel = None
+            last_release = time.time()
+
+            self.take_data = True
+            while self.take_data:
+
+                # Relinquish sampling lock occasionally.
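+                # (about once per second; the YieldingLock above guarantees
+                # that a waiting Task wins the reacquire race)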
+ if time.time() - last_release > 1.: + last_release = time.time() + if not self._lock.release_and_acquire(timeout=10): + self.log.warn(f"Failed to re-acquire sampling lock, " + f"currently held by {self._lock.job}.") + continue + + if self.fake_data: + data = { + 'timestamp': time.time(), + 'block_name': 'fake-data', + 'data': {} + } + for therm in self.thermometers: + reading = np.random.normal(self.res, 20) + data['data'][therm] = reading + time.sleep(.1) + + else: + active_channel = self.module.get_active_channel() + + # The 370 reports the last updated measurement repeatedly + # during the "pause change time", this results in several + # stale datapoints being recorded. To get around this we + # query the pause time and skip data collection during it + # if the channel has changed (as it would if autoscan is + # enabled.) + if previous_channel != active_channel: + if previous_channel is not None: + pause_time = active_channel.get_pause() + self.log.debug("Pause time for {c}: {p}", + c=active_channel.channel_num, + p=pause_time) + + dwell_time = active_channel.get_dwell() + self.log.debug("User set dwell_time_delay: {p}", + p=self.dwell_time_delay) + + # Check user set dwell time isn't too long + if self.dwell_time_delay > dwell_time: + self.log.warn("WARNING: User set dwell_time_delay of " + + "{delay} s is larger than channel " + + "dwell time of {chan_time} s. If " + + "you are autoscanning this will " + + "cause no data to be collected. " + + "Reducing dwell time delay to {s} s.", + delay=self.dwell_time_delay, + chan_time=dwell_time, + s=dwell_time - 1) + total_time = pause_time + dwell_time - 1 + else: + total_time = pause_time + self.dwell_time_delay + + for i in range(total_time): + self.log.debug("Sleeping for {t} more seconds...", + t=total_time-i) + time.sleep(1) + + # Track the last channel we measured + previous_channel = self.module.get_active_channel() + + # Setup feed dictionary + channel_str = active_channel.name.replace(' ', '_') + data = { + 'timestamp': time.time(), + 'block_name': channel_str, + 'data': {} + } + + # Collect both temperature and resistance values from each Channel + data['data'][channel_str + '_T'] = \ + self.module.get_temp(unit='kelvin', chan=active_channel.channel_num) + data['data'][channel_str + '_R'] = \ + self.module.get_temp(unit='ohms', chan=active_channel.channel_num) + + # Courtesy in case active channel has not changed + time.sleep(0.1) + + session.app.publish_to_feed('temperatures', data) + + return True, 'Acquisition exited cleanly.' + + def stop_acq(self, session, params=None): + """ + Stops acq process. + """ + if self.take_data: + self.take_data = False + return True, 'requested to stop taking data.' + else: + return False, 'acq is not currently running' + + def set_heater_range(self, session, params): + """ + Adjust the heater range for servoing cryostat. Wait for a specified + amount of time after the change. 
+
+        :param params: dict with 'heater', 'range', 'wait' keys
+        :type params: dict
+
+        heater - which heater to set range for, 'sample' by default (and the only implemented one)
+        range - the heater range value to change to
+        wait - time in seconds after changing the heater value to wait, allows
+               the servo to adjust to the new heater range, typical value of
+               ~600 seconds
+        """
+        with self._lock.acquire_timeout(job='set_heater_range') as acquired:
+            if not acquired:
+                self.log.warn(f"Could not start Task because "
+                              f"{self._lock.job} is already running")
+                return False, "Could not acquire lock"
+
+            session.set_status('running')
+
+            heater_string = params.get('heater', 'sample')
+            if heater_string.lower() == 'sample':
+                heater = self.module.sample_heater
+            elif heater_string.lower() == 'still':  # TODO: add still heater class to driver
+                # heater = self.module.still_heater
+                self.log.warn(f"{heater_string} heater not yet implemented in this agent, please modify client")
+                return False, f"{heater_string} heater not yet implemented"
+
+            current_range = heater.get_heater_range()
+
+            if params['range'] == current_range:
+                print("Current heater range matches commanded value. Proceeding unchanged.")
+            else:
+                heater.set_heater_range(params['range'])
+                time.sleep(params['wait'])
+
+            return True, f'Set {heater_string} heater range to {params["range"]}'
+
+    def set_excitation_mode(self, session, params):
+        """
+        Set the excitation mode of a specified channel.
+
+        :param params: dict with "channel" and "mode" keys for Channel.set_excitation_mode()
+        :type params: dict
+        """
+
+        with self._lock.acquire_timeout(job='set_excitation_mode') as acquired:
+            if not acquired:
+                self.log.warn(f"Could not start Task because "
+                              f"{self._lock.job} is already running")
+                return False, "Could not acquire lock"
+
+            session.set_status('running')
+
+            self.module.chan_num2channel(params['channel']).set_excitation_mode(params['mode'])
+            session.add_message(f'post message in agent for Set channel {params["channel"]} excitation mode to {params["mode"]}')
+            print(f'print statement in agent for Set channel {params["channel"]} excitation mode to {params["mode"]}')
+
+            return True, f'return text for Set channel {params["channel"]} excitation mode to {params["mode"]}'
+
+    def set_excitation(self, session, params):
+        """
+        Set the excitation voltage/current value of a specified channel.
+
+        :param params: dict with "channel" and "value" keys for Channel.set_excitation()
+        :type params: dict
+        """
+        with self._lock.acquire_timeout(job='set_excitation') as acquired:
+            if not acquired:
+                self.log.warn(f"Could not start Task because "
+                              f"{self._lock.job} is already running")
+                return False, "Could not acquire lock"
+
+            session.set_status('running')
+
+            current_excitation = self.module.chan_num2channel(params['channel']).get_excitation()
+
+            if params['value'] == current_excitation:
+                print(f'Channel {params["channel"]} excitation already set to {params["value"]}')
+            else:
+                self.module.chan_num2channel(params['channel']).set_excitation(params['value'])
+                session.add_message(f'Set channel {params["channel"]} excitation to {params["value"]}')
+                print(f'Set channel {params["channel"]} excitation to {params["value"]}')
+
+            return True, f'Set channel {params["channel"]} excitation to {params["value"]}'
+
+    def set_pid(self, session, params):
+        """
+        Set the PID parameters for servo control of fridge.
+ + :param params: dict with "P", "I", and "D" keys for Heater.set_pid() + :type params: dict + """ + with self._lock.acquire_timeout(job='set_pid') as acquired: + if not acquired: + self.log.warn(f"Could not start Task because " + f"{self._lock.job} is already running") + return False, "Could not acquire lock" + + session.set_status('running') + + self.module.sample_heater.set_pid(params["P"], params["I"], params["D"]) + session.add_message(f'post message text for Set PID to {params["P"]}, {params["I"]}, {params["D"]}') + print(f'print text for Set PID to {params["P"]}, {params["I"]}, {params["D"]}') + + return True, f'return text for Set PID to {params["P"]}, {params["I"]}, {params["D"]}' + + def set_active_channel(self, session, params): + """ + Set the active channel on the LS370. + + :param params: dict with "channel" number + :type params: dict + """ + with self._lock.acquire_timeout(job='set_active_channel') as acquired: + if not acquired: + self.log.warn(f"Could not start Task because " + f"{self._lock.job} is already running") + return False, "Could not acquire lock" + + session.set_status('running') + + self.module.set_active_channel(params["channel"]) + session.add_message(f'post message text for set channel to {params["channel"]}') + print(f'print text for set channel to {params["channel"]}') + + return True, f'return text for set channel to {params["channel"]}' + + def set_autoscan(self, session, params): + """ + Sets autoscan on the LS370. + :param params: dict with "autoscan" value + """ + with self._lock.acquire_timeout(job='set_autoscan') as acquired: + if not acquired: + self.log.warn(f"Could not start Task because " + f"{self._lock.job} is already running") + return False, "Could not acquire lock" + + session.set_status('running') + + if params['autoscan']: + self.module.enable_autoscan() + self.log.info('enabled autoscan') + else: + self.module.disable_autoscan() + self.log.info('disabled autoscan') + + return True, 'Set autoscan to {}'.format(params['autoscan']) + + def servo_to_temperature(self, session, params): + """Servo to temperature passed into params. + + :param params: dict with "temperature" Heater.set_setpoint() in units of K, and + "channel" as an integer (optional) + :type params: dict + """ + with self._lock.acquire_timeout(job='servo_to_temperature') as acquired: + if not acquired: + self.log.warn(f"Could not start Task because " + f"{self._lock.job} is already running") + return False, "Could not acquire lock" + + session.set_status('running') + + # Check we're in correct control mode for servo. + if self.module.sample_heater.mode != 'Closed Loop': + session.add_message('Changing control to Closed Loop mode for servo.') + self.module.sample_heater.set_mode("Closed Loop") + + # Check we aren't autoscanning. + if self.module.get_autoscan() is True: + session.add_message('Autoscan is enabled, disabling for PID control on dedicated channel.') + self.module.disable_autoscan() + + # Check to see if we passed an input channel, and if so change to it + if params.get("channel", False) is not False: + session.add_message(f'Changing heater input channel to {params.get("channel")}') + self.module.sample_heater.set_input_channel(params.get("channel")) + + # Check we're scanning same channel expected by heater for control. 
+            if self.module.get_active_channel().channel_num != int(self.module.sample_heater.input):
+                session.add_message('Changing active channel to expected heater control input')
+                self.module.set_active_channel(int(self.module.sample_heater.input))
+
+            # Check we're set up to take correct units.
+            if self.module.sample_heater.units != 'kelvin':
+                session.add_message('Setting preferred units to Kelvin on heater control.')
+                self.module.sample_heater.set_units('kelvin')
+
+            # Make sure we aren't servoing too high in temperature.
+            if params["temperature"] > 1:
+                return False, 'Servo temperature is set above 1K. Aborting.'
+
+            self.module.sample_heater.set_setpoint(params["temperature"])
+
+            return True, f'Setpoint now set to {params["temperature"]} K'
+
+    def check_temperature_stability(self, session, params):
+        """Check servo temperature stability is within threshold.
+
+        :param params: dict with "measurements" and "threshold" parameters
+        :type params: dict
+
+        measurements - number of measurements to average for stability check
+        threshold - how close the average of the measurements must be to the
+                    setpoint to count as stable
+        """
+        with self._lock.acquire_timeout(job='check_temp_stability') as acquired:
+            if not acquired:
+                self.log.warn(f"Could not start Task because "
+                              f"{self._lock.job} is already running")
+                return False, "Could not acquire lock"
+
+            session.set_status('running')
+
+            setpoint = float(self.module.sample_heater.get_setpoint())
+
+            if params is None:
+                params = {'measurements': 10, 'threshold': 0.5e-3}
+
+            test_temps = []
+
+            for i in range(params['measurements']):
+                test_temps.append(self.module.get_temp())
+                time.sleep(.1)  # sampling rate is 10 readings/sec, so wait 0.1 s for a new reading
+
+            mean = np.mean(test_temps)
+            session.add_message(f'Average of {params["measurements"]} measurements is {mean} K.')
+            print(f'Average of {params["measurements"]} measurements is {mean} K.')
+
+            if np.abs(mean - setpoint) < params['threshold']:
+                print("passed threshold")
+                session.add_message('Setpoint Difference: ' + str(mean - setpoint))
+                session.add_message(f'Average is within {params["threshold"]} K threshold. Proceeding with calibration.')
+
+                return True, f"Servo temperature is stable within {params['threshold']} K"
+
+            else:
+                print("we're in the else")
+                # adjust_heater(t,rest)
+
+                return False, f"Temperature not stable within {params['threshold']}."
+
+    def set_output_mode(self, session, params=None):
+        """
+        Set output mode of the heater.
+
+        :param params: dict with "heater" and "mode" parameters
+        :type params: dict
+
+        heater - Specifies which heater to control. Either 'sample' or 'still'
+        mode - Specifies mode of heater. Can be "Off", "Monitor Out", "Open Loop",
+               "Zone", "Still", "Closed Loop", or "Warm up"
+        """
+
+        with self._lock.acquire_timeout(job='set_output_mode') as acquired:
+            if not acquired:
+                self.log.warn(f"Could not start Task because "
+                              f"{self._lock.job} is already running")
+                return False, "Could not acquire lock"
+
+            session.set_status('running')
+
+            if params['heater'].lower() == 'still':
+                # self.module.still_heater.set_mode(params['mode'])  # TODO: add still heater to driver
+                self.log.warn(f"{params['heater']} heater not yet implemented in this agent, please modify client")
+            if params['heater'].lower() == 'sample':
+                self.module.sample_heater.set_mode(params['mode'])
+            self.log.info("Set {} output mode to {}".format(params['heater'], params['mode']))
+
+            return True, "Set {} output mode to {}".format(params['heater'], params['mode'])
+
+    def set_heater_output(self, session, params=None):
+        """
+        Set display type and output of the heater.
+
+        :param params: dict with "heater", "display", and "output" parameters
+        :type params: dict
+
+        heater - Specifies which heater to control. Either 'sample' or 'still'
+        output - Specifies heater output value.
+                 If display is set to "Current" or heater is "still", can be any
+                 number between 0 and 100.
+                 If display is set to "Power", can be any number between 0 and
+                 the maximum allowed power.
+
+        display (opt) - Specifies heater display type. Can be "Current" or "Power".
+                        If None, heater display is not reset before setting output.
+
+        """
+
+        with self._lock.acquire_timeout(job='set_heater_output') as acquired:
+            if not acquired:
+                self.log.warn(f"Could not start Task because "
+                              f"{self._lock.job} is already running")
+                return False, "Could not acquire lock"
+
+            heater = params['heater'].lower()
+            output = params['output']
+
+            display = params.get('display', None)
+
+            if heater == 'still':  # TODO: add still heater to driver
+                # self.module.still_heater.set_heater_output(output, display_type=display)
+                self.log.warn(f"{heater} heater not yet implemented in this agent, please modify client")
+            if heater.lower() == 'sample':
+                self.log.info("display: {}\toutput: {}".format(display, output))
+                self.module.sample_heater.set_heater_output(output, display_type=display)
+
+            self.log.info("Set {} heater display to {}, output to {}".format(heater, display, output))
+
+            session.set_status('running')
+
+            data = {'timestamp': time.time(),
+                    'block_name': '{}_heater_out'.format(heater),
+                    'data': {'{}_heater_out'.format(heater): output}
+                    }
+            session.app.publish_to_feed('temperatures', data)
+
+            return True, "Set {} display to {}, output to {}".format(heater, display, output)
+
+    def get_channel_attribute(self, session, params):
+        """Gets an arbitrary channel attribute, stored in the session.data dict
+
+        Parameters
+        ----------
+        params : dict
+            Contains parameters 'attribute' (not optional), 'channel' (optional, default '1').
+
+        Channel attributes stored in the session.data object are in the structure::
+
+            >>> session.data
+            {"calibration_curve": 21,
+             "dwell": 3,
+             "excitation": 6.32e-6,
+             "excitation_mode": "voltage",
+             "excitation_power": 2.0e-15,
+             "kelvin_reading": 100.0e-3,
+             "pause": 3,
+             "reading_status": ["T.UNDER"],
+             "resistance_range": 2.0e-3,
+             "resistance_reading": 10.0e3,
+             "temperature_coefficient": "negative",
+            }
+
+        Note: Only the attribute called with this method will be populated for the
+        given channel. This example shows all available attributes.
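+
+        A minimal client-side sketch of fetching one attribute (the
+        'LSA22YG' instance-id is borrowed from the configuration example in
+        the docs and would be whatever your site config uses)::
+
+            from ocs.matched_client import MatchedClient
+
+            ls = MatchedClient('LSA22YG', args=[])
+            ls.get_channel_attribute.start(attribute='kelvin_reading')
+            ok, msg, session = ls.get_channel_attribute.wait()
+            print(session['data']['kelvin_reading'])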
+ + """ + with self._lock.acquire_timeout(job=f"get_{params['attribute']}", timeout=3) as acquired: + if not acquired: + print(f"Lock could not be acquired because it is held by {self._lock.job}") + return False, 'Could not acquire lock' + + session.set_status('running') + + # get channel + channel_key = int(params.get('channel', 1)) + channel = self.module.chan_num2channel(channel_key) + + # check that attribute is a valid channel method + if getattr(channel, f"get_{params['attribute']}", False) is not False: + query = getattr(channel, f"get_{params['attribute']}") + + # get attribute + resp = query() + session.data[params['attribute']] = resp + + time.sleep(.1) + + return True, f"Retrieved {channel.name} {params['attribute']}" + + def get_heater_attribute(self, session, params): + """Gets an arbitrary heater attribute, stored in the session.data dict + + Parameters + ---------- + params : dict + Contains parameters 'attribute'. + + Heater attributes stored in the session.data object are in the structure:: + + >>> session.data + {"heater_range": 1e-3, + "heater_setup": ["current", 1e-3, 120], + "input_channel": 6, + "manual_out": 0.0, + "mode": "Closed Loop", + "pid": (80, 10, 0), + "setpoint": 100e-3, + "still_output", 10.607, + "units": "kelvin", + } + + Note: Only the attribute called with this method will be populated, + this example just shows all available attributes. + + """ + with self._lock.acquire_timeout(job=f"get_{params['attribute']}", timeout=3) as acquired: + if not acquired: + print(f"Lock could not be acquired because it is held by {self._lock.job}") + return False, 'Could not acquire lock' + + session.set_status('running') + + # get heater + heater = self.module.sample_heater + + # check that attribute is a valid heater method + if getattr(heater, f"get_{params['attribute']}", False) is not False: + query = getattr(heater, f"get_{params['attribute']}") + + # get attribute + resp = query() + session.data[params['attribute']] = resp + + time.sleep(.1) + + return True, f"Retrieved sample heater {params['attribute']}" + + +def make_parser(parser=None): + """Build the argument parser for the Agent. Allows sphinx to automatically + build documentation based on this function. + + """ + if parser is None: + parser = argparse.ArgumentParser() + + # Add options specific to this agent. + pgroup = parser.add_argument_group('Agent Options') + pgroup.add_argument('--port', type=str, help='Full path to USB node for the lakeshore, e.g. "/dev/ttyUSB0"') + pgroup.add_argument('--serial-number') + pgroup.add_argument('--mode') + pgroup.add_argument('--fake-data', type=int, default=0, + help='Set non-zero to fake data, without hardware.') + pgroup.add_argument('--dwell-time-delay', type=int, default=0, + help="Amount of time, in seconds, to delay data\ + collection after switching channels. Note this\ + time should not include the change pause time,\ + which is automatically accounted for.\ + Will automatically be reduced to dwell_time - 1\ + second if it is set longer than a channel's dwell\ + time. This ensures at least one second of data\ + collection at the end of a scan.") + pgroup.add_argument('--auto-acquire', type=bool, default=True, + help='Automatically start data acquisition on startup') + + return parser + + +if __name__ == '__main__': + # For logging + txaio.use_twisted() + LOG = txaio.make_logger() + + # Start logging + txaio.start_logging(level=os.environ.get("LOGLEVEL", "info")) + + # Get the default ocs argument parser. 
+    site_parser = site_config.add_arguments()
+
+    parser = make_parser(site_parser)
+
+    # Parse command line.
+    args = parser.parse_args()
+
+    # Automatically acquire data if requested (default)
+    init_params = False
+    if args.auto_acquire:
+        init_params = {'auto_acquire': True}
+
+    # Interpret options in the context of site_config.
+    site_config.reparse_args(args, 'Lakeshore370Agent')
+    print('I am in charge of device with serial number: %s' % args.serial_number)
+
+    agent, runner = ocs_agent.init_site_agent(args)
+
+    lake_agent = LS370_Agent(agent, args.serial_number, args.port,
+                             fake_data=args.fake_data,
+                             dwell_time_delay=args.dwell_time_delay)
+
+    agent.register_task('init_lakeshore', lake_agent.init_lakeshore_task,
+                        startup=init_params)
+    agent.register_task('set_heater_range', lake_agent.set_heater_range)
+    agent.register_task('set_excitation_mode', lake_agent.set_excitation_mode)
+    agent.register_task('set_excitation', lake_agent.set_excitation)
+    agent.register_task('set_pid', lake_agent.set_pid)
+    agent.register_task('set_autoscan', lake_agent.set_autoscan)
+    agent.register_task('set_active_channel', lake_agent.set_active_channel)
+    agent.register_task('servo_to_temperature', lake_agent.servo_to_temperature)
+    agent.register_task('check_temperature_stability', lake_agent.check_temperature_stability)
+    agent.register_task('set_output_mode', lake_agent.set_output_mode)
+    agent.register_task('set_heater_output', lake_agent.set_heater_output)
+    agent.register_task('get_channel_attribute', lake_agent.get_channel_attribute)
+    agent.register_task('get_heater_attribute', lake_agent.get_heater_attribute)
+    agent.register_process('acq', lake_agent.start_acq, lake_agent.stop_acq)
+
+    runner.run(agent, auto_reconnect=True)
diff --git a/agents/ocs_plugin_so.py b/agents/ocs_plugin_so.py
index 166059571..2582f3621 100644
--- a/agents/ocs_plugin_so.py
+++ b/agents/ocs_plugin_so.py
@@ -10,6 +10,7 @@
 for n,f in [
     ('Lakeshore372Agent', 'lakeshore372/LS372_agent.py'),
+    ('Lakeshore370Agent', 'lakeshore370/LS370_agent.py'),
     ('Lakeshore240Agent', 'lakeshore240/LS240_agent.py'),
     ('Keithley2230G-PSU', 'keithley2230G-psu/keithley_agent.py'),
     ('PysmurfController', 'smurf/pysmurf_control.py'),
diff --git a/docker-compose.yml b/docker-compose.yml
index 64cd482f7..0229881bc 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -18,6 +18,13 @@ services:
     image: "ocs-lakeshore372-agent"
     build: ./agents/lakeshore372/
 
+  # --------------------------------------------------------------------------
+  # The Lakeshore 370 Agent
+  # --------------------------------------------------------------------------
+  ocs-lakeshore370-agent:
+    image: "ocs-lakeshore370-agent"
+    build: ./agents/lakeshore370/
+
   # --------------------------------------------------------------------------
   # The Lakeshore 240 Agent
   # --------------------------------------------------------------------------
diff --git a/docs/agents/lakeshore370.rst b/docs/agents/lakeshore370.rst
new file mode 100644
index 000000000..f2ad78fc7
--- /dev/null
+++ b/docs/agents/lakeshore370.rst
@@ -0,0 +1,97 @@
+.. highlight:: rst
+
+.. _lakeshore370:
+
+=============
+Lakeshore 370
+=============
+
+The Lakeshore 370 (LS370) units are an older version of the Lakeshore 372, used
+for 100 mK and 1 K thermometer readout. Basic functionality to interface with and
+control an LS370 is provided by the
+``socs.Lakeshore.Lakeshore370.py`` module.
+
+.. argparse::
+   :filename: ../agents/lakeshore370/LS370_agent.py
+   :func: make_parser
+   :prog: python3 LS370_agent.py
+
+OCS Configuration
+-----------------
+
+To configure your Lakeshore 370 for use with OCS you need to add a
+Lakeshore370Agent block to your ocs configuration file. Here is an example
+configuration block::
+
+      {'agent-class': 'Lakeshore370Agent',
+       'instance-id': 'LSA22YG',
+       'arguments': [['--serial-number', 'LSA22YG'],
+                     ['--port', '/dev/ttyUSB1'],
+                     ['--dwell-time-delay', 0]]},
+
+Each device requires configuration under 'agent-instances'. See the OCS site
+configs documentation for more details.
+
+Docker Configuration
+--------------------
+
+The Lakeshore 370 Agent should be configured to run in a Docker container. An
+example configuration is::
+
+  ocs-LSA22YE:
+    image: simonsobs/ocs-lakeshore370-agent
+    hostname: ocs-docker
+    volumes:
+      - ${OCS_CONFIG_DIR}:/config:ro
+    devices:
+      - "/dev/ttyUSB1:/dev/ttyUSB1"
+    command:
+      - "--instance-id=LSA22YE"
+      - "--site-hub=ws://crossbar:8001/ws"
+      - "--site-http=http://crossbar:8001/call"
+
+.. note::
+    The serial numbers here will need to be updated for your device.
+
+.. note::
+    The device path may differ on your machine, and when identified only by
+    its ttyUSB number, as shown here, it is not guaranteed to be static.
+
+Direct Communication
+--------------------
+Direct communication with the Lakeshore can be achieved without OCS, using the
+``Lakeshore370.py`` module in ``socs/socs/Lakeshore/``. From that directory,
+you can run a script like::
+
+  from Lakeshore370 import LS370
+
+  ls = LS370('/dev/ttyUSB1')
+
+You can use the API detailed on this page to then interact with the Lakeshore.
+Each channel is given a Channel object in ``ls.channels``. You can query the
+resistance measured on the currently active channel with::
+
+  ls.get_active_channel().get_resistance_reading()
+
+That should get you started with direct communication. The API is fairly
+full-featured. For any feature requests for functionality that might be
+missing, please file a GitHub issue.
+
+Agent API
+---------
+
+.. autoclass:: agents.lakeshore370.LS370_agent.LS370_Agent
+    :members: init_lakeshore_task, start_acq
+
+Driver API
+----------
+
+For the API all methods should start with one of the following:
+
+    * set - set a parameter of arbitrary input (e.g. set_excitation)
+    * get - get the status of a parameter (e.g. get_excitation)
+    * enable - enable a boolean parameter (e.g. enable_autoscan)
+    * disable - disable a boolean parameter (e.g. disable_channel)
+
+.. automodule:: socs.Lakeshore.Lakeshore370
+    :members:
diff --git a/docs/index.rst b/docs/index.rst
index a3e19215b..3f2c99670 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -41,6 +41,7 @@ Simulator Reference
 Simulators are used to mock software and hardware
     agents/cryomech_cpa
     agents/labjack
     agents/lakeshore240
+    agents/lakeshore370
     agents/lakeshore372
     agents/latrt_xy_stage
     agents/meinberg_m1000_agent
diff --git a/socs/Lakeshore/Lakeshore370.py b/socs/Lakeshore/Lakeshore370.py
new file mode 100644
index 000000000..3e29c45dd
--- /dev/null
+++ b/socs/Lakeshore/Lakeshore370.py
@@ -0,0 +1,1776 @@
+#!/usr/bin/env python3
+# Lakeshore370.py
+
+import sys
+import serial
+import time
+import numpy as np
+
+import traceback
+
+# Lookup keys for command parameters.
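+# These tables come in `*_key`/`*_lock` pairs: the `_key` dict translates the
+# code the instrument reports into a human-readable value, and the matching
+# `_lock` dict is its inverse, used when building commands. Illustrative
+# example: mode_key['0'] == 'voltage' and mode_lock['voltage'] == '0'.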
+autorange_key = {'0': 'off',
+                 '1': 'on'}
+
+mode_key = {'0': 'voltage',
+            '1': 'current'}
+
+mode_lock = {'voltage': '0',
+             'current': '1'}
+
+voltage_excitation_key = {1: 2.0e-6,
+                          2: 6.32e-6,
+                          3: 20.0e-6,
+                          4: 63.2e-6,
+                          5: 200.0e-6,
+                          6: 632.0e-6,
+                          7: 2.0e-3,
+                          8: 6.32e-3,
+                          9: 20.0e-3,
+                          10: 63.2e-3,
+                          11: 200.0e-3,
+                          12: 632.0e-3}
+
+current_excitation_key = {1: 1.0e-12,
+                          2: 3.16e-12,
+                          3: 10.0e-12,
+                          4: 31.6e-12,
+                          5: 100.0e-12,
+                          6: 316.0e-12,
+                          7: 1.0e-9,
+                          8: 3.16e-9,
+                          9: 10.0e-9,
+                          10: 31.6e-9,
+                          11: 100.0e-9,
+                          12: 316.0e-9,
+                          13: 1.0e-6,
+                          14: 3.16e-6,
+                          15: 10.0e-6,
+                          16: 31.6e-6,
+                          17: 100.0e-6,
+                          18: 316.0e-6,
+                          19: 1.0e-3,
+                          20: 3.16e-3,
+                          21: 10.0e-3,
+                          22: 31.6e-3}
+
+voltage_excitation_lock = {2.0e-6: 1,
+                           6.32e-6: 2,
+                           20.0e-6: 3,
+                           63.2e-6: 4,
+                           200.0e-6: 5,
+                           632.0e-6: 6,
+                           2.0e-3: 7,
+                           6.32e-3: 8,
+                           20.0e-3: 9,
+                           63.2e-3: 10,
+                           200.0e-3: 11,
+                           632.0e-3: 12}
+
+current_excitation_lock = {1.0e-12: 1,
+                           3.16e-12: 2,
+                           10.0e-12: 3,
+                           31.6e-12: 4,
+                           100.0e-12: 5,
+                           316.0e-12: 6,
+                           1.0e-9: 7,
+                           3.16e-9: 8,
+                           10.0e-9: 9,
+                           31.6e-9: 10,
+                           100.0e-9: 11,
+                           316.0e-9: 12,
+                           1.0e-6: 13,
+                           3.16e-6: 14,
+                           10.0e-6: 15,
+                           31.6e-6: 16,
+                           100.0e-6: 17,
+                           316.0e-6: 18,
+                           1.0e-3: 19,
+                           3.16e-3: 20,
+                           10.0e-3: 21,
+                           31.6e-3: 22}
+
+range_key = {1: 2.0e-3,
+             2: 6.32e-3,
+             3: 20.0e-3,
+             4: 63.2e-3,
+             5: 200e-3,
+             6: 632e-3,
+             7: 2.0,
+             8: 6.32,
+             9: 20.0,
+             10: 63.2,
+             11: 200,
+             12: 632,
+             13: 2e3,
+             14: 6.32e3,
+             15: 20.0e3,
+             16: 63.2e3,
+             17: 200e3,
+             18: 632e3,
+             19: 2e6,
+             20: 6.32e6,
+             21: 20.0e6,
+             22: 63.2e6}
+
+range_lock = {2.0e-3: 1,
+              6.32e-3: 2,
+              20.0e-3: 3,
+              63.2e-3: 4,
+              200e-3: 5,
+              632e-3: 6,
+              2.0: 7,
+              6.32: 8,
+              20.0: 9,
+              63.2: 10,
+              200: 11,
+              632: 12,
+              2e3: 13,
+              6.32e3: 14,
+              20.0e3: 15,
+              63.2e3: 16,
+              200e3: 17,
+              632e3: 18,
+              2e6: 19,
+              6.32e6: 20,
+              20.0e6: 21,
+              63.2e6: 22}
+
+units_key = {'1': 'kelvin',
+             '2': 'ohms'}
+
+units_lock = {'kelvin': '1',
+              'ohms': '2'}
+
+csshunt_key = {'0': 'on',
+               '1': 'off'}
+
+tempco_key = {'1': 'negative',
+              '2': 'positive'}
+
+tempco_lock = {'negative': '1',
+               'positive': '2'}
+
+format_key = {'3': "Ohm/K (linear)",
+              '4': "log Ohm/K (linear)"}
+
+format_lock = {"Ohm/K (linear)": '3',
+               "log Ohm/K (linear)": '4'}
+
+heater_range_key = {"0": "Off", "1": 31.6e-6, "2": 100e-6, "3": 316e-6,
+                    "4": 1e-3, "5": 3.16e-3, "6": 10e-3, "7": 31.6e-3,
+                    "8": 100e-3}
+heater_range_lock = {v: k for k, v in heater_range_key.items()}
+heater_range_lock["On"] = "1"
+
+output_modes = {'1': 'Closed Loop', '2': 'Zone', '3': 'Open Loop', '4': 'Off'}
+output_modes_lock = {v.lower(): k for k, v in output_modes.items()}
+
+analog_modes = {'0': 'Off', '1': 'Channel', '2': 'Manual', '3': 'Zone', '4': 'Still'}
+analog_modes_lock = {v.lower(): k for k, v in analog_modes.items()}
+
+heater_display_key = {'1': 'current',
+                      '2': 'power'}
+heater_display_lock = {v: k for k, v in heater_display_key.items()}
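+
+# Decoding sketch (hypothetical reply, assuming the RDGRNG? field order
+# documented in Channel._get_input_setup below): "0,04,15,0,0" maps to
+#   mode       -> mode_key['0']             == 'voltage'
+#   excitation -> voltage_excitation_key[4] == 63.2e-6 (volts)
+#   range      -> range_key[15]             == 20.0e3 (ohms)
+#   autorange  -> autorange_key['0']        == 'off'
+#   cs shunt   -> csshunt_key['0']          == 'on' (excitation on)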
+
+
+class LS370:
+    """
+    Lakeshore 370 class.
+
+    Attributes:
+        channels - list of channels, index corresponds to channel number with
+                   index 0 corresponding to channel 1
+    """
+
+    _bytesize = serial.SEVENBITS
+    _parity = serial.PARITY_ODD
+    _stopbits = serial.STOPBITS_ONE
+
+    def __init__(self, port, baudrate=9600, timeout=10, num_channels=16):
+        self.port = port
+        self.baudrate = baudrate
+        self.timeout = timeout
+
+        print(self.baudrate)
+
+        self.com = serial.Serial(self.port, self.baudrate, self._bytesize, self._parity, self._stopbits, self.timeout)
+        self.num_channels = num_channels
+
+        self.id = self.get_id()
+        self.autoscan = self.get_autoscan()
+
+        self.channels = []
+
+        # unlike the 372, the 370 does not have a dedicated control input channel; rather, only numbered channels
+        for i in range(1, num_channels + 1):
+            c = Channel(self, i)
+            self.channels.append(c)
+
+        self.sample_heater = Heater(self)
+        # self.still_heater = Heater(self, 2)
+
+    def msg(self, message):
+        """Send message to the Lakeshore 370 over RS-232.
+
+        If we're asking for something from the Lakeshore (indicated by a ? in
+        the message string), then we will retry up to three times before
+        giving up due to potential communication timeouts.
+
+        Parameters
+        ----------
+        message : str
+            Message string as described in the Lakeshore 370 manual.
+
+        Returns
+        -------
+        str
+            Response string from the Lakeshore, if any. Else, an empty string.
+
+        """
+        msg_str = f'{message}\r\n'.encode()
+        self.com.write(msg_str)
+        resp = ''
+
+        if '?' in message:
+            resp = str(self.com.read_until(), 'utf-8').strip()
+
+            # Try a few times, if we timeout, try again.
+            try_count = 3
+            while resp == '':
+                if try_count == 0:
+                    break
+
+                print(f"Warning: Caught timeout waiting for response to {message}, waiting 1s and "
+                      f"trying again {try_count} more time(s) before giving up")
+                time.sleep(1)
+
+                # retry comms
+                self.com.write(msg_str)
+                resp = str(self.com.read_until(), 'utf-8').strip()
+                try_count -= 1
+
+        time.sleep(0.1)  # No comms for 100ms after sending message (manual says 50ms)
+
+        return resp
+
+    def get_id(self):
+        """Get the ID number of the Lakeshore unit."""
+        return self.msg('*IDN?')
+
+    def get_temp(self, unit='kelvin', chan=-1):
+        """Get temperature from the Lakeshore.
+
+        Args:
+            unit (str): Unit to return reading for ('ohms' or 'kelvin')
+            chan (int): Channel to query, -1 for currently active channel
+
+        Returns:
+            float: The reading from the lakeshore, either in ohms or kelvin.
+
+        """
+        if (chan == -1):
+            resp = self.msg("SCAN?")
+            c = int(resp.split(',')[0])
+        else:
+            c = chan
+
+        channel = self.chan_num2channel(c)
+
+        assert unit.lower() in ['ohms', 'kelvin']
+
+        if unit == 'ohms':
+            return float(channel.get_resistance_reading())
+        if unit == 'kelvin':
+            return float(channel.get_kelvin_reading())
+
+    def get_autoscan(self):
+        """Determine state of autoscan.
+
+        :returns: state of autoscanner
+        :rtype: bool
+        """
+        resp = self.msg('SCAN?')
+        scan_state = bool(int(resp.split(',')[1]))
+        self.autoscan = scan_state
+        return scan_state
+
+    def _set_autoscan(self, start=1, autoscan=0):
+        """Set the autoscan state and start channel for scanning.
+
+        :param start: Channel number to start scanning
+        :type start: int
+        :param autoscan: State of autoscan, 0 for off, 1 for on
+        :type autoscan: int
+        """
+        assert autoscan in [0, 1]
+
+        self.msg('SCAN {},{}'.format(start, autoscan))
+        self.autoscan = bool(autoscan)
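+
+    # SCAN round-trip sketch (hypothetical reply): 'SCAN?' returns e.g.
+    # "5,1", i.e. channel 5 with autoscan on. The enable/disable helpers
+    # below re-send the currently selected channel with the autoscan bit
+    # flipped, e.g. 'SCAN 5,0' to stop scanning.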
+
+    def enable_autoscan(self):
+        """Enable the autoscan feature of the Lakeshore 370.
+
+        Will query active channel to pass already selected channel to SCAN
+        command.
+        """
+        active_channel = self.get_active_channel()
+        self.msg('SCAN {},{}'.format(active_channel.channel_num, 1))
+        self.autoscan = True
+
+    def disable_autoscan(self):
+        """Disable the autoscan feature of the Lakeshore 370.
+
+        Will query active channel to pass already selected channel to SCAN
+        command.
+        """
+        active_channel = self.get_active_channel()
+        self.msg('SCAN {},{}'.format(active_channel.channel_num, 0))
+        self.autoscan = False
+
+    def chan_num2channel(self, channel_number):
+        """Return a Channel object from LS370.channels by associated channel number.
+
+        :param channel_number: Number associated with Channel to be returned
+        :type channel_number: int
+
+        :returns: Channel object corresponding to channel_number
+        :rtype: Channel
+        """
+        channel_list = [_.channel_num for _ in self.channels]
+        idx = channel_list.index(channel_number)
+        return self.channels[idx]
+
+    def get_active_channel(self):
+        """Query the Lakeshore for which channel it's currently scanning.
+
+        :returns: channel object describing the scanned channel
+        :rtype: Channel
+        """
+        resp = self.msg("SCAN?")
+        channel_number = int(resp.split(',')[0])
+        return self.chan_num2channel(channel_number)
+
+    def set_active_channel(self, channel):
+        """Set the active scanner channel.
+
+        Query using SCAN? to determine autoscan parameter and set active
+        channel.
+
+        :param channel: Channel number to switch scanner to. 1-8 or 1-16
+                        depending on scanner type
+        :type channel: int
+        """
+        resp = self.msg("SCAN?")
+        autoscan_setting = resp.split(',')[1]
+        self.msg('SCAN {},{}'.format(channel, autoscan_setting))
+
+    # NET?
+    def get_network_settings(self):
+        pass
+
+    # NETID?
+    def get_network_configuration(self):
+        pass
+
+
+class Channel:
+    """Lakeshore 370 Channel Object
+
+    :param ls: Lakeshore unit for communication
+    :type ls: LS370 Object
+    :param channel_num: The channel number (1-8 or 1-16 depending on scanner
+                        type)
+    :type channel_num: int
+    """
+    def __init__(self, ls, channel_num):
+        self.ls = ls
+        self.channel_num = channel_num
+        self.enabled = False
+        self._get_input_channel_parameter()
+        self._get_input_setup()
+        self.name = f'Channel {channel_num}'
+        # self.tlimit = self.get_temperature_limit()
+
+    def _get_input_channel_parameter(self):
+        """Run Input Channel Parameter Query
+
+        ::
+
+            Input channel parameters include:
+                off/on - Specifies whether the input/channel is disabled or enabled
+                    type off/on - bool
+                dwell - Specifies a value for the autoscanning dwell time, 1 to 200 s
+                    type dwell - int in units of seconds
+                pause - Specifies a value for the change pause time: 3 to 200 s
+                    type pause - int in units of seconds
+                curve number - Specifies which curve the channel uses
+                    type curve number - int
+                tempco - Sets the temperature coefficient that will be used for
+                         temperature control if no curve is selected
+                    type tempco - str
+
+        :returns: response from INSET? command
+
+        Reference: LakeShore 370 Manual - page 6-29
+        """
+        resp = self.ls.msg(f"INSET? {self.channel_num}").split(',')
+
+        self.enabled = bool(int(resp[0]))
+        self.dwell = int(resp[1])  # seconds
+        self.pause = int(resp[2])  # seconds
+        self.curve_num = int(resp[3])
+        self.tempco = tempco_key[resp[4]]
+
+        return resp
+
+    def _set_input_channel_parameter(self, params):
+        """Set INSET.
+
+        Parameters should be <off/on>, <dwell>, <pause>, <curve number>,
+        <tempco>. Will determine <input/channel> from attributes. This allows
+        us to use output from _get_input_channel_parameter directly, as it
+        doesn't return <input/channel>.
+
+        :param params: INSET parameters
+        :type params: list of str
+
+        :returns: response from ls.msg
+        """
+        assert len(params) == 5
+
+        reply = [str(self.channel_num)]
+        reply.extend(params)
+
+        param_str = ','.join(reply)
+        return self.ls.msg(f"INSET {param_str}")
+
+    def _get_input_setup(self):
+        """Run Resistance Range Query, storing results in human readable format.
+
+        ::
+
+            Input setup parameters include:
+                mode - Sensor excitation mode.
+                       Measurement input: 0 = Voltage Excitation Mode,
+                       1 = Current Excitation Mode
+                       Control input (channel A): 1 = Current Excitation
+                    type mode - int
+                excitation - Measurement input excitation range
+                    type excitation - int
+                range - Measurement input resistance. Ignored for control input.
+                    type range - int
+                autorange - Specifies if auto range is enabled.
+                            0 = off,
+                            1 = autorange current,
+                            2 = ROX102B Autorange (control input only)
+                    type autorange - int
+                cs shunt - Current source shunt.
+                           0 = current source not shunted, excitation on
+                           1 = current source shunted, excitation off
+                    type cs shunt - int
+                units - Specifies the preferred units parameter for sensor readings
+                        and for the control setpoint:
+                        1 = kelvin,
+                        2 = ohms
+                    type units - int
+
+        :returns: response from RDGRNG? command
+
+        Reference: LakeShore 370 Manual - page 6-33 - 6-34
+        """
+        resp = self.ls.msg(f"RDGRNG? {self.channel_num}").split(',')
+
+        _mode = resp[0]
+        _excitation = resp[1]
+        _range = resp[2]
+        _autorange = resp[3]
+        _csshunt = resp[4]
+        # _units = resp[5]
+
+        self.mode = mode_key[_mode]
+
+        excitation_key = {'0': voltage_excitation_key,
+                          '1': current_excitation_key}
+
+        excitation_units_key = {'0': 'volts',
+                                '1': 'amps'}
+
+        self.excitation = excitation_key[_mode][int(_excitation)]
+        self.excitation_units = excitation_units_key[_mode]
+
+        self.autorange = autorange_key[_autorange]
+
+        self.range = range_key[int(_range)]
+
+        self.csshunt = csshunt_key[_csshunt]
+
+        # self.units = units_key[_units]
+
+        return resp
+
+    def _set_input_setup(self, params):
+        """Set RDGRNG.
+
+        Parameters are <mode>, <excitation>, <range>, <autorange>, <cs shunt>.
+        Will determine <channel> from attributes.
+
+        :param params: RDGRNG parameters
+        :type params: list of str
+
+        :returns: response from ls.msg
+        """
+        assert len(params) == 5
+
+        reply = [str(self.channel_num)]
+        reply.extend(params)
+
+        param_str = ','.join(reply)
+        return self.ls.msg(f"RDGRNG {param_str}")
+
+    # Public API
+
+    def get_excitation_mode(self):
+        """Get the excitation mode from RDGRNG?
+
+        :returns: excitation mode, 'current' or 'voltage'
+        :rtype: str
+        """
+        resp = self._get_input_setup()
+        self.mode = mode_key[resp[0]]
+        return self.mode
+
+    def set_excitation_mode(self, excitation_mode):
+        """Set the excitation mode to either voltage excitation or current
+        excitation.
+
+        :param excitation_mode: mode we want, must be 'current' or 'voltage'
+        :type excitation_mode: str
+
+        :returns: reply from RDGRNG call
+        :rtype: str
+
+        """
+        assert excitation_mode in ['voltage', 'current']
+
+        resp = self._get_input_setup()
+        resp[0] = mode_lock[excitation_mode]
+
+        self.mode = mode_key[resp[0]]
+
+        return self._set_input_setup(resp)
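+
+    # The channel getters/setters in this class follow a read-modify-write
+    # pattern: query the full parameter list, overwrite one field, and write
+    # the whole list back. A minimal sketch with hypothetical values:
+    #
+    #     resp = self._get_input_setup()  # e.g. ['0', '04', '15', '0', '0']
+    #     resp[1] = '6'                   # swap in a new excitation code
+    #     self._set_input_setup(resp)     # sends "RDGRNG <chan>,0,6,15,0,0"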
+
+    def get_excitation(self):
+        """Get excitation value from RDGRNG?
+
+        :returns: excitation value in volts or amps, depending on mode
+        :rtype: float
+        """
+        resp = self._get_input_setup()
+        _mode = resp[0]
+        _excitation = resp[1]
+
+        excitation_key = {'0': voltage_excitation_key,
+                          '1': current_excitation_key}
+
+        self.excitation = excitation_key[_mode][int(_excitation)]
+
+        return self.excitation
+
+    def set_excitation(self, excitation_value):
+        """Set voltage/current excitation to specified value via RDGRNG command.
+
+        :param excitation_value: value in volts/amps of excitation
+        :type excitation_value: float
+
+        :returns: response from RDGRNG command
+        :rtype: str
+        """
+        _mode = self.mode
+
+        if _mode == 'voltage':
+            excitation_lock = voltage_excitation_lock
+        elif _mode == 'current':
+            excitation_lock = current_excitation_lock
+
+        closest_value = min(excitation_lock, key=lambda x: abs(x - excitation_value))
+
+        resp = self._get_input_setup()
+        resp[1] = str(excitation_lock[closest_value])
+
+        return self._set_input_setup(resp)
+
+    def enable_autorange(self):
+        """Enable auto range for channel via RDGRNG command."""
+        resp = self._get_input_setup()
+        # order of resp args switched for range, autorange in LS370
+        resp[3] = '1'
+
+        # all LS370 channels respond to this command
+        for c in self.ls.channels:
+            c.autorange = autorange_key[resp[3]]
+
+        # TODO: move method to LS370 class, fix references in agent
+        return self._set_input_setup(resp)
+
+    def disable_autorange(self):
+        """Disable auto range for channel via RDGRNG command."""
+        resp = self._get_input_setup()
+        resp[3] = '0'
+
+        # all LS370 channels respond to this command
+        for c in self.ls.channels:
+            c.autorange = autorange_key[resp[3]]
+
+        # TODO: move method to LS370 class, fix references in agent
+        return self._set_input_setup(resp)
+
+    def set_resistance_range(self, resistance_range):
+        """Set the resistance range.
+
+        :param resistance_range: range in ohms we want to measure. Doesn't need
+                                 to be exactly one of the options on the
+                                 lakeshore, will select closest valid range,
+                                 though note these are in increments of 2,
+                                 6.32, 20, 63.2, etc.
+        :type resistance_range: float
+
+        :returns: response from RDGRNG command
+        :rtype: str
+        """
+
+        def get_closest_resistance_range(num):
+            """Gets the closest valid resistance range."""
+            ranges = [2.0e-3, 6.32e-3, 20.0e-3, 63.2e-3, 200e-3, 632e-3, 2.0,
+                      6.32, 20.0, 63.2, 200, 632, 2e3, 6.32e3, 20.0e3, 63.2e3,
+                      200e3, 632e3, 2e6, 6.32e6, 20.0e6, 63.2e6]
+
+            return min(ranges, key=lambda x: abs(x - num))
+
+        _range = get_closest_resistance_range(resistance_range)
+
+        resp = self._get_input_setup()
+
+        # order of range, autorange switched in LS370
+        resp[2] = str(range_lock[_range])
+        self.range = _range
+        return self._set_input_setup(resp)
+
+    def get_resistance_range(self):
+        """Get the resistance range.
+
+        :returns: resistance range in ohms
+        :rtype: float
+        """
+        resp = self._get_input_setup()
+        _range = resp[2]
+        self.range = range_key[int(_range)]
+        return self.range
+
+    def enable_excitation(self):
+        """Enable excitation by not shunting the current source via RDGRNG command.
+
+        :returns: state of excitation
+        :rtype: str
+        """
+        resp = self._get_input_setup()
+        resp[4] = '0'
+
+        # all LS370 channels respond to this command
+        for c in self.ls.channels:
+            c.csshunt = csshunt_key[resp[4]]
+
+        # TODO: move method to LS370 class, fix references in agent
+        return self._set_input_setup(resp)
+
+    def disable_excitation(self):
+        """Disable excitation by shunting the current source via RDGRNG command.
+ + :returns: state of excitation + :rtype: str + """ + resp = self._get_input_setup() + resp[4] = '1' + + #all LS370 channels respond to this command + for c in self.ls.channels: + c.csshunt = csshunt_key[resp[4]] + + #TODO: move method to LS370 class, fix references in agent + return self._set_input_setup(resp) + + def get_excitation_power(self): + """Get the most recent power calculation for the channel via RDGPWR? command. + + :returns: power in Watts + :rtype: float + """ + # TODO: Confirm units on this are watts + resp = self.ls.msg(f"RDGPWR? {self.channel_num}").strip() + return float(resp) + +# def set_units(self, units): +# """Set preferred units using INTYPE command. +# +# :param units: preferred units parameter for sensor readings, 'kelvin' +# or 'ohms' +# :type units: str +# +# :returns: response from INTYPE command +# :rtype: str +# """ +# assert units.lower() in ['kelvin', 'ohms'] +# +# resp = self._get_input_setup() +# resp[5] = units_lock[units.lower()] +# return self._set_input_setup(resp) +# +# def get_units(self): +# """Get preferred units from INTYPE? command. +# +# :returns: preferred units +# :rtype: str +# """ +# resp = self._get_input_setup() +# _units = resp[5] +# self.units = units_key[_units] +# +# return self.units + + def enable_channel(self): + """Enable channel using INSET command. + + :returns: response from self._set_input_channel_parameter() + :rtype: str + """ + resp = self._get_input_channel_parameter() + resp[0] = '1' + self.enabled = True + return self._set_input_channel_parameter(resp) + + def disable_channel(self): + """Disable channel using INSET command. + + :returns: response from self._set_input_channel_parameter() + :rtype: str + """ + resp = self._get_input_channel_parameter() + resp[0] = '0' + self.enabled = False + return self._set_input_channel_parameter(resp) + + def set_dwell(self, dwell): + """Set the autoscanning dwell time. + + :param dwell: Dwell time in seconds + :type dwell: int + + :returns: response from self._set_input_channel_parameter() + :rtype: str + """ + assert dwell in range(1, 201), "Dwell must be 1 to 200 sec" + + resp = self._get_input_channel_parameter() + resp[1] = str(dwell) # seconds + self.dwell = dwell # seconds + return self._set_input_channel_parameter(resp) + + def get_dwell(self): + """Get the autoscanning dwell time. + + :returns: the dwell time in seconds + :rtype: int + """ + resp = self._get_input_channel_parameter() + self.dwell = int(resp[1]) + return self.dwell + + def set_pause(self, pause): + """Set pause time. + + :param pause: Pause time in seconds + :type pause: int + + :returns: response from self._set_input_channel_parameter() + :rtype: str + """ + assert pause in range(3, 201), "Pause must be 3 to 200 sec" + + resp = self._get_input_channel_parameter() + resp[2] = str(pause) # seconds + self.pause = pause # seconds + return self._set_input_channel_parameter(resp) + + def get_pause(self): + """Get the pause time from INSET. + + :returns: the pause time in seconds + :rtype: int + """ + resp = self._get_input_channel_parameter() + self.pause = int(resp[2]) # seconds + return self.pause + + def set_calibration_curve(self, curve_number): + """Set calibration curve using INSET. + + Note: If curve doesn't exist, curve number gets set to 0. 
+
+        :param curve_number: Curve number for temperature conversion
+        :type curve_number: int
+        """
+        assert curve_number in range(0, 60), "Curve number must be from 0 to 59"
+
+        resp = self._get_input_channel_parameter()
+        resp[3] = str(curve_number)
+        out = self._set_input_channel_parameter(resp)
+        self.curve_num = self.get_calibration_curve()  # refresh from the unit after setting
+        return out
+
+    def get_calibration_curve(self):
+        """Get calibration curve number using INSET?
+
+        :returns: curve number in use for the channel
+        :rtype: int
+        """
+        resp = self._get_input_channel_parameter()
+        self.curve_num = int(resp[3])
+        return self.curve_num
+
+    def set_temperature_coefficient(self, coefficient):
+        """Set temperature coefficient with INSET.
+
+        :param coefficient: set coefficient to be used for temperature control
+                            if no curve is selected, either 'negative' or
+                            'positive'
+        :type coefficient: str
+
+        :returns: response from _set_input_channel_parameter()
+        :rtype: str
+        """
+        assert coefficient in ['positive', 'negative']
+
+        resp = self._get_input_channel_parameter()
+        resp[4] = tempco_lock[coefficient]
+        self.tempco = coefficient
+        return self._set_input_channel_parameter(resp)
+
+    def get_temperature_coefficient(self):
+        """Get temperature coefficient from INSET?
+
+        :returns: temperature coefficient
+        """
+        resp = self._get_input_channel_parameter()
+        self.tempco = tempco_key[resp[4]]
+        return self.tempco
+
+#    def get_sensor_input_name(self):
+#        """Run Sensor Input Name Query
+#
+#        :returns: response from INNAME? command
+#        :rtype: str
+#        """
+#        resp = self.ls.msg(f"INNAME? {self.channel_num}").strip()
+#
+#        self.name = resp
+#
+#        return resp
+#
+#    def set_sensor_input_name(self, name):
+#        """Set sensor input name using INNAME.
+#
+#        Note: ',' and ';' characters are sanitized from input
+#
+#        :param name: name to give input channel
+#        :type name: str
+#        """
+#        name = name.replace(',', '').replace(';', '')
+#        resp = self.ls.msg(f'INNAME {self.channel_num},"{name}"')
+#        self.name = name
+#        return resp
+
+    def get_kelvin_reading(self):
+        """Get temperature reading from channel.
+
+        :returns: temperature from channel in Kelvin
+        :rtype: float
+        """
+        return float(self.ls.msg(f"RDGK? {self.channel_num}"))
+
+    def get_resistance_reading(self):
+        """Get resistance reading from channel.
+
+        :returns: resistance from channel in Ohms
+        :rtype: float
+        """
+        return float(self.ls.msg(f"RDGR? {self.channel_num}"))
+
+    def get_reading_status(self):
+        """Get status of input reading.
+
+        :returns: list of errors on reading (or None if no errors)
+        :rtype: list of str
+        """
+        resp = self.ls.msg(f"RDGST? {self.channel_num}")
+        error_sum = int(resp)
+
+        errors = {128: "T.UNDER",
+                  64: "T.OVER",
+                  32: "R.UNDER",
+                  16: "R.OVER",
+                  8: "VDIF OVL",
+                  4: "VMIX OVL",
+                  2: "VCM OVL",
+                  1: "CS OVL"}
+
+        error_list = []
+        for key, value in errors.items():
+            if key <= error_sum:
+                error_list.append(value)
+                error_sum -= key
+
+        assert error_sum == 0
+
+        if len(error_list) == 0:
+            error_list = None
+
+        return error_list
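+
+    # Worked example for the RDGST? decomposition above (hypothetical reply):
+    # "24" gives error_sum = 24; walking the bit values from 128 down,
+    # 16 <= 24 appends "R.OVER" (remainder 8), then 8 <= 8 appends "VDIF OVL"
+    # (remainder 0), so get_reading_status() returns ["R.OVER", "VDIF OVL"].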
+
+#    def get_sensor_reading(self):
+#        """Get sensor reading from channel.
+#
+#        :returns: resistance from channel in Ohms
+#        :rtype: float
+#        """
+#        return float(self.ls.msg(f"SRDG? {self.channel_num}"))
+
+#    def set_temperature_limit(self, limit):
+#        """Set temperature limit in kelvin for which to shutdown all control
+#        outputs when exceeded. A temperature limit of zero turns the
+#        temperature limit feature off for the given sensor input.
+#
+#        :param limit: temperature limit in kelvin
+#        :type limit: float
+#
+#        :returns: response from TLIMIT command
+#        :rtype: str
+#        """
+#        resp = self.ls.msg(f"TLIMIT {self.channel_num},{limit}")
+#        self.tlimit = limit
+#        return resp
+#
+#    def get_temperature_limit(self):
+#        """Get temperature limit, at which output controls are shutdown.
+#
+#        A temperature limit of 0 disables this feature.
+#
+#        :returns: temperature limit in Kelvin
+#        :rtype: float
+#        """
+#        resp = self.ls.msg(f"TLIMIT? {self.channel_num}").strip()
+#        self.tlimit = float(resp)
+#        return self.tlimit
+
+    def __str__(self):
+        string = "-" * 50 + "\n"
+        string += "Channel %s\n" % (self.channel_num)
+        string += "-" * 50 + "\n"
+        string += "\t%-30s\t%r\n" % ("Enabled :", self.enabled)
+        string += "\t%-30s\t%s %s\n" % ("Dwell:", self.dwell, "seconds")
+        string += "\t%-30s\t%s %s\n" % ("Pause:", self.pause, "seconds")
+        string += "\t%-30s\t%s\n" % ("Curve Number:", self.curve_num)
+        string += "\t%-30s\t%s\n" % ("Temperature Coefficient:", self.tempco)
+        string += "\t%-30s\t%s\n" % ("Excitation State:", self.csshunt)
+        string += "\t%-30s\t%s\n" % ("Excitation Mode:", self.mode)
+        string += "\t%-30s\t%s %s\n" % ("Excitation:", self.excitation, self.excitation_units)
+        string += "\t%-30s\t%s\n" % ("Autorange:", self.autorange)
+        string += "\t%-30s\t%s %s\n" % ("Resistance Range:", self.range, "ohms")
+#        string += "\t%-30s\t%s\n" % ("Preferred Units:", self.units)
+
+        return string
+
+
+class Curve:
+    """Calibration Curve class for the LS370."""
+    def __init__(self, ls, curve_num):
+        self.ls = ls
+        self.curve_num = curve_num
+
+        self.name = None
+        self.serial_number = None
+        self.format = None
+        self.limit = None
+        self.coefficient = None
+        self.get_header()  # populates above values
+
+    def get_header(self):
+        """Get curve header description.
+
+        :returns: response from CRVHDR? in list
+        :rtype: list of str
+        """
+        resp = self.ls.msg(f"CRVHDR? {self.curve_num}").split(',')
+
+        _name = resp[0].strip()
+        _sn = resp[1].strip()
+        _format = resp[2]
+        _limit = float(resp[3])
+        _coefficient = resp[4]
+
+        self.name = _name
+        self.serial_number = _sn
+
+        self.format = format_key[_format]
+
+        self.limit = _limit
+        self.coefficient = tempco_key[_coefficient]
+
+        return resp
+
+    def _set_header(self, params):
+        """Set the Curve Header with the CRVHDR command.
+
+        Parameters should be <name>, <serial number>, <format>, <limit value>,
+        <coefficient>. We will determine <curve> from attributes. This allows
+        us to use output from get_header directly, as it doesn't return the
+        curve number.
+
+        <name> is limited to 15 characters. Longer names take the first 15 characters.
+        <serial number> is limited to 10 characters. Longer serial numbers take the last 10 digits.
+
+        :param params: CRVHDR parameters
+        :type params: list of str
+
+        :returns: response from ls.msg
+        """
+        assert len(params) == 5
+
+        _curve_num = self.curve_num
+        _name = params[0][:15]
+        _sn = params[1][-10:]
+        _format = params[2]
+        assert _format.strip() in ['3', '4']
+        _limit = params[3]
+        _coeff = params[4]
+        assert _coeff.strip() in ['1', '2']
+
+        return self.ls.msg(f'CRVHDR {_curve_num},{_name},{_sn},{_format},{_limit},{_coeff}')
+
+    def get_name(self):
+        """Get the curve name with the CRVHDR? command.
+
+        :returns: The curve name
+        :rtype: str
+        """
+        self.get_header()
+        return self.name
+
+    def set_name(self, name):
+        """Set the curve name with the CRVHDR command.
+
+        :param name: The curve name, limit of 15 characters, longer names get truncated
+        :type name: str
+
+        :returns: the response from the CRVHDR command
+        :rtype: str
+        """
+        resp = self.get_header()
+        resp[0] = name.upper()
+        self.name = resp[0]
+        return self._set_header(resp)
+
+    def get_serial_number(self):
+        """Get the curve serial number with the CRVHDR? command.
+
+        :returns: The curve serial number
+        :rtype: str
+        """
+        self.get_header()
+        return self.serial_number
+
+    def set_serial_number(self, serial_number):
+        """Set the curve serial number with the CRVHDR command.
+
+        :param serial_number: The curve serial number, limit of 10 characters,
+                              longer serials get truncated
+        :type serial_number: str
+
+        :returns: the response from the CRVHDR command
+        :rtype: str
+        """
+        resp = self.get_header()
+        resp[1] = serial_number
+        self.serial_number = resp[1]
+        return self._set_header(resp)
+
+    def get_format(self):
+        """Get the curve data format with the CRVHDR? command.
+
+        :returns: The curve data format
+        :rtype: str
+        """
+        self.get_header()
+        return self.format
+
+    def set_format(self, _format):
+        """Set the curve format with the CRVHDR command.
+
+        :param _format: The curve format, valid formats are:
+                        "Ohm/K (linear)" and "log Ohm/K (linear)"
+        :type _format: str
+
+        :returns: the response from the CRVHDR command
+        :rtype: str
+        """
+        resp = self.get_header()
+
+        assert _format in format_lock.keys(), "Please select a valid format"
+
+        resp[2] = format_lock[_format]
+        self.format = _format
+        return self._set_header(resp)
+
+    def get_limit(self):
+        """Get the curve temperature limit with the CRVHDR? command.
+
+        :returns: The curve temperature limit
+        :rtype: str
+        """
+        self.get_header()
+        return self.limit
+
+    def set_limit(self, limit):
+        """Set the curve temperature limit with the CRVHDR command.
+
+        :param limit: The curve temperature limit
+        :type limit: float
+
+        :returns: the response from the CRVHDR command
+        :rtype: str
+        """
+        resp = self.get_header()
+        resp[3] = str(limit)
+        self.limit = limit
+        return self._set_header(resp)
+
+    def get_coefficient(self):
+        """Get the curve temperature coefficient with the CRVHDR? command.
+
+        :returns: The curve temperature coefficient
+        :rtype: str
+        """
+        self.get_header()
+        return self.coefficient
+
+    def set_coefficient(self, coefficient):
+        """Set the curve temperature coefficient with the CRVHDR command.
+
+        :param coefficient: The curve temperature coefficient, either 'positive' or 'negative'
+        :type coefficient: str
+
+        :returns: the response from the CRVHDR command
+        :rtype: str
+        """
+        assert coefficient in ['positive', 'negative']
+
+        resp = self.get_header()
+        resp[4] = tempco_lock[coefficient]
+        self.tempco = coefficient
+        return self._set_header(resp)
+
+    def get_data_point(self, index):
+        """Get a single data point from a curve, given the index, using the
+        CRVPT? command.
+
+        The format for the return value, a 2-tuple of floats, is chosen to work
+        with how the get_curve() method later stores the entire curve in a
+        numpy structured array.
+
+        :param index: index of breakpoint to query
+        :type index: int
+
+        :returns: (units, temperature) values for the given breakpoint
+        :rtype: 2-tuple of floats
+        """
+        resp = self.ls.msg(f"CRVPT? {self.curve_num},{index}").split(',')
+        _units = float(resp[0])
+        _temp = float(resp[1])
+        return (_units, _temp)
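+
+    # Breakpoint sketch (hypothetical values): "CRVPT? 21,1" might return
+    # "1200.0,0.05", which get_data_point(1) hands back as (1200.0, 0.05),
+    # i.e. 1200.0 sensor units at 50 mK. get_curve() below stacks these into
+    # a numpy structured array, so breakpoints['temperature'][0] would be 0.05.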
+
+    def _set_data_point(self, index, units, kelvin, curvature=None):
+        """Set a single data point with the CRVPT command.
+
+        :param index: data point index
+        :type index: int
+        :param units: value of the sensor units to 6 digits
+        :type units: float
+        :param kelvin: value of the corresponding temp in Kelvin to 6 digits
+        :type kelvin: float
+        :param curvature: unused, accepted for compatibility
+
+        :returns: response from the CRVPT command
+        :rtype: str
+        """
+        resp = self.ls.msg(f"CRVPT {self.curve_num}, {index}, {units}, {kelvin}")
+        return resp
+
+    # Public API Elements
+    def get_curve(self, _file=None):
+        """Get a calibration curve from the LS370.
+
+        If _file is not None, save the curve to that file location.
+
+        :param _file: the file to save the calibration curve to
+        :type _file: str
+        """
+        breakpoints = []
+        for i in range(1, 201):
+            x = self.get_data_point(i)
+            if x[0] == 0:
+                break
+            breakpoints.append(x)
+
+        struct_array = np.array(breakpoints, dtype=[('units', 'f8'),
+                                                    ('temperature', 'f8')])
+
+        self.breakpoints = struct_array
+
+        if _file is not None:
+            with open(_file, 'w') as f:
+                f.write('Sensor Model:\t' + self.name + '\r\n')
+                f.write('Serial Number:\t' + self.serial_number + '\r\n')
+                f.write('Data Format:\t' + format_lock[self.format] + f'\t({self.format})\r\n')
+
+                # TODO: shouldn't this be the curve_header limit?
+                # above is done ZA 20200405
+                f.write('SetPoint Limit:\t%s\t(Kelvin)\r\n' % '%0.4f' % self.limit)
+                f.write('Temperature coefficient:\t' + tempco_lock[self.coefficient] + f' ({self.coefficient})\r\n')
+                f.write('Number of Breakpoints:\t%s\r\n' % len(self.breakpoints))
+                f.write('\r\n')
+                f.write('No.\tUnits\tTemperature (K)\r\n')
+                f.write('\r\n')
+                for idx, point in enumerate(self.breakpoints):
+                    f.write('%s\t%s %s\r\n' % (idx + 1, '%0.4f' % point['units'], '%0.4f' % point['temperature']))
+
+        return self.breakpoints
+
+    def set_curve(self, _file):
+        """Set a calibration curve, loading it from the file.
+
+        :param _file: the file to load the calibration curve from
+        :type _file: str
+
+        :returns: return the new curve header, refreshing the attributes
+        :rtype: list of str
+        """
+        with open(_file) as f:
+            content = f.readlines()
+
+        header = []
+        for i in range(0, 6):
+            if i < 2 or i > 4:
+                header.append(content[i].strip().split(":", 1)[1].strip())
+            else:
+                header.append(content[i].strip().split(":", 1)[1].strip().split("(", 1)[0].strip())
+
+        # Skip to the R and T values in the file and strip them of tabs, newlines, etc
+        values = []
+        for i in range(9, len(content)):
+            values.append(content[i].strip().split())
+
+        self.delete_curve()  # remove old curve first, so old breakpoints don't remain
+
+        self._set_header(header[:-1])  # ignore num of breakpoints
+
+        for point in values:
+            print("uploading %s" % point)
+            self._set_data_point(point[0], point[1], point[2])
+
+        # refresh curve attributes
+        self.get_header()
+        self._check_curve(_file)
+
+    def _check_curve(self, _file):
+        """After setting a data point for calibration curve,
+        use CRVPT? command from get_data_point() to check
+        that all points of calibration curve were uploaded.
+        If not, re-upload points.
+
+        :param _file: calibration curve file
+        :type _file: str
+        """
+
+        with open(_file) as f:
+            content = f.readlines()
+
+        # skipping header info
+        values = []
+        for i in range(9, len(content)):
+            values.append(content[i].strip().split())  # data points that should have been uploaded
+
+        # TODO: shouldn't this be capped at len(values) + 1?
+        # above is done ZA 20200330
+        for j in range(1, len(values) + 1):
+            try:
+                resp = self.get_data_point(j)  # response from the 370
+                point = values[j - 1]
+                units = float(resp[0])
+                temperature = float(resp[1])
+                assert units == float(point[1]), "Point number %s not uploaded" % point[0]
+                assert temperature == float(point[2]), "Point number %s not uploaded" % point[0]
+                print("Successfully uploaded %s, %s" % (units, temperature))
+            # if AssertionError, tell 370 to re-upload points
+
+            # TODO: shouldn't this condition on either units or temperature, not just units?
+            # above is done ZA 20200330
+            except AssertionError:
+                if units != float(point[1]) or temperature != float(point[2]):
+
+                    # TODO: fix, could enter infinite loop if always fails
+                    self.set_curve(_file)
+
+        # check that remaining points are zeros
+        for j in range(len(values) + 1, 201):
+            try:
+                resp = self.get_data_point(j)  # response from the 370
+                units = float(resp[0])
+                temperature = float(resp[1])
+                assert units == 0, "Point number %s contains nonzero data" % j
+                assert temperature == 0, "Point number %s contains nonzero data" % j
+            except AssertionError:
+                if units != 0 or temperature != 0:
+
+                    # TODO: fix, could enter infinite loop if always fails
+                    self.set_curve(_file)
+
+    def delete_curve(self):
+        """Delete the curve using the CRVDEL command.
+
+        :returns: the response from the CRVDEL command
+        :rtype: str
+        """
+        resp = self.ls.msg(f"CRVDEL {self.curve_num}")
+        self.get_header()
+        return resp
+
+    def __str__(self):
+        string = "-" * 50 + "\n"
+        string += "Curve %d: %s\n" % (self.curve_num, self.name)
+        string += "-" * 50 + "\n"
+        string += "  %-30s\t%r\n" % ("Serial Number:", self.serial_number)
+        string += "  %-30s\t%s (%s)\n" % ("Format :", format_lock[self.format], self.format)
+        string += "  %-30s\t%s\n" % ("Temperature Limit:", self.limit)
+        string += "  %-30s\t%s\n" % ("Temperature Coefficient:", self.coefficient)
+
+        return string
+
+
+# TODO: make new Analog class. Too many firmware distinctions to group both Heater and Analog outputs
+# into the same class of objects
+class Heater:
+    """Heater class for LS370 control
+
+    :param ls: the lakeshore object we're controlling
+    :type ls: Lakeshore370.LS370
+    """
+    def __init__(self, ls):
+        self.ls = ls
+
+        self.mode = None
+        self.input = None
+        # self.powerup = None  # in 370, powerup is always disabled
+        self.polarity = None
+        self.filter = None
+        self.delay = None
+        self.units = None
+
+        self.range = None
+
+        self.resistance = None  # only for output = 0
+        # self.max_current = None  # in 370, there is only the htrrng limit and curve limit
+        # self.max_user_current = None  # not in 370
+        self.rng_limit = None
+        self.display = None
+
+        self._get_output_mode()
+        self.get_heater_range()
+        self.get_heater_setup()
+
+    def _get_output_mode(self):
+        """Query the heater mode using the CMODE?, CPOL?, and CSET? commands.
+
+        :returns: list with output mode, polarity, input channel,
+                  unfiltered/filtered, heater units (kelvin, ohms), and
+                  autoscanning delay time
+        :rtype: list
+        """
+        _mode = self.ls.msg('CMODE?')
+        self.mode = output_modes[_mode]
+        self.polarity = self.ls.msg('CPOL?')
+
+        resp = self.ls.msg('CSET?').split(',')
+        self.input = resp[0]
+        self.filter = resp[1]
+        self.units = units_key[resp[2]]
+        self.delay = resp[3]
+
+        return [self.mode, self.polarity, self.input, self.filter, self.units, self.delay]
+
+    def _set_output_mode(self, params):
+        """Set the output mode of the heater with the CMODE, CPOL, and CSET commands.
+
+        Parameters should be <mode>, <polarity>, <input/channel>, <filter>,
+        <units>, <delay>.
+
+        :param params: CMODE/CPOL/CSET parameters
+        :type params: list of str
+
+        :returns: response from ls.msg
+        """
+        assert len(params) == 6
+
+        self.ls.msg(f'CMODE {params.pop(0)}')
+        self.ls.msg(f'CPOL {params.pop(0)}')
+
+        reply = params + [heater_display_lock[self.display], heater_range_lock[self.rng_limit],
+                          str(self.resistance)]
+
+        param_str = ','.join(reply)
+        return self.ls.msg(f"CSET {param_str}")
+
+    def get_heater_setup(self):
+        """Gets heater setup params with the CSET? command.
+
+        :return resp: List of values that have been returned from the Lakeshore.
+        """
+        resp = self.ls.msg("CSET?").split(',')
+
+        self.display = heater_display_key[resp[4]]
+        self.rng_limit = heater_range_key[resp[5]]
+        self.resistance = float(resp[6])
+        # self.max_current = int(resp[1])
+        # self.max_user_current = float(resp[2].strip('E+'))
+
+        return [self.display, self.rng_limit, self.resistance]
+
+    def _set_heater_setup(self, params):
+        """Set the heater setup using the CSET command.
+
+        Params must be a list with the parameters:
+            <display>: Specifies if heater display is current or power.
+                       1=current, 2=power.
+            <max range>: Max heater range; ranges according to HTRRNG command
+            <resistance>: Heater load in ohms (sample);
+                          1=25 Ohm, 2=50 Ohm (warm-up)
+
+        :param params: CSET display, max range, and resistance parameters
+        :type params: list of str
+        :returns: response from the CSET command
+        """
+        assert len(params) == 3
+
+        reply = [self.input, self.filter, units_lock[self.units], self.delay] + params
+        param_str = ','.join(reply)
+        return self.ls.msg("CSET {}".format(param_str))
+
+    def get_mode(self):
+        """Get output mode with the CMODE? command.
+
+        :returns: The output mode
+        :rtype: str
+        """
+        self._get_output_mode()
+        return self.mode
+
+    def set_mode(self, mode):
+        """Set output mode with the CMODE command.
+
+        :param mode: control mode for heater, see page 6-24 of the Lakeshore 370 manual
+        :type mode: str
+
+        :returns: the response from the CMODE/CPOL/CSET commands
+        """
+        # TODO: Make assertions check specific output and its validity in mode selection
+        assert mode.lower() in output_modes_lock.keys(), f"{mode} not a valid mode"
+
+        resp = self._get_output_mode()
+        resp[0] = output_modes_lock[mode.lower()]
+        self.mode = mode
+        return self._set_output_mode(resp)
+
+    def get_manual_out(self):
+        """Get the manual heater output with the MOUT? command.
+
+        :returns: manual heater output
+        :rtype: float
+        """
+        resp = self.ls.msg("MOUT?")
+        return float(resp)
+
+    def get_input_channel(self):
+        """Get the control channel with the CSET? command.
+
+        :returns: The control channel
+        :rtype: str
+        """
+        self._get_output_mode()
+        return self.input
+
+    def set_input_channel(self, _input):
+        """Set the control channel with the CSET command.
+
+        :param _input: specifies which input or channel to control from
+        :type _input: str or int
+        """
+        # ZA fixed to range(1, 17) from range(17). deleted 'A'
+        assert int(_input) in range(1, 17), f"{_input} not a valid input/channel"
+
+        resp = self._get_output_mode()
+        resp[2] = str(_input)
+        self.input = str(_input)
+        return self._set_output_mode(resp)
+
+    def get_powerup(self):
+        pass
+
+    def set_powerup(self, powerup):
+        """
+        :param powerup: specifies whether the output remains on or shuts off
+                        after power cycle. True for on after powerup
+        :type powerup: bool
+        """
+        # assert powerup in [True, False], f"{powerup} not valid powerup parameter"
+        # set_powerup = str(int(powerup))
+        #
+        pass
+
+    def get_polarity(self):
+        pass
+
+    def set_polarity(self, polarity):
+        """
+        :param polarity: specifies output polarity: 'unipolar' or 'bipolar'
+        :type polarity: str
+        """
+        # polarity_key = {0: 'unipolar', 1: 'bipolar'}
+        # polarity_lock = {v: k for k, v in polarity_key.items()}
+        #
+        # assert polarity in polarity_lock.keys(), f"{polarity} not a valid polarity parameter"
+        #
+        # {polarity_lock[polarity]}
+        pass
+
+    def get_filter(self):
+        pass
+
+    def set_filter(self, _filter):
+        """
+        :param _filter: specifies controlling on unfiltered or filtered readings, True = filtered, False = unfiltered
+        :type _filter: bool
+        """
+        # assert _filter in [True, False], f"{_filter} not valid filter parameter"
+        # set_filter = str(int(_filter))
+        #
+        pass
+
+    def get_units(self):
+        """Get the setpoint units with the CSET? command.
+
+        :returns: units, either 'kelvin' or 'ohms'
+        :rtype: str
+        """
+        self._get_output_mode()
+        return self.units
+
+    def set_units(self, units):
+        """Set the setpoint units with the CSET command.
+
+        :param units: units, either 'kelvin' or 'ohms'
+        :type units: str
+        """
+        assert units.lower() in units_lock.keys(), f"{units} not a valid unit"
+
+        resp = self._get_output_mode()
+        resp[4] = units_lock[units.lower()]
+        self.units = units.lower()
+        return self._set_output_mode(resp)
+
+    def get_delay(self):
+        pass
+
+    def set_delay(self, delay):
+        """
+        :param delay: delay in seconds for setpoint change during autoscanning, 1-255 seconds
+        :type delay: int
+        """
+        # assert delay in range(1, 256), f"{delay} not a valid delay parameter"
+        #
+        pass
+
+    def set_heater_display(self, display):
+        """
+        :param display: Display mode for heater. Can either be 'current' or 'power'.
+        :type display: str
+        """
+        assert display.lower() in heater_display_lock.keys(), f"{display} is not a valid display"
+
+        resp = self.get_heater_setup()
+        resp[0] = heater_display_lock[display.lower()]
+
+        self._set_heater_setup(resp)
+
+        self.get_heater_setup()
+
+    # Presumably we're going to know and have set values for heater resistance,
+    # max current, etc, maybe that'll simplify this in the future.
+    def set_heater_output(self, output, display_type=None):
+        """Set heater output with MOUT command.
+
+        :param output: heater output value. If display is 'power', value should
+                       be in Watts. If 'current', value should be in percent.
+        :type output: float
+        :param display_type: Display type if you want to set this before setting heater.
+                             Can be 'power' or 'current'.
+        :type display_type: str
+
+        :returns: True if the output was set, False otherwise
+        :rtype: bool
+        """
+
+        if display_type is not None:
+            self.set_heater_display(display_type)
+
+        self.get_heater_range()
+        self.get_heater_setup()
+
+        if self.range in ["off", "Off"]:
+            print("Heater range is off... Not setting output")
+            return False
+
+        # For sample heater
+        max_pow = self.range ** 2 * self.resistance
+
+        if self.display == 'power':
+            if 0 <= output <= max_pow:
+                self.ls.msg(f"MOUT {output}")
+                return True
+            else:
+                print("Cannot set to {} W, max power is {:2e} W".format(
+                    output, max_pow))
+                return False
+
+        if self.display == 'current':
+            if 0 <= output <= 100:
+                self.ls.msg(f"MOUT {output}")
+                return True
+            else:
+                print(
+                    "Display is current: output must be between 0 and 100")
+                return False
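+
+    # Worked example for the max-power check above (hypothetical values): with
+    # a 31.6 mA heater range and a 25 ohm sample heater load,
+    # max_pow = (31.6e-3)**2 * 25 ~= 0.025 W, so with the display in 'power' a
+    # request of 0.02 W is sent as "MOUT 0.02" while 0.05 W is rejected.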
+
+    # RAMP, RAMP? - in heater class
+    def set_ramp_rate(self, rate):
+        pass
+
+    def get_ramp_rate(self, rate):
+        pass
+
+    def enable_ramp(self):
+        pass
+
+    def disable_ramp(self):
+        pass
+
+    # RAMPST?
+    def get_ramp_status(self):
+        pass
+
+    # RANGE
+    def set_heater_range(self, _range):
+        """Set heater range with HTRRNG command.
+
+        :param _range: heater range
+        :type _range: float or str (for "On"/"Off")
+
+        :returns: heater range in amps
+        :rtype: float
+        """
+        assert _range in heater_range_lock.keys() or str(_range).lower() in ['on', 'off'], 'Not a valid heater Range'
+
+        if str(_range).lower() == 'off':
+            _range = "Off"
+        if str(_range).lower() == 'on':
+            _range = "On"
+
+        self.ls.msg(f"HTRRNG {heater_range_lock[_range]}")
+
+        # refresh self.range value with an HTRRNG? query
+        return self.get_heater_range()
+
+    def get_heater_range(self):
+        """Get heater range with HTRRNG? command.
+
+        :returns: heater range in amps
+        :rtype: float
+        """
+        resp = self.ls.msg("HTRRNG?").strip()
+
+        self.range = heater_range_key[resp]
+
+        return self.range
+
+    # SETP - heater class, uses self.units to interpret value
+    def set_setpoint(self, value):
+        self.ls.msg(f"SETP {value}")
+
+    # SETP? - heater class, uses self.units to interpret value
+    def get_setpoint(self):
+        resp = self.ls.msg("SETP?")
+        return resp
+
+    # STILL - heater class?
+    def set_still_output(self, value):
+        self.ls.msg(f"STILL {value}")
+
+    # STILL? - heater class?
+    def get_still_output(self):
+        resp = self.ls.msg("STILL?")
+        return resp
+
+    # ANALOG, ANALOG?, AOUT?
+    # TODO: read up on what analog output is used for, pretty sure just another output
+    def get_analog_output(self):
+        pass
+
+    def set_analog_output(self):
+        pass
+
+    # PID
+    def set_pid(self, P, I, D):
+        """Set PID parameters for closed loop control.
+
+        :param P: proportional term in PID loop
+        :type P: float
+        :param I: integral term in PID loop
+        :type I: float
+        :param D: derivative term in PID loop
+        :type D: float
+
+        :returns: response from PID command
+        :rtype: str
+        """
+        assert 0 <= float(P) <= 1000
+        assert 0 <= float(I) <= 10000
+        assert 0 <= float(D) <= 2500
+
+        resp = self.ls.msg(f"PID {P},{I},{D}")
+        return resp
+
+    # PID?
+    def get_pid(self):
+        """Get PID parameters with PID? command.
+
+        :returns: P, I, D
+        :rtype: float, float, float
+        """
+        resp = self.ls.msg("PID?").split(',')
+        return float(resp[0]), float(resp[1]), float(resp[2])
+
+
+if __name__ == "__main__":
+    ls = LS370(sys.argv[1])
+    print(ls.msg('*IDN?'))
+    print(f'LS370 successfully initialized at port {sys.argv[1]}')

From 58cc737c0143d677d5daa74e11455f294d016067 Mon Sep 17 00:00:00 2001
From: Zach Huber <78050507+zhuber21@users.noreply.github.com>
Date: Thu, 6 May 2021 15:56:49 -0400
Subject: [PATCH 40/43] Add set_still_output() and get_still_output()

Added two functions to set the still output on the still heater and to
get that output for confirming that things are working as expected.
---
 agents/lakeshore372/LS372_agent.py | 68 ++++++++++++++++++++++++++++++
 1 file changed, 68 insertions(+)

diff --git a/agents/lakeshore372/LS372_agent.py b/agents/lakeshore372/LS372_agent.py
index 47cecde84..77e894b92 100644
--- a/agents/lakeshore372/LS372_agent.py
+++ b/agents/lakeshore372/LS372_agent.py
@@ -595,6 +595,9 @@ def set_heater_output(self, session, params=None):
         output - Specifies heater output value.
                  If display is set to "Current" or heater is "still", can be any number between 0 and 100.
If display is set to "Power", can be any number between 0 and the maximum allowed power. + + Note that for the still heater this sets the still heater manual output, NOT the still heater still output. + Use set_still_output() instead to set the still output. display (opt)- Specifies heater display type. Can be "Current" or "Power". If None, heater display is not reset before setting output. @@ -629,6 +632,69 @@ def set_heater_output(self, session, params=None): session.app.publish_to_feed('temperatures', data) return True, "Set {} display to {}, output to {}".format(heater, display, output) + + def set_still_output(self, session, params=None): + """ + Set the still output on the still heater. This is different than the manual output on the still heater. Use set_heater_output() for that. + + :param params: dict with "output" parameter + :type params: dict + + output - Specifies still heater output value. + Can be any number between 0 and 100. + + """ + + with self._lock.acquire_timeout(job='set_still_output') as acquired: + if not acquired: + self.log.warn(f"Could not start Task because " + f"{self._lock.job} is already running") + return False, "Could not acquire lock" + + output = params['output'] + + self.module.still_heater.set_still_output(output) + + self.log.info("Set still output to {}".format(output)) + + session.set_status('running') + + data = {'timestamp': time.time(), + 'block_name': 'still_heater_still_out', + 'data': {'still_heater_still_out': output} + } + session.app.publish_to_feed('temperatures', data) + + return True, "Set still output to {}".format(output) + + def get_still_output(self, session, params=None): + """ + Gets the current still output on the still heater. + + :param params: dict + :type params: dict + + """ + + with self._lock.acquire_timeout(job='get_still_output') as acquired: + if not acquired: + self.log.warn(f"Could not start Task because " + f"{self._lock.job} is already running") + return False, "Could not acquire lock" + + still_output = self.module.still_heater.get_still_output() + + self.log.info("Current still output is {}".format(still_output)) + + session.set_status('running') + + data = {'timestamp': time.time(), + 'block_name': 'still_heater_still_out', + 'data': {'still_heater_still_out': still_output} + } + session.app.publish_to_feed('temperatures', data) + + return True, "Current still output is {}".format(still_output) def make_parser(parser=None): """Build the argument parser for the Agent. 
Allows sphinx to automatically @@ -702,6 +768,8 @@ def make_parser(parser=None): agent.register_task('check_temperature_stability', lake_agent.check_temperature_stability) agent.register_task('set_output_mode', lake_agent.set_output_mode) agent.register_task('set_heater_output', lake_agent.set_heater_output) + agent.register_task('set_still_output', lake_agent.set_still_output) + agent.register_task('get_still_output', lake_agent.get_still_output) agent.register_process('acq', lake_agent.start_acq, lake_agent.stop_acq) runner.run(agent, auto_reconnect=True) From d78d4f13850ae74a11974f9a0a45e7dba6afac39 Mon Sep 17 00:00:00 2001 From: Zach Huber Date: Thu, 6 May 2021 17:28:40 -0400 Subject: [PATCH 41/43] Fixed get_still_output() --- agents/lakeshore372/LS372_agent.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/agents/lakeshore372/LS372_agent.py b/agents/lakeshore372/LS372_agent.py index 77e894b92..8aeb3dac5 100644 --- a/agents/lakeshore372/LS372_agent.py +++ b/agents/lakeshore372/LS372_agent.py @@ -687,12 +687,7 @@ def get_still_output(self, session, params=None): self.log.info("Current still output is {}".format(still_output)) session.set_status('running') - - data = {'timestamp': time.time(), - 'block_name': 'still_heater_still_out', - 'data': {'still_heater_still_out': still_output} - } - session.app.publish_to_feed('temperatures', data) + session.data = {"still_heater_still_out": still_output} return True, "Current still output is {}".format(still_output) From d990c7fc67da8292d3ec5d8437f881d43cb6d4ba Mon Sep 17 00:00:00 2001 From: Zach Huber Date: Tue, 18 May 2021 18:21:53 -0400 Subject: [PATCH 42/43] Fixed documentation issues --- agents/lakeshore372/LS372_agent.py | 11 ++++++++--- docs/agents/lakeshore372.rst | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/agents/lakeshore372/LS372_agent.py b/agents/lakeshore372/LS372_agent.py index 8aeb3dac5..28efcd74a 100644 --- a/agents/lakeshore372/LS372_agent.py +++ b/agents/lakeshore372/LS372_agent.py @@ -635,7 +635,8 @@ def set_heater_output(self, session, params=None): def set_still_output(self, session, params=None): """ - Set the still output on the still heater. This is different than the manual output on the still heater. Use set_heater_output() for that. + Set the still output on the still heater. This is different than the manual output + on the still heater. Use set_heater_output() for that. :param params: dict with "output" parameter :type params: dict @@ -671,8 +672,12 @@ def get_still_output(self, session, params=None): """ Gets the current still output on the still heater. - :param params: dict - :type params: dict + This task has no useful parameters. + + The still heater output is stored in the session.data + object in the format:: + + {"still_heater_still_out": 9.628} """ diff --git a/docs/agents/lakeshore372.rst b/docs/agents/lakeshore372.rst index dc5281ee2..3c5826202 100644 --- a/docs/agents/lakeshore372.rst +++ b/docs/agents/lakeshore372.rst @@ -82,7 +82,7 @@ Agent API --------- .. 
autoclass:: agents.lakeshore372.LS372_agent.LS372_Agent - :members: init_lakeshore_task, start_acq + :members: Driver API ---------- From 86cd133c8519495b1f54ba894deeecfba8c10278 Mon Sep 17 00:00:00 2001 From: Brian Koopman Date: Wed, 19 May 2021 15:48:02 -0400 Subject: [PATCH 43/43] Update ocs base image to v0.8.0 --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index bf1d6a449..09a1d91fa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ # A container setup with an installation of socs. # Use the ocs image as a base -FROM simonsobs/ocs:v0.7.1-17-g1162576-dev +FROM simonsobs/ocs:v0.8.0 # Copy the current directory contents into the container at /app COPY . /app/socs/