diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 6d80fa84..dfeb053a 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -15,6 +15,7 @@ Added
 - Option to pass background image to ``utils.io.load_data``.
 - Option to set image resolution with ``hardware.utils.display`` function.
+- Add utility for mask adapter generation in ``lensless.hardware.fabrication``.
 - Option to add simulated background in ``util.dataset``
 - Auxiliary of reconstructing output from pre-processor (not working).
 - Option to set focal range for MultiLensArray.
@@ -62,6 +63,7 @@ Added
 - Fallback for normalization if data not in 8bit range (``lensless.utils.io.save_image``).
 - Add utilities for fabricating masks with 3D printing (``lensless.hardware.fabrication``).
 - WandB support.
+- Script for mask adapter generation, and documentation of the new mount.
 
 Changed
 ~~~~~~~
diff --git a/configs/collect_dataset.yaml b/configs/collect_dataset.yaml
index c54ef8e7..75df8597 100644
--- a/configs/collect_dataset.yaml
+++ b/configs/collect_dataset.yaml
@@ -1,6 +1,6 @@
 # python scripts/collect_dataset_on_device.py -cn collect_dataset
-input_dir: /mnt/mirflickr/10
+input_dir: /mnt/mirflickr/all
 input_file_ext: jpg
 
 # can pass existing folder to continue measurement
@@ -41,6 +41,9 @@ display:
   landscape: False    # whether to force landscape
 
 capture:
+  measure_bg: False   # measure a background every x images; set to False to skip background measurement
+  bg_fp: "black_background"
+  framerate: 10
   skip: False    # to test looping over displaying images
   config_pause: 3
   iso: 100
diff --git a/configs/collect_dataset_background.yaml b/configs/collect_dataset_background.yaml
new file mode 100644
index 00000000..9f509eda
--- /dev/null
+++ b/configs/collect_dataset_background.yaml
@@ -0,0 +1,26 @@
+# python scripts/measure/collect_dataset_on_device.py -cn collect_dataset_background
+defaults:
+  - collect_dataset
+  - _self_
+
+
+output_dir: /mnt/mirflickr/all_measured_20240813-183259
+
+# files to measure
+n_files: 25000
+
+min_level: 160
+max_tries: 3
+
+
+# -- display parameters
+display:
+  screen_res: [1920, 1200]   # width, height
+  image_res: [600, 600]   # if input images don't all have the same dimensions, resize them to this
+  vshift: -34
+
+capture:
+  measure_bg: 1   # measure a background every x images; set to False to skip background measurement
+  awb_gains: [1.8, 1.1]   # red, blue
+  fact_increase: 1.35   # multiplicative factor to increase exposure
+  fact_decrease: 1.3   # multiplicative factor to decrease exposure
diff --git a/configs/upload_tapecam_mirflickr_ambient.yaml b/configs/upload_tapecam_mirflickr_ambient.yaml
index 0d62238a..f1196ca3 100644
--- a/configs/upload_tapecam_mirflickr_ambient.yaml
+++ b/configs/upload_tapecam_mirflickr_ambient.yaml
@@ -4,13 +4,13 @@ defaults:
   - _self_
 
 repo_id: "Lensless/TapeCam-Mirflickr-Ambient"
-n_files: null
+n_files: 16000
 test_size: 0.15
 
 # -- to match TapeCam without ambient light
 split: 100   # "first: first `nfiles*test_size` for test, `int`: test_size*split for test (interleaved) as if multimask with this many masks
 lensless:
-  dir: data/100_samples
+  dir: /dev/shm/tape_15k_ambient/all_measured_20240805-143922
   ambient: True
   ext: ".png"
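The new capture.measure_bg option above is a period rather than a boolean: a background frame is captured every measure_bg displayed images (so 1 in the override means one background per image), and False disables background capture. A minimal sketch of this assumed cadence, with illustrative values only:

```python
# Sketch of the assumed capture.measure_bg semantics (illustrative values only).
measure_bg = 10  # capture a background every 10 images; False would disable it
n_files = 100

for i in range(n_files):
    # ... the i-th lensless measurement would happen here ...
    if measure_bg and (i % measure_bg == 0 or i == n_files - 1):
        print(f"capturing a background frame at image {i}")
```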
diff --git a/docs/source/fabrication.rst b/docs/source/fabrication.rst
index 83fc3963..d4ac9bdd 100644
--- a/docs/source/fabrication.rst
+++ b/docs/source/fabrication.rst
@@ -7,6 +7,14 @@
    :alt: Mount components.
    :align: center
 
+
+Note that the most recent version of the mount (shown below) adds stoppers,
+which prevent the mask from scratching the Raspberry Pi camera.
+This new version of the mount can be found `here `_.
+
+.. image:: mount_V4.png
+   :alt: New inner mount with stoppers.
+   :align: center
 
 Mask3DModel
 ~~~~~~~~~~~
@@ -22,6 +30,14 @@
    :members:
    :special-members: __init__
 
+Because newer versions of the masks are smaller, the following adapter lets
+them be used with the current mount design.
+
+.. image:: mask_adapter.png
+   :alt: Mask adapter.
+   :align: center
+
+
 
 MultiLensMold
 ~~~~~~~~~~~~~
diff --git a/docs/source/mask_adapter.png b/docs/source/mask_adapter.png
new file mode 100644
index 00000000..e8cb7eab
Binary files /dev/null and b/docs/source/mask_adapter.png differ
diff --git a/docs/source/mount_V4.png b/docs/source/mount_V4.png
new file mode 100644
index 00000000..1f8d7703
Binary files /dev/null and b/docs/source/mount_V4.png differ
diff --git a/lensless/hardware/fabrication.py b/lensless/hardware/fabrication.py
index 39425fd8..411d9f96 100644
--- a/lensless/hardware/fabrication.py
+++ b/lensless/hardware/fabrication.py
@@ -520,3 +520,74 @@ def generate(self, mask: np.ndarray, mask_size, depth: float) -> cq.Workplane:
         )
 
         return model
+
+
+def create_mask_adapter(
+    fp, mask_w, mask_h, mask_d, adapter_w=12.90, adapter_h=9.90, support_w=0.4, support_d=0.4
+):
+    """
+    Create and store an adapter for a mask given its measurements.
+
+    Warning: friction-fitted parts should be made 0.05-0.1 mm smaller than the
+    opening they go into (e.g. the mask must fit in the adapter, and the adapter
+    must fit in the mount).
+
+    Parameters
+    ----------
+    fp : string
+        Folder in which to store the generated STL file.
+    mask_w : float
+        Width of the mask in mm.
+    mask_h : float
+        Height of the mask in mm.
+    mask_d : float
+        Thickness of the mask in mm.
+    adapter_w : float
+        Width of the adapter in mm.
+        Default: current mount dimension (13 - 0.1 mm).
+    adapter_h : float
+        Height of the adapter in mm.
+        Default: current mount dimension (10 - 0.1 mm).
+    support_w : float
+        Width of the small ledge that supports the mask, in mm.
+    support_d : float
+        Thickness of the small ledge that supports the mask, in mm.
+    """
+    epsilon = 0.2
+
+    # Make sure the dimensions are realistic
+    assert mask_w < adapter_w - epsilon, "mask width too big"
+    assert mask_h < adapter_h - epsilon, "mask height too big"
+    assert mask_w - 2 * support_w > epsilon, "mask support too big"
+    assert mask_h - 2 * support_w > epsilon, "mask support too big"
+    assert os.path.exists(fp), "folder does not exist"
+
+    file_name = os.path.join(fp, "mask_adapter.stl")
+
+    # Prevent accidental overwrite
+    if os.path.isfile(file_name):
+        print("Warning: mask_adapter.stl already exists at " + fp)
+        if input("Overwrite? [y/n] ") != "y":
+            print("Aborting adapter generation.")
+            return
+
+    # Construct the outer frame of the adapter
+    adapter = (
+        cq.Workplane("front")
+        .rect(adapter_w, adapter_h)
+        .rect(mask_w, mask_h)
+        .extrude(mask_d + support_d)
+    )
+
+    # Construct the ledge that keeps the mask in place
+    support = (
+        cq.Workplane("front")
+        .rect(mask_w, mask_h)
+        .rect(mask_w - 2 * support_w, mask_h - 2 * support_w)
+        .extrude(support_d)
+    )
+
+    # Join the two shapes into one
+    adapter = adapter.union(support)
+
+    # Save to file
+    cq.exporters.export(adapter, file_name)
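A possible usage sketch for the new create_mask_adapter helper (not part of the patch): the mask dimensions below are purely illustrative and, per the docstring, should already include the 0.05-0.1 mm friction-fit tolerance; cadquery is needed for the STL export.

```python
from lensless.hardware.fabrication import create_mask_adapter

# Illustrative dimensions for a hypothetical 9.5 mm x 7.5 mm x 0.5 mm mask,
# shrunk by ~0.1 mm so that it friction-fits into the adapter opening.
create_mask_adapter(
    fp=".",      # existing folder where mask_adapter.stl is written
    mask_w=9.4,  # mask width in mm
    mask_h=7.4,  # mask height in mm
    mask_d=0.5,  # mask thickness in mm
)
```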
""" +import json import numpy as np import hydra @@ -28,7 +29,6 @@ import glob from lensless.hardware.slm import set_programmable_mask, adafruit_sub2full - from lensless.hardware.constants import ( RPI_HQ_CAMERA_CCM_MATRIX, RPI_HQ_CAMERA_BLACK_LEVEL, @@ -52,7 +52,6 @@ def natural_sort(arr): @hydra.main(version_base=None, config_path="../../configs", config_name="collect_dataset") def collect_dataset(config): - input_dir = config.input_dir output_dir = config.output_dir if output_dir is None: @@ -162,7 +161,7 @@ def collect_dataset(config): camera.close() # -- now set up camera with desired settings - camera = PiCamera(sensor_mode=0, resolution=tuple(res)) + camera = PiCamera(sensor_mode=0, resolution=tuple(res), framerate=config.capture.framerate) # Set ISO to the desired value camera.resolution = tuple(res) @@ -202,6 +201,10 @@ def collect_dataset(config): exposure_vals = [] brightness_vals = [] n_tries_vals = [] + + bg_name = None + current_bg = {} + shutter_speed = init_shutter_speed for i, _file in enumerate(tqdm.tqdm(files[start_idx:])): # save file in output directory as PNG @@ -210,147 +213,55 @@ def collect_dataset(config): # if not done, perform measurement if not os.path.isfile(output_fp): - if config.dummy: shutil.copyfile(_file, output_fp) time.sleep(1) - else: - # -- show on display - screen_res = np.array(config.display.screen_res) - hshift = config.display.hshift - vshift = config.display.vshift - pad = config.display.pad - brightness = init_brightness - display_image_path = config.display.output_fp - rot90 = config.display.rot90 - display_command = f"python scripts/measure/prep_display_image.py --fp {_file} --output_path {display_image_path} --screen_res {screen_res[0]} {screen_res[1]} --hshift {hshift} --vshift {vshift} --pad {pad} --brightness {brightness} --rot90 {rot90}" - if config.display.landscape: - display_command += " --landscape" - if config.display.image_res is not None: - display_command += ( - f" --image_res {config.display.image_res[0]} {config.display.image_res[1]}" - ) - # print(display_command) - os.system(display_command) - - time.sleep(config.display.delay) - - if not config.capture.skip: - - # -- set mask pattern - if config.masks is not None: - mask_idx = (i + start_idx) % config.masks.n - mask_fp = mask_dir / f"mask_{mask_idx}.npy" - print("using mask: ", mask_fp) - mask_vals = np.load(mask_fp) - full_pattern = adafruit_sub2full( - mask_vals, - center=config.masks.center, - ) - set_programmable_mask(full_pattern, device=config.masks.device) - - # -- take picture - max_pixel_val = 0 - fact_increase = config.capture.fact_increase - fact_decrease = config.capture.fact_decrease - n_tries = 0 - - camera.shutter_speed = init_shutter_speed - time.sleep(config.capture.config_pause) + # display img + display_img(_file, config, init_brightness) + # capture img + output, _, _, camera = capture_screen(MAX_LEVEL, MAX_TRIES, MIN_LEVEL, _file, brightness_vals, camera, config, down, + exposure_vals, g, i, init_brightness, shutter_speed, None, + n_tries_vals, + output_fp, start_idx) + + if config.capture.measure_bg: + # name of background for current image + bg_name = plib.Path(config.capture.bg_fp + str(i)).with_suffix(f".{config.output_file_ext}") + bg = output_dir / bg_name + + # append current file to bg list + if str(bg_name) not in current_bg: + current_bg[str(bg_name)] = str(_file.name) + else: + current_bg[str(bg_name)].append(str(_file.name)) + # capture background periodically + if i % config.capture.measure_bg == 0 or (i == n_files - 1): + bg_name = 
+                        # save the background-to-image mapping gathered so far
+                        if current_bg:
+                            mapping_fp = output_dir / "bg_mappings.json"
+                            bg_mappings = {}
+                            if os.path.isfile(mapping_fp):
+                                with open(mapping_fp, "r") as f:
+                                    bg_mappings = json.load(f)
+                            bg_mappings.update(current_bg)
+                            with open(mapping_fp, "w") as outfile:
+                                json.dump(bg_mappings, outfile, indent=4)
+                            current_bg = {}
+
+                        # display background (black screen)
+                        display_img(None, config, brightness=init_brightness)
+                        # capture background
+                        output, shutter_speed, init_brightness, camera = capture_screen(
+                            MAX_LEVEL, 0, MIN_LEVEL,
+                            plib.Path(config.capture.bg_fp + str(i)).with_suffix(
+                                f".{config.output_file_ext}"
+                            ),
+                            brightness_vals, camera, config, down, exposure_vals, g, i,
+                            init_brightness, shutter_speed, None, n_tries_vals, bg, start_idx,
+                        )
 
-                    current_screen_brightness = init_brightness
-                    current_shutter_speed = camera.shutter_speed
-                    print(f"current shutter speed: {current_shutter_speed}")
-                    print(f"current screen brightness: {current_screen_brightness}")
-
-                    while max_pixel_val < MIN_LEVEL or max_pixel_val > MAX_LEVEL:
-
-                        # get bayer data
-                        stream = picamerax.array.PiBayerArray(camera)
-                        camera.capture(stream, "jpeg", bayer=True)
-                        output_bayer = np.sum(stream.array, axis=2).astype(np.uint16)
-
-                        # convert to RGB
-                        output = bayer2rgb_cc(
-                            output_bayer,
-                            down=down,
-                            nbits=12,
-                            blue_gain=float(g[1]),
-                            red_gain=float(g[0]),
-                            black_level=RPI_HQ_CAMERA_BLACK_LEVEL,
-                            ccm=RPI_HQ_CAMERA_CCM_MATRIX,
-                            nbits_out=8,
-                        )
-
-                        # if down:
-                        #     output = resize(
-                        #         output[None, ...], factor=1 / down, interpolation=cv2.INTER_CUBIC
-                        #     )[0]
-
-                        # save image
-                        save_image(output, output_fp, normalize=False)
-
-                        # print range
-                        print(f"{output_fp}, range: {output.min()} - {output.max()}")
-
-                        n_tries += 1
-                        if n_tries > MAX_TRIES:
-                            print("Max number of tries reached!")
-                            break
-
-                        max_pixel_val = output.max()
-                        if max_pixel_val < MIN_LEVEL:
-
-                            # increase exposure
-                            current_shutter_speed = int(current_shutter_speed * fact_increase)
-                            camera.shutter_speed = current_shutter_speed
-                            time.sleep(config.capture.config_pause)
-
-                            print(f"increasing shutter speed to [desired] {current_shutter_speed} [actual] {camera.shutter_speed}")
-
-                        elif max_pixel_val > MAX_LEVEL:
-
-                            if current_shutter_speed > 13098:  # TODO: minimum for RPi HQ
-                                # decrease exposure
-                                current_shutter_speed = int(current_shutter_speed / fact_decrease)
-                                camera.shutter_speed = current_shutter_speed
-                                time.sleep(config.capture.config_pause)
-                                print(f"decreasing shutter speed to [desired] {current_shutter_speed} [actual] {camera.shutter_speed}")
-
-                            else:
-
-                                # decrease screen brightness
-                                current_screen_brightness = current_screen_brightness - 10
-                                screen_res = np.array(config.display.screen_res)
-                                hshift = config.display.hshift
-                                vshift = config.display.vshift
-                                pad = config.display.pad
-                                brightness = current_screen_brightness
-                                display_image_path = config.display.output_fp
-                                rot90 = config.display.rot90
-
-                                display_command = f"python scripts/measure/prep_display_image.py --fp {_file} --output_path {display_image_path} --screen_res {screen_res[0]} {screen_res[1]} --hshift {hshift} --vshift {vshift} --pad {pad} --brightness {brightness} --rot90 {rot90}"
-                                if config.display.landscape:
-                                    display_command += " --landscape"
-                                if config.display.image_res is not None:
-                                    display_command += f" --image_res {config.display.image_res[0]} {config.display.image_res[1]}"
-                                # print(display_command)
-                                os.system(display_command)
-
-                                time.sleep(config.display.delay)
-
-                    exposure_vals.append(current_shutter_speed / 1e6)
-                    brightness_vals.append(current_screen_brightness)
-                    n_tries_vals.append(n_tries)
 
             if recon is not None:
-                # normalize and remove background
                 output = output.astype(np.float32)
                 output /= output.max()
-                output -= bg
+                output -= bg  # TODO implement fancy bg subtraction
                 output = np.clip(output, a_min=0, a_max=output.max())
 
                 # set data
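Downstream processing can read the bg_mappings.json file written above directly; a short sketch (the relative path is an assumption, i.e. run it from the output directory):

```python
import json

# Read back the background-to-image mapping written by the measurement loop above.
with open("bg_mappings.json", "r") as f:
    bg_mappings = json.load(f)

for bg_file, image_files in bg_mappings.items():
    print(f"{bg_file} is the background for: {image_files}")
```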
@@ -366,11 +277,11 @@ def collect_dataset(config):
         if config.runtime:
             proc_time = time.time() - start_time
             if proc_time > runtime_sec:
-                print(f"-- measured {i+1} / {n_files} files")
+                print(f"-- measured {i + 1} / {n_files} files")
                 break
 
     proc_time = time.time() - start_time
-    print(f"\nFinished, {proc_time/60.:.3f} minutes.")
+    print(f"\nFinished, {proc_time / 60.:.3f} minutes.")
 
     # print brightness and exposure range and average
     print(f"brightness range: {np.min(brightness_vals)} - {np.max(brightness_vals)}")
@@ -381,5 +292,130 @@
     print(f"n_tries average: {np.mean(n_tries_vals)}")
 
 
+def capture_screen(
+    MAX_LEVEL, MAX_TRIES, MIN_LEVEL, _file, brightness_vals, camera, config, down,
+    exposure_vals, g, i, init_brightness, init_shutter_speed, mask_dir, n_tries_vals,
+    output_fp, start_idx,
+):
+    if not config.capture.skip:
+
+        # -- set mask pattern
+        if config.masks is not None:
+            mask_idx = (i + start_idx) % config.masks.n
+            mask_fp = mask_dir / f"mask_{mask_idx}.npy"
+            print("using mask: ", mask_fp)
+            mask_vals = np.load(mask_fp)
+            full_pattern = adafruit_sub2full(
+                mask_vals,
+                center=config.masks.center,
+            )
+            set_programmable_mask(full_pattern, device=config.masks.device)
+
+        # -- take picture
+        max_pixel_val = 0
+        fact_increase = config.capture.fact_increase
+        fact_decrease = config.capture.fact_decrease
+        n_tries = 0
+
+        camera.shutter_speed = int(init_shutter_speed)
+        time.sleep(config.capture.config_pause)
+        current_shutter_speed = camera.shutter_speed
+
+        current_screen_brightness = init_brightness
+        print(f"current shutter speed: {current_shutter_speed}")
+        print(f"current screen brightness: {current_screen_brightness}")
+
+        while max_pixel_val < MIN_LEVEL or max_pixel_val > MAX_LEVEL:
+
+            # get bayer data
+            stream = picamerax.array.PiBayerArray(camera)
+            camera.capture(stream, "jpeg", bayer=True)
+            output_bayer = np.sum(stream.array, axis=2).astype(np.uint16)
+
+            # convert to RGB
+            output = bayer2rgb_cc(
+                output_bayer,
+                down=down,
+                nbits=12,
+                blue_gain=float(g[1]),
+                red_gain=float(g[0]),
+                black_level=RPI_HQ_CAMERA_BLACK_LEVEL,
+                ccm=RPI_HQ_CAMERA_CCM_MATRIX,
+                nbits_out=8,
+            )
+
+            # if down:
+            #     output = resize(
+            #         output[None, ...], factor=1 / down, interpolation=cv2.INTER_CUBIC
+            #     )[0]
+
+            # save image
+            save_image(output, output_fp, normalize=False)
+
+            # print range
+            print(f"{output_fp}, range: {output.min()} - {output.max()}")
+
+            n_tries += 1
+            if n_tries > MAX_TRIES:
+                if MAX_TRIES != 0:
+                    print("Max number of tries reached!")
+                break
+
+            max_pixel_val = output.max()
+
+            if max_pixel_val < MIN_LEVEL:
+
+                # increase exposure
+                current_shutter_speed = int(current_shutter_speed * fact_increase)
+                camera.shutter_speed = current_shutter_speed
+                time.sleep(config.capture.config_pause)
+
+                print(f"increasing shutter speed to [desired] {current_shutter_speed} [actual] {camera.shutter_speed}")
+
+            elif max_pixel_val > MAX_LEVEL:
+                if current_shutter_speed > 13098:  # TODO: minimum for RPi HQ
+                    # decrease exposure
+                    current_shutter_speed = int(current_shutter_speed / fact_decrease)
+                    camera.shutter_speed = current_shutter_speed
+                    time.sleep(config.capture.config_pause)
+                    print(f"decreasing shutter speed to [desired] {current_shutter_speed} [actual] {camera.shutter_speed}")
+
+                else:
+
+                    # decrease screen brightness
+                    current_screen_brightness = current_screen_brightness - 10
+                    display_img(_file, config, current_screen_brightness)
+
+        exposure_vals.append(current_shutter_speed / 1e6)
+        brightness_vals.append(current_screen_brightness)
+        n_tries_vals.append(n_tries)
+    return output, current_shutter_speed, current_screen_brightness, camera
+
+
+def display_img(_file, config, brightness):
+    # if no file is given, display a black screen (used for background captures)
+    if _file is None:
+        black_img = np.zeros(tuple(config.display.screen_res) + (3,))
+        fp = "tmp_display.png"
+        im = Image.fromarray(black_img.astype("uint8"), "RGB")
+        im.save(fp)
+        _file = fp
+
+    # -- show on display
+    screen_res = np.array(config.display.screen_res)
+    hshift = config.display.hshift
+    vshift = config.display.vshift
+    pad = config.display.pad
+
+    display_image_path = config.display.output_fp
+    rot90 = config.display.rot90
+    display_command = f"python scripts/measure/prep_display_image.py --fp {_file} --output_path {display_image_path} --screen_res {screen_res[0]} {screen_res[1]} --hshift {hshift} --vshift {vshift} --pad {pad} --brightness {brightness} --rot90 {rot90}"
+    if config.display.landscape:
+        display_command += " --landscape"
+    if config.display.image_res is not None:
+        display_command += (
+            f" --image_res {config.display.image_res[0]} {config.display.image_res[1]}"
+        )
+    # print(display_command)
+    os.system(display_command)
+    time.sleep(config.display.delay)
+
+
 if __name__ == "__main__":
     collect_dataset()
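To summarize the auto-exposure behaviour that capture_screen implements above: the shutter speed is scaled up or down until the brightest pixel falls within [MIN_LEVEL, MAX_LEVEL], and once the shutter speed reaches the Raspberry Pi HQ minimum the screen brightness is lowered instead. A self-contained sketch of just the shutter-speed update (the values are illustrative, not the script's defaults):

```python
MIN_LEVEL, MAX_LEVEL = 160, 254           # illustrative target range for the max pixel value
FACT_INCREASE, FACT_DECREASE = 1.35, 1.3  # multiplicative exposure adjustments


def next_shutter_speed(shutter_speed, max_pixel_val):
    """Return the next shutter speed (in microseconds) to try."""
    if max_pixel_val < MIN_LEVEL:
        return int(shutter_speed * FACT_INCREASE)  # under-exposed: lengthen exposure
    if max_pixel_val > MAX_LEVEL:
        return int(shutter_speed / FACT_DECREASE)  # saturated: shorten exposure
    return shutter_speed  # within range: keep as is


print(next_shutter_speed(100000, 120))  # under-exposed example -> 135000
```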
diff --git a/scripts/recon/train_learning_based.py b/scripts/recon/train_learning_based.py
index 50dc50fb..3d41e20c 100644
--- a/scripts/recon/train_learning_based.py
+++ b/scripts/recon/train_learning_based.py
@@ -31,7 +31,7 @@
 import wandb
 import logging
 import hydra
-from hydra.utils import get_original_cwd
+from hydra.utils import get_original_cwd, to_absolute_path
 import os
 import numpy as np
 import time
@@ -228,7 +228,9 @@ def train_learned(config):
             display_res=config.files.image_res,
             alignment=config.alignment,
             bg_snr_range=config.files.background_snr_range,  # TODO check if correct
-            bg_fp=config.files.background_fp,
+            bg_fp=to_absolute_path(config.files.background_fp)
+            if config.files.background_fp is not None
+            else None,
         )
 
     else:
@@ -252,7 +254,9 @@ def train_learned(config):
             simulate_lensless=config.files.simulate_lensless,
             random_flip=config.files.random_flip,
             bg_snr_range=config.files.background_snr_range,
-            bg_fp=config.files.background_fp,
+            bg_fp=to_absolute_path(config.files.background_fp)
+            if config.files.background_fp is not None
+            else None,
         )
 
         test_set = HFDataset(
@@ -272,7 +276,9 @@ def train_learned(config):
             n_files=config.files.n_files,
             simulation_config=config.simulation,
             bg_snr_range=config.files.background_snr_range,
-            bg_fp=config.files.background_fp,
+            bg_fp=to_absolute_path(config.files.background_fp)
+            if config.files.background_fp is not None
+            else None,
             force_rgb=config.files.force_rgb,
             simulate_lensless=False,  # in general evaluate on measured (set to False)
         )
@@ -380,6 +386,9 @@ def train_learned(config):
         if config.files.random_rotate or config.files.random_shifts:
             save_image(psf_recon[0].cpu().numpy(), f"psf_{_idx}.png")
 
+        save_image(lensed[0].cpu().numpy(), f"lensed_{_idx}.png")
+        save_image(lensless[0].cpu().numpy(), f"lensless_raw_{_idx}.png")
+
         # Reconstruct and plot image
         reconstruct_save(
             _idx,
@@ -396,7 +405,6 @@ def train_learned(config):
             rotate_angle,
             shift,
         )
-        save_image(lensed[0].cpu().numpy(), f"lensed_{_idx}.png")
         # save_image(lensed, f"lensed_{_idx}.png")
         if test_set.bg_sim is not None or test_set.measured_bg:
             # Reconstruct and plot background subtracted image
@@ -719,9 +727,6 @@ def reconstruct_save(
     res_np = res_np / res_np.max()
 
     lensed_np = lensed[0]  # .cpu().numpy()
-    lensless_np = lensless.cpu().numpy()  # [0]#.cpu().numpy()
-    save_image(lensless_np, f"lensless_raw_{_idx}.png")
-
     # -- plot lensed and res on top of each other
     cropped = False
     if hasattr(test_set, "alignment"):
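The to_absolute_path wrapping above is needed because Hydra changes the working directory at run time, so a relative files.background_fp from the config would otherwise be resolved against the run's output directory. A minimal sketch of the assumed behaviour (the path itself is hypothetical):

```python
from hydra.utils import to_absolute_path

background_fp = "data/black_background.png"  # hypothetical relative path from the config
bg_fp = to_absolute_path(background_fp) if background_fp is not None else None
print(bg_fp)  # absolute path anchored at the original launch directory
```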