Merge branch 'main' into trainable_amplitude_mask
ebezzam committed Feb 22, 2024
2 parents 6335490 + 467c927 commit 689a2b1
Showing 51 changed files with 3,597 additions and 276 deletions.
26 changes: 26 additions & 0 deletions CHANGELOG.rst
@@ -13,6 +13,24 @@ Unreleased
Added
~~~~~

- Script to upload measured datasets to Hugging Face: ``scripts/data/upload_dataset_huggingface.py``

Changed
~~~~~~~

- Dataset reconstruction script uses datasets from Hugging Face: ``scripts/recon/dataset.py``

Bugfix
~~~~~~

- Nothing

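The Unreleased entries above add ``scripts/data/upload_dataset_huggingface.py`` and switch ``scripts/recon/dataset.py`` to Hugging Face datasets. The upload script itself is not part of this diff; the following is only a hedged sketch of the kind of ``huggingface_hub`` calls such an upload relies on, with a placeholder repository name and folder path.

```python
# Hedged sketch only: the real logic lives in scripts/data/upload_dataset_huggingface.py,
# which is not shown in this diff. Repo ID and folder path are placeholders.
from huggingface_hub import HfApi

api = HfApi()
api.create_repo(repo_id="username/my-lensless-dataset", repo_type="dataset", exist_ok=True)
api.upload_folder(
    folder_path="data/my_measured_dataset",  # local folder with measurements and PSF
    repo_id="username/my-lensless-dataset",
    repo_type="dataset",
)
```
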
1.0.6 - (2024-02-21)
--------------------

Added
~~~~~

- Trainable reconstruction can return intermediate outputs (between pre- and post-processing).
- Auto-download for DRUNet model.
- ``utils.dataset.DiffuserCamMirflickr`` helper class for Mirflickr dataset.
@@ -23,6 +41,13 @@ Added
- Option to freeze/unfreeze/add pre- and post-processor components during training.
- Option to skip unrolled training and just use U-Net.
- Dataset objects for Adafruit LCD: measured CelebA and hardware-in-the-loop.
- Option to add auxiliary loss from output of camera inversion.
- Option to specify denoiser to iterative methods for plug-and-play.
- Model repository of trained models in ``lensless.recon.model_dict``.
- TrainableInversion component as in FlatNet.
- ``lensless.recon.utils.get_drunet_function_v2`` which doesn't normalize each color channel.
- Option to add noise to DiffuserCamMirflickr dataset.
- Option to initialize pre- and post-processor with components from another model.

Changed
~~~~~~~
@@ -37,6 +62,7 @@ Bugfix
- Fix bad train/test split for DiffuserCamMirflickr in unrolled training.
- Resize utility.
- Aperture, index to dimension conversion.
- Submodule imports.


1.0.5 - (2023-09-05)
9 changes: 9 additions & 0 deletions README.rst
@@ -16,6 +16,15 @@ LenslessPiCam
:alt: Downloads


.. image:: https://colab.research.google.com/assets/colab-badge.svg
:target: https://drive.google.com/drive/folders/1nBDsg86RaZIqQM6qD-612k9v8gDrgdwB?usp=drive_link
:alt: notebooks

.. image:: https://huggingface.co/datasets/huggingface/badges/resolve/main/powered-by-huggingface-dark.svg
:target: https://huggingface.co/bezzam
:alt: huggingface


*A Hardware and Software Toolkit for Lensless Computational Imaging with a Raspberry Pi*
-----------------------------------------------------------------------------------------

13 changes: 13 additions & 0 deletions configs/admm_pnp.yaml
@@ -0,0 +1,13 @@
# python scripts/recon/admm.py -cn admm_pnp
defaults:
- defaults_recon
- _self_

torch: True
torch_device: 'cuda:1'

admm:
denoiser:
network: DruNet
noise_level: 0.05
use_dual: False
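The ``denoiser`` block above enables plug-and-play (PnP): inside the ADMM loop, the proximal step for the image prior is handed to a learned denoiser (DruNet in this config). Below is a minimal, self-contained sketch of that pattern under generic assumptions; ``forward``, ``adjoint``, and the toy denoiser are stand-ins for the camera model and DRUNet wrapper, not LenslessPiCam API.

```python
# Minimal PnP sketch, assuming a generic linear forward model. Not LenslessPiCam API.
import torch

def pnp_admm(y, forward, adjoint, denoiser, n_iter=20, rho=1e-4, step=1.0, noise_level=0.05):
    x = adjoint(y)                # initialize from the adjoint of the imaging model
    z = x.clone()                 # auxiliary (denoised) variable
    u = torch.zeros_like(x)       # scaled dual variable
    for _ in range(n_iter):
        # data-fidelity step: one gradient step on 0.5*||forward(x)-y||^2 + 0.5*rho*||x-(z-u)||^2
        grad = adjoint(forward(x) - y) + rho * (x - (z - u))
        x = x - step * grad
        # prior step: the denoiser plays the role of the proximal operator
        z = denoiser(x + u, noise_level)
        # dual update
        u = u + x - z
    return z

# toy example: identity forward model and a box-blur "denoiser"
forward = lambda v: v
adjoint = lambda v: v
def toy_denoiser(v, sigma):
    kernel = torch.ones(1, 1, 3, 3) / 9.0
    return torch.nn.functional.conv2d(v, kernel, padding=1)

y = torch.rand(1, 1, 32, 32)
x_hat = pnp_admm(y, forward, adjoint, toy_denoiser, n_iter=10)
print(x_hat.shape)
```
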
2 changes: 1 addition & 1 deletion configs/benchmark.yaml
@@ -14,7 +14,7 @@ device: "cuda"
# numbers of iterations to benchmark
n_iter_range: [5, 10, 20, 50, 100, 200, 300]
# number of files to benchmark
n_files: 200 # null for all files
n_files: null # null for all files
#How much should the image be downsampled
downsample: 2
#algorithm to benchmark
6 changes: 6 additions & 0 deletions configs/defaults_recon.yaml
@@ -64,6 +64,12 @@ admm:
mu2: 1e-5
mu3: 4e-5
tau: 0.0001
# PnP
denoiser: null # set to use PnP
# denoiser:
# network: DruNet
# noise_level: 10 # within [0, 255]
# use_dual: False # just for ADMM
#Loading unrolled model
unrolled: false
checkpoint_fp: null
32 changes: 32 additions & 0 deletions configs/recon_celeba_digicam.yaml
@@ -0,0 +1,32 @@
# python scripts/recon/dataset.py -cn recon_celeba_digicam
defaults:
- recon_dataset
- _self_

torch: True
torch_device: 'cuda:0'

repo_id: "bezzam/DigiCam-CelebA-10K"
split: "test" # "train", "test", "all"
psf_fn: "psf_measured.png" # in repo root
n_files: 25 # null for all files

preprocess:
flip_ud: True
flip_lr: True
downsample: 6

# to have different data shape than PSF
data_dim: null
# data_dim: [48, 64] # down 64
# data_dim: [506, 676] # down 6

algo: admm # "admm", "apgd", "null" to just copy over (resized) raw data
admm:
n_iter: 10

# extraction region of interest
# roi: null # top, left, bottom, right
# roi: [10, 300, 560, 705] # down 4
roi: [10, 190, 377, 490] # down 6
# roi: [5, 150, 280, 352] # down 8
39 changes: 13 additions & 26 deletions configs/recon_dataset.yaml
@@ -6,42 +6,29 @@ defaults:
torch: True
torch_device: 'cuda:0'

input:
# https://drive.switch.ch/index.php/s/NdgHlcDeHVDH5ww?path=%2Fpsf
psf: data/psf/adafruit_random_2mm_20231907.png
# https://drive.switch.ch/index.php/s/m89D1tFEfktQueS
raw_data: data/celeba_adafruit_random_2mm_20230720_1K

repo_id: "bezzam/DiffuserCam-Lensless-Mirflickr-Dataset"
split: "test" # "train", "test", "all"
psf_fn: "psf.png" # in repo root
output_folder: null # autocreate name if not specified
n_files: 25 # null for all files
output_folder: data/celeba_adafruit_recon

# extraction region of interest
roi: null # top, left, bottom, right
# -- values for `data/celeba_adafruit_random_2mm_20230720_1K`
# roi: [10, 300, 560, 705] # down 4
# roi: [6, 200, 373, 470] # down 6
# roi: [5, 150, 280, 352] # down 8

preprocess:
flip: True
flip_ud: True
flip_lr: False
downsample: 6

# to have different data shape than PSF
data_dim: null
# data_dim: [48, 64] # down 64
# data_dim: [506, 676] # down 6

display:
disp: -1
plot: False

algo: admm # "admm", "apgd", "null" to just copy over (resized) raw data

admm:
n_iter: 100
apgd:
n_jobs: 1 # run in parallel as algo is slow
max_iter: 500

admm:
n_iter: 10
# extraction region of interest
roi: null # top, left, bottom, right

save: False
display:
disp: -1
plot: False
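Per the changelog, ``scripts/recon/dataset.py`` now pulls data from Hugging Face using the ``repo_id``, ``split``, and ``psf_fn`` fields above. The sketch below shows that loading step with the standard ``datasets``/``huggingface_hub`` APIs; the dataset's column names are not visible in this diff, so no fields are indexed, and how the reconstruction script consumes each sample remains an assumption.

```python
# Hedged sketch of the Hugging Face loading implied by repo_id/split/psf_fn above.
from datasets import load_dataset
from huggingface_hub import hf_hub_download

repo_id = "bezzam/DiffuserCam-Lensless-Mirflickr-Dataset"
dataset = load_dataset(repo_id, split="test")  # split: "test"
psf_path = hf_hub_download(repo_id=repo_id, filename="psf.png", repo_type="dataset")  # psf_fn in repo root

print(len(dataset), dataset[0].keys(), psf_path)
```
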
36 changes: 36 additions & 0 deletions configs/recon_diffusercam_mirflickr.yaml
@@ -0,0 +1,36 @@
# python scripts/recon/diffusercam_mirflickr.py
# defaults to plug-and-play
defaults:
- defaults_recon
- _self_


files:
dataset: /scratch/bezzam/DiffuserCam_mirflickr/dataset
psf: data/psf/diffusercam_psf.tiff
diffusercam_psf: True
downsample: 2

model_name: null
legacy_denoiser: True

# defaults to plug-and-play
admm:
# Number of iterations
n_iter: 20
# Hyperparameters
mu1: 1e-6
mu2: 1e-5
mu3: 4e-5
tau: 0.0001
# PnP
denoiser:
network: DruNet
noise_level: 0.05
use_dual: False


device: cuda:0
n_trials: 1 # more if you want to get average inference time
idx: 3 # index from test set to reconstruct
save: True
2 changes: 1 addition & 1 deletion configs/sim_digicam_psf.yaml
@@ -33,7 +33,7 @@ sim:
flipud: True

# in practice found waveprop=True or False doesn't make difference
waveprop: True
waveprop: False

# below are ignored if waveprop=False
scene2mask: 0.3 # [m]
2 changes: 1 addition & 1 deletion configs/train_celeba_classifier.yaml
@@ -12,7 +12,7 @@ data:

# -- raw
# https://drive.switch.ch/index.php/s/m89D1tFEfktQueS
measured: data/celeba_adafruit_random_2mm_20230720_10K
measured: /scratch/bezzam/celeba_adafruit_random_2mm_20230720_10K
raw: True

# # -- reconstructed
1 change: 0 additions & 1 deletion configs/train_celeba_digicam.yaml
@@ -32,7 +32,6 @@ files:
celeba_root: /scratch/bezzam

test_idx: [0, 1, 2, 3, 4]
# test_idx: [1000, 2000, 3000, 4000]

# for prepping ground truth data
simulation:
111 changes: 111 additions & 0 deletions configs/train_celeba_digicam_hitl.yaml
@@ -0,0 +1,111 @@
# Learn mask with HITL training by setting measure configuration (set to null for learning in simulation)
#
# EXAMPLE COMMAND:
# python scripts/recon/train_unrolled.py -cn train_celeba_digicam_hitl measure.rpi_username=USERNAME measure.rpi_hostname=HOSTNAME files.vertical_shift=SHIFT

defaults:
- train_celeba_digicam
- _self_

# Train Dataset
files:

dataset: CelebA
celeba_root: /scratch/bezzam
n_files: 1000

downsample: 8
# TODO: set appropriately to align
vertical_shift: -520
horizontal_shift: null
crop:
vertical: [0, 2000]
horizontal: [1200, 2800]

measure:
# TODO: set for device
rpi_username: null
rpi_hostname: null

display:
# default to this screen: https://www.dell.com/en-us/work/shop/dell-ultrasharp-usb-c-hub-monitor-u2421e/apd/210-axmg/monitors-monitor-accessories#techspecs_section
screen_res: [1920, 1200] # width, height
pad: 10
hshift: 0
vshift: -18
brightness: 100
rot90: 3

capture:

# NB: not being used
max_level: 254
min_level: 150
max_tries: 4
delay: 2

sensor: rpi_hq
gamma: null # for visualization
exp: 0.8
script: ~/LenslessPiCam/scripts/measure/on_device_capture.py
iso: 100
config_pause: 1
sensor_mode: "0"
nbits_out: 8
nbits_capture: 12
legacy: True
gray: False
fn: raw_data
bayer: True
awb_gains: [1.6, 1.2]
rgb: True
down: 8
flip: True


# for prepping ground truth data
simulation:
scene2mask: 0.3 # [m]
mask2sensor: 0.002 # [m]
object_height: 0.38 # [m]
snr_db: 5
downsample: null
random_vflip: False
random_hflip: False
quantize: False
flip: False

#Training
training:
batch_size: 4
epoch: 10
eval_batch_size: 1
crop_preloss: True
save_every: 1

#Trainable Mask
trainable_mask:
mask_type: AdafruitLCD #Null or "TrainablePSF" or "AdafruitLCD"
# "random" (with shape of config.files.psf) or path to npy file
grayscale: False
mask_lr: 1e-3
L1_strength: False
min_val: 0

train_mask_vals: True
train_color_filter: True

# -- only for AdafruitLCD
initial_value: data/psf/adafruit_random_pattern_20231107_150902.npy
ap_center: [57, 77]
ap_shape: [18, 26]
rotate: 0 # rotation in degrees
# to align with measured PSF (so reconstruction also aligned)
vertical_shift: 0 # [px]
horizontal_shift: 0 # [px]

slm: adafruit
sensor: rpi_hq
flipud: True
waveprop: True # TODO: remove if too slow

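The ``trainable_mask`` block above treats the LCD pattern as a learnable quantity with its own learning rate (``mask_lr``). Below is a minimal sketch of that idea in plain PyTorch; the shapes, forward model, and reconstruction network are illustrative stand-ins rather than LenslessPiCam's classes.

```python
# Minimal sketch, assuming the mask values are an ordinary torch Parameter
# optimized in its own param group (cf. mask_lr above). Toy stand-ins, not LenslessPiCam API.
import torch

mask_vals = torch.nn.Parameter(torch.rand(18, 26))            # cf. ap_shape above
recon_net = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1)   # stand-in reconstruction model

optimizer = torch.optim.Adam([
    {"params": [mask_vals], "lr": 1e-3},                      # mask_lr
    {"params": recon_net.parameters(), "lr": 1e-4},
])

# one dummy step: in the real trainer the measurement comes from the simulated
# (or, for HITL, physically measured) forward model driven by mask_vals
measurement = torch.rand(1, 1, 32, 32) * mask_vals.mean()
loss = recon_net(measurement).abs().mean()
loss.backward()
optimizer.step()
```
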
3 changes: 0 additions & 3 deletions configs/train_pre-post-processing.yaml
@@ -3,9 +3,6 @@ defaults:
- train_unrolledADMM
- _self_

display:
disp: 400

reconstruction:
method: unrolled_admm

(remaining changed file diffs not loaded)
