style: ⚡️ Update logging statements to use print instead of logger.info
rhoadesScholar committed Mar 11, 2024
1 parent 42e7a10 commit ab0a549
Showing 22 changed files with 71 additions and 83 deletions.
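Every hunk in this commit makes the same substitution: a logger.info(...) call becomes a print(...) call with the same arguments. For the calls that pass %-style placeholders (e.g. "Loading run %s", run_name), this changes the output, because print does no %-formatting and simply writes each argument separated by a space. A minimal sketch (not part of this commit; run_name is a hypothetical value used only for illustration):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

run_name = "my_run"  # hypothetical value, for illustration only

# logging interpolates %-style arguments into the message:
logger.info("Loading run %s", run_name)   # -> INFO:__main__:Loading run my_run

# print() performs no %-formatting; both arguments are written separated by a space:
print("Loading run %s", run_name)         # -> Loading run %s my_run

# an f-string keeps the interpolated message under print():
print(f"Loading run {run_name}")          # -> Loading run my_run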
16 changes: 7 additions & 9 deletions dacapo/apply.py
@@ -59,7 +59,7 @@ def apply(
), "Either validation_dataset and criterion, or iteration must be provided."

# retrieving run
logger.info("Loading run %s", run_name)
print("Loading run %s", run_name)
config_store = create_config_store()
run_config = config_store.retrieve_run_config(run_name)
run = Run(run_config)
@@ -70,7 +70,7 @@ def apply(
# load weights
if iteration is None:
iteration = weights_store.retrieve_best(run_name, validation_dataset, criterion) # type: ignore
logger.info("Loading weights for iteration %i", iteration)
print("Loading weights for iteration %i", iteration)
weights_store.retrieve_weights(run_name, iteration)

if parameters is None:
@@ -89,9 +89,7 @@ def apply(
raise ValueError(
"validation_dataset must be a dataset name or a Dataset object, or parameters must be provided explicitly."
)
logger.info(
"Finding best parameters for validation dataset %s", _validation_dataset
)
print("Finding best parameters for validation dataset %s", _validation_dataset)
parameters = run.task.evaluator.get_overall_best_parameters(
_validation_dataset, criterion
)
@@ -151,7 +149,7 @@ def apply(
output_container, f"output_{run_name}_{iteration}_{parameters}"
)

logger.info(
print(
"Applying best results from run %s at iteration %i to dataset %s",
run.name,
iteration,
@@ -186,7 +184,7 @@ def apply_run(
"""Apply the model to a dataset. If roi is None, the whole input dataset is used. Assumes model is already loaded."""

# render prediction dataset
logger.info("Predicting on dataset %s", prediction_array_identifier)
print("Predicting on dataset %s", prediction_array_identifier)
predict(
run.name,
iteration,
@@ -200,10 +198,10 @@
)

# post-process the output
logger.info("Post-processing output to dataset %s", output_array_identifier)
print("Post-processing output to dataset %s", output_array_identifier)
post_processor = run.task.post_processor
post_processor.set_prediction(prediction_array_identifier)
post_processor.process(parameters, output_array_identifier, num_workers=num_workers)

logger.info("Done")
print("Done")
return
2 changes: 1 addition & 1 deletion dacapo/blockwise/argmax_worker.py
@@ -61,7 +61,7 @@ def start_worker(
client = daisy.Client()

while True:
logger.info("getting block")
print("getting block")
with client.acquire_block() as block:
if block is None:
break
18 changes: 8 additions & 10 deletions dacapo/blockwise/empanada_function.py
@@ -74,7 +74,7 @@ def orthoplane_inference(engine, volume):
# report instances per class
for tracker in trackers:
class_id = tracker.class_id
logger.info(
print(
f"Class {class_id}, axis {axis_name}, has {len(tracker.instances.keys())} instances"
)

@@ -153,7 +153,7 @@ def start_postprocess_worker(*args):
min_extent=min_extent,
dtype=engine.dtype,
):
logger.info(f"Yielding {class_name} volume of shape {vol.shape}")
print(f"Yielding {class_name} volume of shape {vol.shape}")
yield vol, class_name, tracker

def start_consensus_worker(trackers_dict):
@@ -166,7 +166,7 @@ def start_consensus_worker(trackers_dict):
min_extent=min_extent,
dtype=engine.dtype,
):
logger.info(f"Yielding {class_name} volume of shape {vol.shape}")
print(f"Yielding {class_name} volume of shape {vol.shape}")
yield vol, class_name, tracker

# verify that the image doesn't have extraneous channel dimensions
@@ -182,7 +182,7 @@ def start_consensus_worker(trackers_dict):
else:
raise Exception(f"Image volume must be 3D, got image of shape {shape}")

logger.info(
print(
f"Got 4D image of shape {shape}, extracted single channel of size {image.shape}"
)

@@ -210,7 +210,7 @@ def stack_postprocessing(

# create the final instance segmentations
for class_id, class_name in class_names.items():
logger.info(f"Creating stack segmentation for class {class_name}...")
print(f"Creating stack segmentation for class {class_name}...")

class_tracker = get_axis_trackers_by_class(trackers, class_id)[0]
shape3d = class_tracker.shape3d
@@ -224,7 +224,7 @@
filters.remove_small_objects(stack_tracker, min_size=min_size)
filters.remove_pancakes(stack_tracker, min_span=min_extent)

logger.info(f"Total {class_name} objects {len(stack_tracker.instances.keys())}")
print(f"Total {class_name} objects {len(stack_tracker.instances.keys())}")

# decode and fill the instances
stack_vol = np.zeros(shape3d, dtype=dtype)
@@ -254,7 +254,7 @@ def tracker_consensus(
# create the final instance segmentations
for class_id, class_name in class_names.items():
# get the relevant trackers for the class_label
logger.info(f"Creating consensus segmentation for class {class_name}...")
print(f"Creating consensus segmentation for class {class_name}...")

class_trackers = get_axis_trackers_by_class(trackers, class_id)
shape3d = class_trackers[0].shape3d
@@ -271,9 +271,7 @@
class_trackers, pixel_vote_thr
)

logger.info(
f"Total {class_name} objects {len(consensus_tracker.instances.keys())}"
)
print(f"Total {class_name} objects {len(consensus_tracker.instances.keys())}")

# decode and fill the instances
consensus_vol = np.zeros(shape3d, dtype=dtype)
6 changes: 2 additions & 4 deletions dacapo/blockwise/predict_worker.py
@@ -101,9 +101,7 @@ def start_worker(
input_size = input_voxel_size * input_shape
output_size = output_voxel_size * model.compute_output_shape(input_shape)[1]

logger.info(
"Predicting with input size %s, output size %s", input_size, output_size
)
print("Predicting with input size %s, output size %s", input_size, output_size)

# create gunpowder keys

@@ -181,7 +179,7 @@ def start_worker(
if block is None:
return

logger.info("Processing block %s", block)
print("Processing block %s", block)

chunk_request = request.copy()
chunk_request[raw].roi = block.read_roi
8 changes: 4 additions & 4 deletions dacapo/blockwise/scheduler.py
@@ -83,7 +83,7 @@ def run_blockwise(
**kwargs,
)

logger.info("Running blockwise with worker_file: ", worker_file)
print("Running blockwise with worker_file: ", worker_file)
success = daisy.run_blockwise([task])
return success

@@ -159,7 +159,7 @@ def segment_blockwise(
options.runs_base_dir.mkdir(parents=True)
tmpdir = tempfile.mkdtemp(dir=options.runs_base_dir)

logger.info(
print(
"Running blockwise segmentation, with segment_function_file: ",
segment_function_file,
" in temp directory: ",
@@ -185,7 +185,7 @@
*args,
**kwargs,
)
logger.info(
print(
"Running blockwise segmentation with worker_file: ",
str(Path(Path(dacapo.blockwise.__file__).parent, "segment_worker.py")),
)
@@ -209,7 +209,7 @@
*args,
**kwargs,
)
logger.info(
print(
"Running blockwise relabeling with worker_file: ",
str(Path(Path(dacapo.blockwise.__file__).parent, "relabel_worker.py")),
)
14 changes: 6 additions & 8 deletions dacapo/blockwise/segment_worker.py
@@ -60,21 +60,21 @@ def start_worker(
function_path (str): The path to the segment function.
"""

logger.info("Starting worker")
print("Starting worker")
# get arrays
input_array_identifier = LocalArrayIdentifier(Path(input_container), input_dataset)
logger.info(f"Opening input array {input_array_identifier}")
print(f"Opening input array {input_array_identifier}")
input_array = ZarrArray.open_from_array_identifier(input_array_identifier)

output_array_identifier = LocalArrayIdentifier(
Path(output_container), output_dataset
)
logger.info(f"Opening output array {output_array_identifier}")
print(f"Opening output array {output_array_identifier}")
output_array = ZarrArray.open_from_array_identifier(output_array_identifier)

# Load segment function
function_name = Path(function_path).stem
logger.info(f"Loading segment function from {str(function_path)}")
print(f"Loading segment function from {str(function_path)}")
function = SourceFileLoader(function_name, str(function_path)).load_module()
segment_function = function.segment_function

@@ -86,9 +86,7 @@

# load parameters saved in tmpdir
if os.path.exists(os.path.join(tmpdir, "parameters.yaml")):
logger.info(
f"Loading parameters from {os.path.join(tmpdir, 'parameters.yaml')}"
)
print(f"Loading parameters from {os.path.join(tmpdir, 'parameters.yaml')}")
with open(os.path.join(tmpdir, "parameters.yaml"), "r") as f:
parameters.update(yaml.safe_load(f))

@@ -169,7 +167,7 @@ def start_worker(
edges = unique_pairs[non_zero_filter]
nodes = np.unique(edges)

logger.info(f"Writing ids to {os.path.join(tmpdir, 'block_%d.npz')}")
print(f"Writing ids to {os.path.join(tmpdir, 'block_%d.npz')}")
assert os.path.exists(tmpdir)
with open(
os.path.join(tmpdir, f"block_{block.block_id[1]}.npz"), "wb"
2 changes: 1 addition & 1 deletion dacapo/blockwise/threshold_worker.py
@@ -63,7 +63,7 @@ def start_worker(
client = daisy.Client()

while True:
logger.info("getting block")
print("getting block")
with client.acquire_block() as block:
if block is None:
break
8 changes: 4 additions & 4 deletions dacapo/cli.py
@@ -384,7 +384,7 @@ def segment_blockwise(
np.uint64,
overwrite=overwrite,
)
logger.info(
print(
f"Created output array {output_array_identifier.container}:{output_array_identifier.dataset} with ROI {_total_roi}."
)

@@ -406,7 +406,7 @@


def unpack_ctx(ctx):
# logger.info(ctx.args)
# print(ctx.args)
kwargs = {
ctx.args[i].lstrip("-"): ctx.args[i + 1] for i in range(0, len(ctx.args), 2)
}
@@ -415,8 +415,8 @@ def unpack_ctx(ctx):
kwargs[k] = int(v)
elif v.replace(".", "").isnumeric():
kwargs[k] = float(v)
logger.info(f"{k}: {kwargs[k]}")
# logger.info(f"{type(k)}: {k} --> {type(kwargs[k])} {kwargs[k]}")
print(f"{k}: {kwargs[k]}")
# print(f"{type(k)}: {k} --> {type(kwargs[k])} {kwargs[k]}")
return kwargs


2 changes: 1 addition & 1 deletion (file path not shown in this view)
@@ -121,7 +121,7 @@ def __getitem__(self, roi: Roi) -> np.ndarray:
axis=0,
)
if concatenated.shape[0] == 1:
logger.info(
print(
f"Concatenated array has only one channel: {self.name} {concatenated.shape}"
)
return concatenated
2 changes: 1 addition & 1 deletion dacapo/experiments/starts/start.py
@@ -14,7 +14,7 @@ def initialize_weights(self, model):

weights_store = create_weights_store()
weights = weights_store._retrieve_weights(self.run, self.criterion)
logger.info(f"loading weights from run {self.run}, criterion: {self.criterion}")
print(f"loading weights from run {self.run}, criterion: {self.criterion}")
# load the model weights (taken from torch load_state_dict source)
try:
model.load_state_dict(weights.model)
2 changes: 1 addition & 1 deletion (file path not shown in this view)
@@ -41,7 +41,7 @@ def evaluate(self, output_array_identifier, evaluation_array):
output_array = ZarrArray.open_from_array_identifier(output_array_identifier)
evaluation_data = evaluation_array[evaluation_array.roi]
output_data = output_array[output_array.roi]
logger.info(
print(
f"Evaluating binary segmentations on evaluation_data of shape: {evaluation_data.shape}"
)
assert (
2 changes: 1 addition & 1 deletion dacapo/experiments/trainers/gunpowder_trainer.py
@@ -196,7 +196,7 @@ def build_batch_provider(self, datasets, model, task, snapshot_container=None):
def iterate(self, num_iterations, model, optimizer, device):
t_start_fetch = time.time()

logger.info("Starting iteration!")
print("Starting iteration!")

for iteration in range(self.iteration, self.iteration + num_iterations):
raw, gt, target, weight, mask = self.next()
12 changes: 6 additions & 6 deletions dacapo/gp/elastic_augment_fuse.py
@@ -82,9 +82,9 @@ def _create_rotation_transformation(shape, angle, subsample=1, voxel_size=None):
# rotate control points
center = np.array([0.5 * (d - 1) * vs for d, vs in zip(shape, voxel_size)])

# logger.info("Creating rotation transformation with:")
# logger.info("\tangle : " + str(angle))
# logger.info("\tcenter: " + str(center))
# print("Creating rotation transformation with:")
# print("\tangle : " + str(angle))
# print("\tcenter: " + str(center))

control_point_offsets = np.zeros((dims,) + control_points, dtype=np.float32)
for control_point in np.ndindex(control_points):
@@ -116,9 +116,9 @@ def _create_uniform_3d_transformation(shape, rotation, subsample=1, voxel_size=N
# rotate control points
center = np.array([0.5 * (d - 1) * vs for d, vs in zip(shape, voxel_size)])

# logger.info("Creating rotation transformation with:")
# logger.info("\tangle : " + str(angle))
# logger.info("\tcenter: " + str(center))
# print("Creating rotation transformation with:")
# print("\tangle : " + str(angle))
# print("\tcenter: " + str(center))

control_point_offsets = np.zeros((dims,) + control_points, dtype=np.float32)
for control_point in np.ndindex(control_points):
8 changes: 3 additions & 5 deletions dacapo/predict.py
@@ -106,11 +106,9 @@ def predict(
if isinstance(output_dtype, str):
output_dtype = np.dtype(output_dtype)

logger.info(
"Predicting with input size %s, output size %s", input_size, output_size
)
print("Predicting with input size %s, output size %s", input_size, output_size)

logger.info("Total input ROI: %s, output ROI: %s", _input_roi, output_roi)
print("Total input ROI: %s, output ROI: %s", _input_roi, output_roi)

# prepare prediction dataset
axes = ["c"] + [axis for axis in raw_array.axes if axis != "c"]
@@ -126,7 +124,7 @@

# run blockwise prediction
worker_file = str(Path(Path(dacapo.blockwise.__file__).parent, "predict_worker.py"))
logger.info("Running blockwise prediction with worker_file: ", worker_file)
print("Running blockwise prediction with worker_file: ", worker_file)
run_blockwise(
worker_file=worker_file,
total_roi=_input_roi,
2 changes: 1 addition & 1 deletion dacapo/store/file_config_store.py
@@ -20,7 +20,7 @@ class FileConfigStore(ConfigStore):
"""

def __init__(self, path):
logger.info("Creating FileConfigStore:\n\tpath : %s", path)
print("Creating FileConfigStore:\n\tpath : %s", path)

self.path = Path(path)

(Remaining changed files were not loaded in this view.)
