From ab0a549047aa63211a406028452ed5e4294e1def Mon Sep 17 00:00:00 2001 From: rhoadesScholar Date: Mon, 11 Mar 2024 10:56:20 -0400 Subject: [PATCH] =?UTF-8?q?style:=20=E2=9A=A1=EF=B8=8F=20Update=20logging?= =?UTF-8?q?=20statements=20to=20use=20print=20instead=20of=20logger.info?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dacapo/apply.py | 16 +++++++--------- dacapo/blockwise/argmax_worker.py | 2 +- dacapo/blockwise/empanada_function.py | 18 ++++++++---------- dacapo/blockwise/predict_worker.py | 6 ++---- dacapo/blockwise/scheduler.py | 8 ++++---- dacapo/blockwise/segment_worker.py | 14 ++++++-------- dacapo/blockwise/threshold_worker.py | 2 +- dacapo/cli.py | 8 ++++---- .../datasplits/datasets/arrays/concat_array.py | 2 +- dacapo/experiments/starts/start.py | 2 +- .../binary_segmentation_evaluator.py | 2 +- .../experiments/trainers/gunpowder_trainer.py | 2 +- dacapo/gp/elastic_augment_fuse.py | 12 ++++++------ dacapo/predict.py | 8 +++----- dacapo/store/file_config_store.py | 2 +- dacapo/store/file_stats_store.py | 6 +++--- dacapo/store/local_array_store.py | 2 +- dacapo/store/local_weights_store.py | 8 ++++---- dacapo/store/mongo_config_store.py | 2 +- dacapo/store/mongo_stats_store.py | 6 +++--- dacapo/train.py | 12 ++++++------ dacapo/validate.py | 14 ++++++-------- 22 files changed, 71 insertions(+), 83 deletions(-) diff --git a/dacapo/apply.py b/dacapo/apply.py index 9e6006c9b..60f754609 100644 --- a/dacapo/apply.py +++ b/dacapo/apply.py @@ -59,7 +59,7 @@ def apply( ), "Either validation_dataset and criterion, or iteration must be provided." # retrieving run - logger.info("Loading run %s", run_name) + print("Loading run %s", run_name) config_store = create_config_store() run_config = config_store.retrieve_run_config(run_name) run = Run(run_config) @@ -70,7 +70,7 @@ def apply( # load weights if iteration is None: iteration = weights_store.retrieve_best(run_name, validation_dataset, criterion) # type: ignore - logger.info("Loading weights for iteration %i", iteration) + print("Loading weights for iteration %i", iteration) weights_store.retrieve_weights(run_name, iteration) if parameters is None: @@ -89,9 +89,7 @@ def apply( raise ValueError( "validation_dataset must be a dataset name or a Dataset object, or parameters must be provided explicitly." ) - logger.info( - "Finding best parameters for validation dataset %s", _validation_dataset - ) + print("Finding best parameters for validation dataset %s", _validation_dataset) parameters = run.task.evaluator.get_overall_best_parameters( _validation_dataset, criterion ) @@ -151,7 +149,7 @@ def apply( output_container, f"output_{run_name}_{iteration}_{parameters}" ) - logger.info( + print( "Applying best results from run %s at iteration %i to dataset %s", run.name, iteration, @@ -186,7 +184,7 @@ def apply_run( """Apply the model to a dataset. If roi is None, the whole input dataset is used. 
Assumes model is already loaded.""" # render prediction dataset - logger.info("Predicting on dataset %s", prediction_array_identifier) + print("Predicting on dataset %s", prediction_array_identifier) predict( run.name, iteration, @@ -200,10 +198,10 @@ def apply_run( ) # post-process the output - logger.info("Post-processing output to dataset %s", output_array_identifier) + print("Post-processing output to dataset %s", output_array_identifier) post_processor = run.task.post_processor post_processor.set_prediction(prediction_array_identifier) post_processor.process(parameters, output_array_identifier, num_workers=num_workers) - logger.info("Done") + print("Done") return diff --git a/dacapo/blockwise/argmax_worker.py b/dacapo/blockwise/argmax_worker.py index 77d932f39..59a17d752 100644 --- a/dacapo/blockwise/argmax_worker.py +++ b/dacapo/blockwise/argmax_worker.py @@ -61,7 +61,7 @@ def start_worker( client = daisy.Client() while True: - logger.info("getting block") + print("getting block") with client.acquire_block() as block: if block is None: break diff --git a/dacapo/blockwise/empanada_function.py b/dacapo/blockwise/empanada_function.py index 911d8dbc2..4175f8577 100644 --- a/dacapo/blockwise/empanada_function.py +++ b/dacapo/blockwise/empanada_function.py @@ -74,7 +74,7 @@ def orthoplane_inference(engine, volume): # report instances per class for tracker in trackers: class_id = tracker.class_id - logger.info( + print( f"Class {class_id}, axis {axis_name}, has {len(tracker.instances.keys())} instances" ) @@ -153,7 +153,7 @@ def start_postprocess_worker(*args): min_extent=min_extent, dtype=engine.dtype, ): - logger.info(f"Yielding {class_name} volume of shape {vol.shape}") + print(f"Yielding {class_name} volume of shape {vol.shape}") yield vol, class_name, tracker def start_consensus_worker(trackers_dict): @@ -166,7 +166,7 @@ def start_consensus_worker(trackers_dict): min_extent=min_extent, dtype=engine.dtype, ): - logger.info(f"Yielding {class_name} volume of shape {vol.shape}") + print(f"Yielding {class_name} volume of shape {vol.shape}") yield vol, class_name, tracker # verify that the image doesn't have extraneous channel dimensions @@ -182,7 +182,7 @@ def start_consensus_worker(trackers_dict): else: raise Exception(f"Image volume must be 3D, got image of shape {shape}") - logger.info( + print( f"Got 4D image of shape {shape}, extracted single channel of size {image.shape}" ) @@ -210,7 +210,7 @@ def stack_postprocessing( # create the final instance segmentations for class_id, class_name in class_names.items(): - logger.info(f"Creating stack segmentation for class {class_name}...") + print(f"Creating stack segmentation for class {class_name}...") class_tracker = get_axis_trackers_by_class(trackers, class_id)[0] shape3d = class_tracker.shape3d @@ -224,7 +224,7 @@ def stack_postprocessing( filters.remove_small_objects(stack_tracker, min_size=min_size) filters.remove_pancakes(stack_tracker, min_span=min_extent) - logger.info(f"Total {class_name} objects {len(stack_tracker.instances.keys())}") + print(f"Total {class_name} objects {len(stack_tracker.instances.keys())}") # decode and fill the instances stack_vol = np.zeros(shape3d, dtype=dtype) @@ -254,7 +254,7 @@ def tracker_consensus( # create the final instance segmentations for class_id, class_name in class_names.items(): # get the relevant trackers for the class_label - logger.info(f"Creating consensus segmentation for class {class_name}...") + print(f"Creating consensus segmentation for class {class_name}...") class_trackers = 
get_axis_trackers_by_class(trackers, class_id) shape3d = class_trackers[0].shape3d @@ -271,9 +271,7 @@ def tracker_consensus( class_trackers, pixel_vote_thr ) - logger.info( - f"Total {class_name} objects {len(consensus_tracker.instances.keys())}" - ) + print(f"Total {class_name} objects {len(consensus_tracker.instances.keys())}") # decode and fill the instances consensus_vol = np.zeros(shape3d, dtype=dtype) diff --git a/dacapo/blockwise/predict_worker.py b/dacapo/blockwise/predict_worker.py index 462d17366..68a2eec1a 100644 --- a/dacapo/blockwise/predict_worker.py +++ b/dacapo/blockwise/predict_worker.py @@ -101,9 +101,7 @@ def start_worker( input_size = input_voxel_size * input_shape output_size = output_voxel_size * model.compute_output_shape(input_shape)[1] - logger.info( - "Predicting with input size %s, output size %s", input_size, output_size - ) + print("Predicting with input size %s, output size %s", input_size, output_size) # create gunpowder keys @@ -181,7 +179,7 @@ def start_worker( if block is None: return - logger.info("Processing block %s", block) + print("Processing block %s", block) chunk_request = request.copy() chunk_request[raw].roi = block.read_roi diff --git a/dacapo/blockwise/scheduler.py b/dacapo/blockwise/scheduler.py index 4b4ff44e8..89bf0ba4d 100644 --- a/dacapo/blockwise/scheduler.py +++ b/dacapo/blockwise/scheduler.py @@ -83,7 +83,7 @@ def run_blockwise( **kwargs, ) - logger.info("Running blockwise with worker_file: ", worker_file) + print("Running blockwise with worker_file: ", worker_file) success = daisy.run_blockwise([task]) return success @@ -159,7 +159,7 @@ def segment_blockwise( options.runs_base_dir.mkdir(parents=True) tmpdir = tempfile.mkdtemp(dir=options.runs_base_dir) - logger.info( + print( "Running blockwise segmentation, with segment_function_file: ", segment_function_file, " in temp directory: ", @@ -185,7 +185,7 @@ def segment_blockwise( *args, **kwargs, ) - logger.info( + print( "Running blockwise segmentation with worker_file: ", str(Path(Path(dacapo.blockwise.__file__).parent, "segment_worker.py")), ) @@ -209,7 +209,7 @@ def segment_blockwise( *args, **kwargs, ) - logger.info( + print( "Running blockwise relabeling with worker_file: ", str(Path(Path(dacapo.blockwise.__file__).parent, "relabel_worker.py")), ) diff --git a/dacapo/blockwise/segment_worker.py b/dacapo/blockwise/segment_worker.py index 0d90a143d..1d01ec0c8 100644 --- a/dacapo/blockwise/segment_worker.py +++ b/dacapo/blockwise/segment_worker.py @@ -60,21 +60,21 @@ def start_worker( function_path (str): The path to the segment function. 
""" - logger.info("Starting worker") + print("Starting worker") # get arrays input_array_identifier = LocalArrayIdentifier(Path(input_container), input_dataset) - logger.info(f"Opening input array {input_array_identifier}") + print(f"Opening input array {input_array_identifier}") input_array = ZarrArray.open_from_array_identifier(input_array_identifier) output_array_identifier = LocalArrayIdentifier( Path(output_container), output_dataset ) - logger.info(f"Opening output array {output_array_identifier}") + print(f"Opening output array {output_array_identifier}") output_array = ZarrArray.open_from_array_identifier(output_array_identifier) # Load segment function function_name = Path(function_path).stem - logger.info(f"Loading segment function from {str(function_path)}") + print(f"Loading segment function from {str(function_path)}") function = SourceFileLoader(function_name, str(function_path)).load_module() segment_function = function.segment_function @@ -86,9 +86,7 @@ def start_worker( # load parameters saved in tmpdir if os.path.exists(os.path.join(tmpdir, "parameters.yaml")): - logger.info( - f"Loading parameters from {os.path.join(tmpdir, 'parameters.yaml')}" - ) + print(f"Loading parameters from {os.path.join(tmpdir, 'parameters.yaml')}") with open(os.path.join(tmpdir, "parameters.yaml"), "r") as f: parameters.update(yaml.safe_load(f)) @@ -169,7 +167,7 @@ def start_worker( edges = unique_pairs[non_zero_filter] nodes = np.unique(edges) - logger.info(f"Writing ids to {os.path.join(tmpdir, 'block_%d.npz')}") + print(f"Writing ids to {os.path.join(tmpdir, 'block_%d.npz')}") assert os.path.exists(tmpdir) with open( os.path.join(tmpdir, f"block_{block.block_id[1]}.npz"), "wb" diff --git a/dacapo/blockwise/threshold_worker.py b/dacapo/blockwise/threshold_worker.py index a9ab85d0f..3ff08c1e6 100644 --- a/dacapo/blockwise/threshold_worker.py +++ b/dacapo/blockwise/threshold_worker.py @@ -63,7 +63,7 @@ def start_worker( client = daisy.Client() while True: - logger.info("getting block") + print("getting block") with client.acquire_block() as block: if block is None: break diff --git a/dacapo/cli.py b/dacapo/cli.py index 29541a962..22cd453ac 100644 --- a/dacapo/cli.py +++ b/dacapo/cli.py @@ -384,7 +384,7 @@ def segment_blockwise( np.uint64, overwrite=overwrite, ) - logger.info( + print( f"Created output array {output_array_identifier.container}:{output_array_identifier.dataset} with ROI {_total_roi}." 
) @@ -406,7 +406,7 @@ def segment_blockwise( def unpack_ctx(ctx): - # logger.info(ctx.args) + # print(ctx.args) kwargs = { ctx.args[i].lstrip("-"): ctx.args[i + 1] for i in range(0, len(ctx.args), 2) } @@ -415,8 +415,8 @@ def unpack_ctx(ctx): kwargs[k] = int(v) elif v.replace(".", "").isnumeric(): kwargs[k] = float(v) - logger.info(f"{k}: {kwargs[k]}") - # logger.info(f"{type(k)}: {k} --> {type(kwargs[k])} {kwargs[k]}") + print(f"{k}: {kwargs[k]}") + # print(f"{type(k)}: {k} --> {type(kwargs[k])} {kwargs[k]}") return kwargs diff --git a/dacapo/experiments/datasplits/datasets/arrays/concat_array.py b/dacapo/experiments/datasplits/datasets/arrays/concat_array.py index 7fd91e08d..37cf650f6 100644 --- a/dacapo/experiments/datasplits/datasets/arrays/concat_array.py +++ b/dacapo/experiments/datasplits/datasets/arrays/concat_array.py @@ -121,7 +121,7 @@ def __getitem__(self, roi: Roi) -> np.ndarray: axis=0, ) if concatenated.shape[0] == 1: - logger.info( + print( f"Concatenated array has only one channel: {self.name} {concatenated.shape}" ) return concatenated diff --git a/dacapo/experiments/starts/start.py b/dacapo/experiments/starts/start.py index da7badbf9..4287eb6cd 100644 --- a/dacapo/experiments/starts/start.py +++ b/dacapo/experiments/starts/start.py @@ -14,7 +14,7 @@ def initialize_weights(self, model): weights_store = create_weights_store() weights = weights_store._retrieve_weights(self.run, self.criterion) - logger.info(f"loading weights from run {self.run}, criterion: {self.criterion}") + print(f"loading weights from run {self.run}, criterion: {self.criterion}") # load the model weights (taken from torch load_state_dict source) try: model.load_state_dict(weights.model) diff --git a/dacapo/experiments/tasks/evaluators/binary_segmentation_evaluator.py b/dacapo/experiments/tasks/evaluators/binary_segmentation_evaluator.py index fafea82a3..bf5d6d983 100644 --- a/dacapo/experiments/tasks/evaluators/binary_segmentation_evaluator.py +++ b/dacapo/experiments/tasks/evaluators/binary_segmentation_evaluator.py @@ -41,7 +41,7 @@ def evaluate(self, output_array_identifier, evaluation_array): output_array = ZarrArray.open_from_array_identifier(output_array_identifier) evaluation_data = evaluation_array[evaluation_array.roi] output_data = output_array[output_array.roi] - logger.info( + print( f"Evaluating binary segmentations on evaluation_data of shape: {evaluation_data.shape}" ) assert ( diff --git a/dacapo/experiments/trainers/gunpowder_trainer.py b/dacapo/experiments/trainers/gunpowder_trainer.py index 46379acf4..57166beb3 100644 --- a/dacapo/experiments/trainers/gunpowder_trainer.py +++ b/dacapo/experiments/trainers/gunpowder_trainer.py @@ -196,7 +196,7 @@ def build_batch_provider(self, datasets, model, task, snapshot_container=None): def iterate(self, num_iterations, model, optimizer, device): t_start_fetch = time.time() - logger.info("Starting iteration!") + print("Starting iteration!") for iteration in range(self.iteration, self.iteration + num_iterations): raw, gt, target, weight, mask = self.next() diff --git a/dacapo/gp/elastic_augment_fuse.py b/dacapo/gp/elastic_augment_fuse.py index 3de3ed333..b070d20ab 100644 --- a/dacapo/gp/elastic_augment_fuse.py +++ b/dacapo/gp/elastic_augment_fuse.py @@ -82,9 +82,9 @@ def _create_rotation_transformation(shape, angle, subsample=1, voxel_size=None): # rotate control points center = np.array([0.5 * (d - 1) * vs for d, vs in zip(shape, voxel_size)]) - # logger.info("Creating rotation transformation with:") - # logger.info("\tangle : " + str(angle)) - # 
logger.info("\tcenter: " + str(center)) + # print("Creating rotation transformation with:") + # print("\tangle : " + str(angle)) + # print("\tcenter: " + str(center)) control_point_offsets = np.zeros((dims,) + control_points, dtype=np.float32) for control_point in np.ndindex(control_points): @@ -116,9 +116,9 @@ def _create_uniform_3d_transformation(shape, rotation, subsample=1, voxel_size=N # rotate control points center = np.array([0.5 * (d - 1) * vs for d, vs in zip(shape, voxel_size)]) - # logger.info("Creating rotation transformation with:") - # logger.info("\tangle : " + str(angle)) - # logger.info("\tcenter: " + str(center)) + # print("Creating rotation transformation with:") + # print("\tangle : " + str(angle)) + # print("\tcenter: " + str(center)) control_point_offsets = np.zeros((dims,) + control_points, dtype=np.float32) for control_point in np.ndindex(control_points): diff --git a/dacapo/predict.py b/dacapo/predict.py index 1ea363ea0..4a5fa9ebc 100644 --- a/dacapo/predict.py +++ b/dacapo/predict.py @@ -106,11 +106,9 @@ def predict( if isinstance(output_dtype, str): output_dtype = np.dtype(output_dtype) - logger.info( - "Predicting with input size %s, output size %s", input_size, output_size - ) + print("Predicting with input size %s, output size %s", input_size, output_size) - logger.info("Total input ROI: %s, output ROI: %s", _input_roi, output_roi) + print("Total input ROI: %s, output ROI: %s", _input_roi, output_roi) # prepare prediction dataset axes = ["c"] + [axis for axis in raw_array.axes if axis != "c"] @@ -126,7 +124,7 @@ def predict( # run blockwise prediction worker_file = str(Path(Path(dacapo.blockwise.__file__).parent, "predict_worker.py")) - logger.info("Running blockwise prediction with worker_file: ", worker_file) + print("Running blockwise prediction with worker_file: ", worker_file) run_blockwise( worker_file=worker_file, total_roi=_input_roi, diff --git a/dacapo/store/file_config_store.py b/dacapo/store/file_config_store.py index ae88ebdd0..aaad9b7f0 100644 --- a/dacapo/store/file_config_store.py +++ b/dacapo/store/file_config_store.py @@ -20,7 +20,7 @@ class FileConfigStore(ConfigStore): """ def __init__(self, path): - logger.info("Creating FileConfigStore:\n\tpath : %s", path) + print("Creating FileConfigStore:\n\tpath : %s", path) self.path = Path(path) diff --git a/dacapo/store/file_stats_store.py b/dacapo/store/file_stats_store.py index b3ce77f37..d367e842d 100644 --- a/dacapo/store/file_stats_store.py +++ b/dacapo/store/file_stats_store.py @@ -17,7 +17,7 @@ class FileStatsStore(StatsStore): """ def __init__(self, path): - logger.info("Creating MongoStatsStore:\n\tpath : %s", path) + print("Creating MongoStatsStore:\n\tpath : %s", path) self.path = Path(path) @@ -35,7 +35,7 @@ def store_training_stats(self, run_name, stats): if stats.trained_until() > existing_stats.trained_until(): # current stats go further than the one in DB store_from_iteration = existing_stats.trained_until() - logger.info( + print( "Updating training stats of run %s after iteration %d", run_name, store_from_iteration, @@ -65,7 +65,7 @@ def store_validation_iteration_scores(self, run_name, scores): self.__delete_validation_iteration_scores(run_name) if store_from_iteration > 0: - logger.info( + print( "Updating validation scores of run %s after iteration " "%d", run_name, store_from_iteration, diff --git a/dacapo/store/local_array_store.py b/dacapo/store/local_array_store.py index 73994d980..1c6e80c5b 100644 --- a/dacapo/store/local_array_store.py +++ 
b/dacapo/store/local_array_store.py @@ -113,7 +113,7 @@ def remove(self, array_identifier: "LocalArrayIdentifier") -> None: ) return - logger.info("Removing dataset %s in container %s", dataset, container) + print("Removing dataset %s in container %s", dataset, container) shutil.rmtree(path) def __get_run_dir(self, run_name: str) -> Path: diff --git a/dacapo/store/local_weights_store.py b/dacapo/store/local_weights_store.py index fe72eb059..c95aca9af 100644 --- a/dacapo/store/local_weights_store.py +++ b/dacapo/store/local_weights_store.py @@ -17,7 +17,7 @@ class LocalWeightsStore(WeightsStore): """A local store for network weights.""" def __init__(self, basedir): - logger.info("Creating local weights store in directory %s", basedir) + print("Creating local weights store in directory %s", basedir) self.basedir = basedir @@ -52,7 +52,7 @@ def store_weights(self, run: Run, iteration: int): def retrieve_weights(self, run: str, iteration: int) -> Weights: """Retrieve the network weights of the given run.""" - logger.info("Retrieving weights for run %s, iteration %d", run, iteration) + print("Retrieving weights for run %s, iteration %d", run, iteration) weights_name = self.__get_weights_dir(run) / "iterations" / str(iteration) @@ -107,7 +107,7 @@ def store_best(self, run: str, iteration: int, dataset: str, criterion: str): f.write(json.dumps({"iteration": iteration})) def retrieve_best(self, run: str, dataset: str | Dataset, criterion: str) -> int: - logger.info("Retrieving weights for run %s, criterion %s", run, criterion) + print("Retrieving weights for run %s, criterion %s", run, criterion) with (self.__get_weights_dir(run) / criterion / f"{dataset}.json").open( "r" @@ -117,7 +117,7 @@ def retrieve_best(self, run: str, dataset: str | Dataset, criterion: str) -> int return weights_info["iteration"] def _load_best(self, run: Run, criterion: str): - logger.info("Retrieving weights for run %s, criterion %s", run, criterion) + print("Retrieving weights for run %s, criterion %s", run, criterion) weights_name = self.__get_weights_dir(run) / f"{criterion}" diff --git a/dacapo/store/mongo_config_store.py b/dacapo/store/mongo_config_store.py index 5739dac58..f89ee94d5 100644 --- a/dacapo/store/mongo_config_store.py +++ b/dacapo/store/mongo_config_store.py @@ -22,7 +22,7 @@ class MongoConfigStore(ConfigStore): """ def __init__(self, db_host, db_name): - logger.info( + print( "Creating MongoConfigStore:\n\thost : %s\n\tdatabase: %s", db_host, db_name, diff --git a/dacapo/store/mongo_stats_store.py b/dacapo/store/mongo_stats_store.py index d0398caf9..06cc832ed 100644 --- a/dacapo/store/mongo_stats_store.py +++ b/dacapo/store/mongo_stats_store.py @@ -16,7 +16,7 @@ class MongoStatsStore(StatsStore): """ def __init__(self, db_host, db_name): - logger.info( + print( "Creating MongoStatsStore:\n\thost : %s\n\tdatabase: %s", db_host, db_name, @@ -41,7 +41,7 @@ def store_training_stats(self, run_name: str, stats: TrainingStats): if stats.trained_until() > existing_stats.trained_until(): # current stats go further than the one in DB store_from_iteration = existing_stats.trained_until() - logger.info( + print( "Updating training stats of run %s after iteration %d", run_name, store_from_iteration, @@ -76,7 +76,7 @@ def store_validation_iteration_scores( self.__delete_validation_scores(run_name) if store_from_iteration > 0: - logger.info( + print( "Updating validation scores of run %s after iteration " "%d", run_name, store_from_iteration, diff --git a/dacapo/train.py b/dacapo/train.py index 9b43d26d2..6a0d00d54 
100644 --- a/dacapo/train.py +++ b/dacapo/train.py @@ -27,7 +27,7 @@ def train(run_name: str): # # we are done here. # return - logger.info("Training run %s", run_name) + print("Training run %s", run_name) # create run @@ -39,7 +39,7 @@ def train(run_name: str): def train_run(run: Run): - logger.info("Starting/resuming training for run %s...", run) + print("Starting/resuming training for run %s...", run) # create run @@ -52,13 +52,13 @@ def train_run(run: Run): trained_until = run.training_stats.trained_until() validated_until = run.validation_scores.validated_until() if validated_until > trained_until: - logger.info( + print( f"Trained until {trained_until}, but validated until {validated_until}! " "Deleting extra validation stats" ) run.validation_scores.delete_after(trained_until) - logger.info("Current state: trained until %d/%d", trained_until, run.train_until) + print("Current state: trained until %d/%d", trained_until, run.train_until) # read weights of the latest iteration @@ -95,7 +95,7 @@ def train_run(run: Run): weights_store.retrieve_weights(run, iteration=trained_until) elif latest_weights_iteration == trained_until: - logger.info("Resuming training from iteration %d", trained_until) + print("Resuming training from iteration %d", trained_until) weights_store.retrieve_weights(run, iteration=trained_until) @@ -204,4 +204,4 @@ def train_run(run: Run): run.move_optimizer(compute_context.device) run.model.train() - logger.info("Trained until %d, finished.", trained_until) + print("Trained until %d, finished.", trained_until) diff --git a/dacapo/validate.py b/dacapo/validate.py index 65f5b040b..e1d6065a5 100644 --- a/dacapo/validate.py +++ b/dacapo/validate.py @@ -27,7 +27,7 @@ def validate( stored checkpoint. Returns the best parameters and scores for this iteration.""" - logger.info("Validating run %s at iteration %d...", run_name, iteration) + print("Validating run %s at iteration %d...", run_name, iteration) # create run @@ -78,7 +78,7 @@ def validate_run( or len(run.datasplit.validate) == 0 or run.datasplit.validate[0].gt is None ): - logger.info("Cannot validate run %s. Continuing training!", run.name) + print("Cannot validate run %s. Continuing training!", run.name) return None, None # get array and weight store @@ -100,9 +100,7 @@ def validate_run( ) raise NotImplementedError - logger.info( - "Validating run %s on dataset %s", run.name, validation_dataset.name - ) + print("Validating run %s on dataset %s", run.name, validation_dataset.name) ( input_raw_array_identifier, @@ -116,7 +114,7 @@ def validate_run( f"{input_gt_array_identifier.container}/{input_gt_array_identifier.dataset}" ).exists() ): - logger.info("Copying validation inputs!") + print("Copying validation inputs!") input_voxel_size = validation_dataset.raw.voxel_size output_voxel_size = run.model.scale(input_voxel_size) input_shape = run.model.eval_input_shape @@ -154,7 +152,7 @@ def validate_run( ) input_gt[output_roi] = validation_dataset.gt[output_roi] else: - logger.info("validation inputs already copied!") + print("validation inputs already copied!") prediction_array_identifier = array_store.validation_prediction_array( run.name, iteration, validation_dataset.name @@ -171,7 +169,7 @@ def validate_run( overwrite=overwrite, ) - logger.info("Predicted on dataset %s", validation_dataset.name) + print("Predicted on dataset %s", validation_dataset.name) post_processor.set_prediction(prediction_array_identifier)
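For readers comparing the two call styles that appear throughout this patch: logging's %-style placeholders are interpolated from the extra arguments, whereas print() performs no interpolation and simply space-joins its positional arguments, so the same argument list renders differently under the two APIs. Below is a minimal standalone sketch of that behaviour; it is not part of the patch, and the values run_name = "my_run" and iteration = 100 are illustrative only.

    import logging

    logging.basicConfig(level=logging.INFO, format="%(message)s")
    logger = logging.getLogger(__name__)

    run_name = "my_run"   # illustrative value, not taken from the patch
    iteration = 100       # illustrative value

    # logging interpolates %-style placeholders from the extra arguments:
    logger.info("Loading run %s at iteration %i", run_name, iteration)
    # -> Loading run my_run at iteration 100

    # print() does not interpolate; it space-joins its positional arguments:
    print("Loading run %s at iteration %i", run_name, iteration)
    # -> Loading run %s at iteration %i my_run 100

    # an f-string keeps the rendered message identical under print():
    print(f"Loading run {run_name} at iteration {iteration}")
    # -> Loading run my_run at iteration 100

Messages passed as a single literal or as an f-string (for example print("getting block") in the worker modules, or the f-string calls in segment_worker.py) read the same under either API.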