From f5ebdafeb04dffda9a7b793fa6071d880d5e559a Mon Sep 17 00:00:00 2001
From: Evan Harvey
Date: Thu, 21 Mar 2024 16:36:33 -0600
Subject: [PATCH 1/5] Apply formatting with a line length of 120

---
 contrib/app/sofast/load_saved_data.py | 21 +-
 .../run_and_characterize_sofast_1_cam.py | 23 +-
 .../run_and_characterize_sofast_2_cam.py | 31 +-
 contrib/app/ufacet-s/UfacetFlight.py | 21 +-
 .../01_main_demonstrate_solar_field_plots.py | 190 +-
 .../02_main_altitude_gaze_analysis_yz.py | 52 +-
 .../U_Code/03_main_flight_plan_assembly.py | 104 +-
 .../U_Code/04_main_ufacet_xy_analysis.py | 203 +-
 .../U_Code/05_main_ufacet_scan_pass_build.py | 22 +-
 .../U_Code/06_main_planner_trial_study.py | 34 +-
 .../07_main_generate_flight_plan_suite.py | 34 +-
 ...main_generate_half_and_half_flight_plan.py | 34 +-
 .../U_Code/93_quick_nsttf_survey.py | 44 +-
 .../U_Code/lib/FlightOverSolarField.py | 12 +-
 .../U_Code/lib/FlightPlan.py | 32 +-
 .../U_Code/lib/Heliostat.py | 151 +-
 .../lib/RenderControlScanSectionAnalysis.py | 212 +--
 .../U_Code/lib/RenderControlWayPoint.py | 9 +-
 .../U_Code/lib/UfacetScanPass.py | 26 +-
 .../U_Code/lib/define_render_control.py | 20 +-
 .../U_Code/lib/define_scan_nsttf.py | 111 +-
 .../lib/generate_NSTTF_ufacet_plans_1.py | 26 +-
 .../U_Code/lib/plan_and_render_scan.py | 57 +-
 .../U_Code/lib/plan_scan_parameters.py | 4 +-
 .../U_Code/lib/plan_scan_raster.py | 8 +-
 .../U_Code/lib/plan_scan_raster_parameters.py | 16 +-
 .../U_Code/lib/plan_scan_ufacet.py | 26 +-
 .../U_Code/lib/plan_scan_ufacet_parameters.py | 27 +-
 .../U_Code/lib/plan_scan_ufacet_render.py | 4 +-
 .../lib/plan_scan_ufacet_section_analysis.py | 275 +--
 ...lan_scan_ufacet_section_analysis_render.py | 566 ++----
 .../plan_scan_ufacet_section_construction.py | 56 +-
 ...scan_ufacet_section_construction_render.py | 52 +-
 .../lib/plan_scan_ufacet_xy_analysis.py | 25 +-
 .../plan_scan_ufacet_xy_analysis_render.py | 23 +-
 .../U_Code/lib/plan_scan_vanity.py | 7 +-
 .../U_Code/lib/plan_scan_vanity_parameters.py | 24 +-
 .../helio_scan/070_ExtractedFrames.py | 43 +-
 .../helio_scan/080_FramesNoDuplicates.py | 78 +-
 .../helio_scan/130_KeyFramesGivenManual.py | 128 +-
 .../app/ufacet-s/helio_scan/140_KeyCorners.py | 274 +--
 .../app/ufacet-s/helio_scan/150_KeyTracks.py | 281 +--
 .../ufacet-s/helio_scan/160_VideoTracks.py | 348 +---
 .../helio_scan/170_HeliostatTracks.py | 229 +--
 .../ufacet-s/helio_scan/180_Heliostats3d.py | 204 +-
 .../helio_scan/190_TrajectoryAnalysis.py | 1639 ++++------------
 .../helio_scan/lib/DEPRECATED_save_read.py | 45 +-
 .../lib/DEPRECATED_specifications.py | 52 +-
 .../helio_scan/lib/DEPRECATED_utils.py | 109 +-
 .../helio_scan/lib/FrameNameXyList.py | 92 +-
 .../helio_scan/lib/HeliostatInfer2dFrame.py | 20 +-
 .../helio_scan/lib/HeliostatInfer3d.py | 559 ++----
 .../helio_scan/lib/KeyFrameCornerSearch.py | 572 ++----
 .../helio_scan/lib/KeyFrameTrackSearch.py | 242 +--
 .../helio_scan/lib/NameFrameXyList.py | 45 +-
 ...oTest_Heliostat3dInfer_ORIGINAL_SCRATCH.py | 234 +--
 .../lib/ufacet_heliostat_3d_analysis.py | 408 +---
 .../helio_scan/lib/ufacet_pipeline_clear.py | 9 +-
 .../helio_scan/lib/ufacet_pipeline_frame.py | 46 +-
 .../test/NoTest_Heliostat3dInfer.py | 85 +-
 .../helio_scan/test/NoTest_Reconstruct.py | 91 +-
 contrib/scripts/AbstractFileFingerprint.py | 5 +-
 contrib/scripts/FileFingerprint.py | 5 +-
 contrib/scripts/SensitiveStringMatcher.py | 8 +-
 contrib/scripts/sensitive_strings.py | 182 +-
 contrib/scripts/test/test_FileFingerprint.py | 32 +-
 .../test/test_SensitiveStringMatcher.py | 43 +-
 .../scripts/test/test_sensitive_strings.py | 44 +-
 .../downsample_data_general.py | 6 +-
 .../generate_downsampled_dataset.py | 8 +-
 .../generate_downsampled_calibration_data.py | 8 +-
 .../sofast_fringe/downsample_data.py | 4 +-
 .../generate_downsampled_calibration_data.py | 28 +-
 .../generate_downsampled_dataset.py | 15 +-
 .../generate_test_data_multi_facet.py | 13 +-
 .../generate_test_data_single_facet.py | 12 +-
 .../generate_test_data_undefined.py | 19 +-
 .../example_view_camera_distortion.py | 4 +-
 example/camera_io/live_view_color_Basler.py | 17 +-
 example/camera_io/live_view_mono_Basler.py | 17 +-
 .../run_and_save_images_Basler_color.py | 18 +-
 example/csp/example_optics_and_ray_tracing.py | 44 +-
 example/mirror/example_MirrorOutput.py | 174 +-
 example/raytrace/example_RayTraceOutput.py | 186 +-
 .../example_annotate_aruco_markers.py | 20 +-
 .../example_make_aruco_markers.py | 14 +-
 .../example_scene_reconstruction.py | 8 +-
 ...ulate_dot_locations_from_display_object.py | 21 +-
 example/sofast_fixed/find_blobs_in_image.py | 4 +-
 .../physical_target_dot_calibration.py | 27 +-
 .../process_fixed_pattern_data.py | 42 +-
 .../run_and_characterize_fixed_pattern.py | 49 +-
 .../run_fixed_pattern_projection.py | 7 +-
 ...mple_calibration_save_DisplayShape_file.py | 8 +-
 .../example_calibration_screen_shape.py | 7 +-
 .../example_process_facet_ensemble.py | 14 +-
 .../example_process_single_facet.py | 17 +-
 .../example_process_undefined_shape.py | 17 +-
 .../example_standard_mirror_plot_output.py | 20 +-
 .../solarfield/example_SolarFieldOutput.py | 168 +-
 example/targetcolor/example_TargetColor.py | 141 +-
 .../camera_calibration/CameraCalibration.py | 108 +-
 .../lib/ViewAnnotatedImages.py | 4 +-
 .../lib/calibration_camera.py | 35 +-
 .../lib/image_processing.py | 18 +-
 .../test/test_camera_calibration.py | 12 +-
 .../lib/SceneReconstruction.py | 65 +-
 .../test/generate_downsampled_dataset.py | 15 +-
 .../test/test_SceneReconstruction.py | 4 +-
 .../select_image_points/SelectImagePoints.py | 24 +-
 opencsp/app/sofast/SofastGUI.py | 278 +--
 opencsp/app/sofast/lib/BlobIndex.py | 77 +-
 .../app/sofast/lib/CalibrateDisplayShape.py | 144 +-
 .../sofast/lib/CalibrateSofastFixedDots.py | 78 +-
 opencsp/app/sofast/lib/DefinitionEnsemble.py | 13 +-
 opencsp/app/sofast/lib/DefinitionFacet.py | 14 +-
 opencsp/app/sofast/lib/DisplayShape.py | 38 +-
 .../sofast/lib/DotLocationsFixedPattern.py | 37 +-
 opencsp/app/sofast/lib/Fringes.py | 16 +-
 .../sofast/lib/ImageCalibrationAbstract.py | 32 +-
 .../app/sofast/lib/ImageCalibrationGlobal.py | 4 +-
 .../app/sofast/lib/ImageCalibrationScaling.py | 16 +-
 .../app/sofast/lib/MeasurementSofastFringe.py | 20 +-
 opencsp/app/sofast/lib/ParamsSofastFringe.py | 8 +-
 opencsp/app/sofast/lib/ProcessSofastFixed.py | 37 +-
 opencsp/app/sofast/lib/ProcessSofastFringe.py | 163 +-
 opencsp/app/sofast/lib/SpatialOrientation.py | 38 +-
 opencsp/app/sofast/lib/SystemSofastFixed.py | 20 +-
 opencsp/app/sofast/lib/SystemSofastFringe.py | 75 +-
 .../sofast/lib/calculation_data_classes.py | 14 +-
 opencsp/app/sofast/lib/image_processing.py | 80 +-
 .../app/sofast/lib/process_optics_geometry.py | 194 +-
 .../app/sofast/lib/save_DisplayShape_file.py | 10 +-
 opencsp/app/sofast/lib/spatial_processing.py | 46 +-
 opencsp/app/sofast/lib/visualize_setup.py | 71 +-
 .../sofast/test/test_CalibrateDisplayShape.py | 22 +-
 .../test/test_CalibrateSofastFixedDots.py | 16 +-
 opencsp/app/sofast/test/test_Display.py | 36 +-
 .../test/test_DotLocationsFixedPattern.py | 18 +-
 .../test/test_ImageCalibrationGlobal.py | 31 +-
 .../sofast/test/test_SpatialOrientation.py | 4 +-
 .../app/sofast/test/test_SystemSofastFixed.py | 8 +-
 .../sofast/test/test_SystemSofastFringe.py | 4 +-
 .../app/sofast/test/test_image_processing.py | 27 +-
 .../test/test_integration_multi_facet.py | 54 +-
 .../test/test_integration_single_facet.py | 64 +-
 .../sofast/test/test_integration_undefined.py | 20 +-
 .../test/test_project_fixed_pattern_target.py | 7 +-
 .../test/test_save_DisplayShape_file.py | 17 +-
 .../sofast/test/test_spatial_processing.py | 39 +-
 .../app/target/target_color/lib/ImageColor.py | 12 +-
 .../app/target/target_color/target_color.py | 54 +-
 .../target_color/target_color_bullseye.py | 28 +-
 .../target_color_bullseye_error.py | 8 +-
 opencsp/common/lib/camera/Camera.py | 57 +-
 opencsp/common/lib/camera/CameraTransform.py | 34 +-
 .../lib/camera/ImageAcquisitionAbstract.py | 4 +-
 .../lib/camera/ImageAcquisition_DCAM_color.py | 8 +-
 .../lib/camera/ImageAcquisition_DCAM_mono.py | 18 +-
 .../lib/camera/ImageAcquisition_MSMF.py | 8 +-
 opencsp/common/lib/camera/LiveView.py | 13 +-
 opencsp/common/lib/camera/UCamera.py | 48 +-
 opencsp/common/lib/camera/image_processing.py | 8 +-
 opencsp/common/lib/camera/test/test_Camera.py | 8 +-
 opencsp/common/lib/csp/Facet.py | 72 +-
 opencsp/common/lib/csp/FacetEnsemble.py | 92 +-
 opencsp/common/lib/csp/LightPath.py | 37 +-
 opencsp/common/lib/csp/LightPathEnsemble.py | 24 +-
 opencsp/common/lib/csp/LightSourcePoint.py | 8 +-
 opencsp/common/lib/csp/LightSourceSun.py | 33 +-
 opencsp/common/lib/csp/MirrorAbstract.py | 68 +-
 opencsp/common/lib/csp/MirrorParametric.py | 20 +-
 .../lib/csp/MirrorParametricRectangular.py | 6 +-
 opencsp/common/lib/csp/MirrorPoint.py | 63 +-
 opencsp/common/lib/csp/OpticOrientation.py | 24 +-
 opencsp/common/lib/csp/RayTrace.py | 99 +-
 opencsp/common/lib/csp/RayTraceable.py | 9 +-
 opencsp/common/lib/csp/SolarField.py | 109 +-
 .../VisualizeOrthorectifiedSlopeAbstract.py | 34 +-
 opencsp/common/lib/csp/standard_output.py | 96 +-
 opencsp/common/lib/csp/sun_position.py | 26 +-
 opencsp/common/lib/csp/sun_track.py | 12 +-
 .../lib/csp/test/test_MirrorParametric.py | 4 +-
 .../common/lib/csp/test/test_MirrorPoint.py | 8 +-
 .../csp/test/test_csp_optics_orientations.py | 8 +-
 opencsp/common/lib/csp/ufacet/Facet.py | 86 +-
 opencsp/common/lib/csp/ufacet/Heliostat.py | 250 +--
 .../lib/csp/ufacet/HeliostatConfiguration.py | 4 +-
 opencsp/common/lib/cv/AbstractFiducial.py | 4 +-
 opencsp/common/lib/cv/CacheableImage.py | 14 +-
 opencsp/common/lib/cv/OpticalFlow.py | 54 +-
 opencsp/common/lib/cv/SpotAnalysis.py | 76 +-
 .../lib/cv/spot_analysis/ImagesIterable.py | 17 +-
 .../spot_analysis/SpotAnalysisImagesStream.py | 4 +-
 .../cv/spot_analysis/SpotAnalysisOperable.py | 36 +-
 .../SpotAnalysisOperableAttributeParser.py | 25 +-
 .../SpotAnalysisOperablesStream.py | 31 +-
 .../AbstractSpotAnalysisImageProcessor.py | 34 +-
 ...AbstractSpotAnalysisImageProcessorLeger.py | 57 +-
 .../image_processor/EchoImageProcessor.py | 8 +-
 .../FalseColorImageProcessor.py | 26 +-
 .../image_processor/LogScaleImageProcessor.py | 27 +-
 .../PopulationStatisticsImageProcessor.py | 40 +-
 .../spot_analysis/image_processor/__init__.py | 12 +-
 .../test/TestFalseColorImageProcessor.py | 16 +-
 .../TestPopulationStatisticsImageProcessor.py | 44 +-
 .../common/lib/cv/test/test_OpticalFlow.py | 145 +-
 .../CalibrationCameraPosition.py | 44 +-
 .../lib/deflectometry/ImageProjection.py | 74 +-
 .../deflectometry/ImageProjectionSetupGUI.py | 74 +-
 .../ParamsSlopeSolverAbstract.py | 1 +
 .../ParamsSlopeSolverParaboloid.py | 4 +-
 .../deflectometry/ParamsSlopeSolverPlano.py | 3 +-
 .../common/lib/deflectometry/SlopeSolver.py | 24 +-
 .../lib/deflectometry/Surface2DAbstract.py | 44 +-
 .../lib/deflectometry/Surface2DParabolic.py | 109 +-
 .../lib/deflectometry/Surface2DPlano.py | 70 +-
 .../lib/deflectometry/slope_fitting_2d.py | 34 +-
 .../test/test_CalibrationCameraPosition.py | 16 +-
 .../deflectometry/test/test_SlopeSolver.py | 39 +-
 .../lib/deflectometry/test/test_Surface2D.py | 77 +-
 .../lib/file/AbstractAttributeParser.py | 8 +-
 opencsp/common/lib/file/AttributesManager.py | 31 +-
 opencsp/common/lib/file/CsvColumns.py | 13 +-
 .../lib/file/test/test_AttributesManager.py | 4 +-
 opencsp/common/lib/geo/lon_lat_nsttf.py | 4 +-
 opencsp/common/lib/geometry/EdgeXY.py | 16 +-
 .../lib/geometry/FunctionXYContinuous.py | 11 +-
 .../common/lib/geometry/FunctionXYDiscrete.py | 15 +-
 opencsp/common/lib/geometry/FunctionXYGrid.py | 20 +-
 opencsp/common/lib/geometry/Intersection.py | 47 +-
 opencsp/common/lib/geometry/LineXY.py | 18 +-
 opencsp/common/lib/geometry/LoopXY.py | 36 +-
 opencsp/common/lib/geometry/ReferenceFrame.py | 4 +-
 opencsp/common/lib/geometry/RegionXY.py | 33 +-
 opencsp/common/lib/geometry/TransformXYZ.py | 4 +-
 opencsp/common/lib/geometry/TranslationXYZ.py | 6 +-
 opencsp/common/lib/geometry/Vxy.py | 12 +-
 opencsp/common/lib/geometry/Vxyz.py | 8 +-
 opencsp/common/lib/geometry/angle.py | 4 +-
 opencsp/common/lib/geometry/geometry_2d.py | 47 +-
 opencsp/common/lib/geometry/geometry_3d.py | 56 +-
 .../common/lib/geometry/matrix_geometry_3d.py | 4 +-
 .../common/lib/geometry/test/test_LineXY.py | 22 +-
 opencsp/common/lib/geometry/transform_3d.py | 11 +-
 opencsp/common/lib/opencsp_path/__init__.py | 22 +-
 .../lib/opencsp_path/data_path_for_test.py | 17 +-
 .../lib/opencsp_path/opencsp_root_path.py | 51 +-
 .../test/test_opencsp_root_path.py | 19 +-
 .../common/lib/photogrammetry/ImageMarker.py | 40 +-
 .../lib/photogrammetry/bundle_adjustment.py | 50 +-
 .../lib/photogrammetry/photogrammetry.py | 48 +-
 .../test/test_bundle_adjustment.py | 22 +-
 .../test/test_photogrammetry.py | 24 +-
 opencsp/common/lib/process/MemoryMonitor.py | 8 +-
 .../common/lib/process/ParallelPartitioner.py | 47 +-
 .../common/lib/process/ServerSynchronizer.py | 78 +-
 .../common/lib/process/parallel_file_tools.py | 7 +-
 .../lib/process/parallel_video_tools.py | 39 +-
 .../common/lib/process/subprocess_tools.py | 68 +-
 .../test/lib/subprocess_test_helper.py | 34 +-
 .../lib/process/test/test_MemoryMonitor.py | 17 +-
 .../test/test_MultiprocessNonDaemonic.py | 26 +-
 .../process/test/test_ParallelPartitioner.py | 40 +-
 .../process/test/test_parallel_file_tools.py | 4 +-
 .../lib/process/test/test_subprocess_tools.py | 32 +-
 .../common/lib/render/ImageAttributeParser.py | 20 +-
 opencsp/common/lib/render/PlotAnnotation.py | 100 +-
 opencsp/common/lib/render/PowerpointSlide.py | 87 +-
 opencsp/common/lib/render/VideoHandler.py | 181 +-
 opencsp/common/lib/render/View3d.py | 103 +-
 opencsp/common/lib/render/axis_3d.py | 8 +-
 .../common/lib/render/figure_management.py | 87 +-
 opencsp/common/lib/render/image_plot.py | 11 +-
 .../common/lib/render/lib/PowerpointImage.py | 45 +-
 .../common/lib/render/lib/PowerpointShape.py | 6 +-
 .../common/lib/render/lib/PowerpointText.py | 14 +-
 .../render/test/test_ImageAttributeParser.py | 20 +-
 .../lib/render/test/test_VideoHandler.py | 47 +-
 .../lib/render/test/test_figure_management.py | 68 +-
 opencsp/common/lib/render/view_spec.py | 20 +-
 .../lib/render_control/RenderControlAxis.py | 19 +-
 .../RenderControlDeflectometryInstrument.py | 16
+- .../RenderControlEvaluateHeliostats3d.py | 27 +- .../RenderControlFigureRecord.py | 33 +- .../RenderControlFramesNoDuplicates.py | 5 +- .../render_control/RenderControlHeliostat.py | 16 +- .../RenderControlHeliostatTracks.py | 12 +- .../RenderControlHeliostats3d.py | 12 +- .../render_control/RenderControlKeyCorners.py | 4 +- .../lib/render_control/RenderControlMirror.py | 4 +- .../render_control/RenderControlPointSeq.py | 28 +- .../RenderControlPowerpointPresentation.py | 24 +- .../render_control/RenderControlSolarField.py | 17 +- .../lib/render_control/RenderControlText.py | 12 +- .../RenderControlTrajectoryAnalysis.py | 12 +- .../lib/render_control/RenderControlVideo.py | 27 +- .../RenderControlVideoFrames.py | 22 +- .../RenderControlVideoTracks.py | 8 +- opencsp/common/lib/target/TargetAbstract.py | 13 +- opencsp/common/lib/target/TargetColor.py | 388 +--- .../common/lib/target/target_color_2d_rgb.py | 26 +- .../common/lib/target/target_color_convert.py | 76 +- opencsp/common/lib/target/target_image.py | 4 +- opencsp/common/lib/test/TestOutput.py | 43 +- opencsp/common/lib/test/support_test.py | 53 +- opencsp/common/lib/test/test_MirrorOutput.py | 166 +- .../common/lib/test/test_RayTraceOutput.py | 166 +- .../common/lib/test/test_SolarFieldOutput.py | 112 +- opencsp/common/lib/test/test_TargetColor.py | 129 +- opencsp/common/lib/tool/dict_tools.py | 46 +- opencsp/common/lib/tool/file_tools.py | 277 +-- opencsp/common/lib/tool/hdf5_tools.py | 22 +- opencsp/common/lib/tool/image_tools.py | 21 +- opencsp/common/lib/tool/list_tools.py | 16 +- opencsp/common/lib/tool/log_tools.py | 14 +- opencsp/common/lib/tool/math_tools.py | 23 +- opencsp/common/lib/tool/system_tools.py | 4 +- .../common/lib/tool/test/test_file_tools.py | 20 +- .../common/lib/tool/test/test_list_tools.py | 38 +- .../common/lib/tool/test/test_log_tools.py | 75 +- .../common/lib/tool/test/test_math_tools.py | 8 +- .../common/lib/tool/test/test_typing_tools.py | 8 +- opencsp/common/lib/tool/time_date_tools.py | 31 +- opencsp/common/lib/tool/typing_tools.py | 13 +- opencsp/common/lib/uas/Scan.py | 12 +- opencsp/common/lib/uas/ScanPass.py | 68 +- opencsp/common/lib/uas/WayPoint.py | 4 +- opencsp/test/test_DocStringsExist.py | 4 +- 329 files changed, 4339 insertions(+), 14632 deletions(-) diff --git a/contrib/app/sofast/load_saved_data.py b/contrib/app/sofast/load_saved_data.py index f3b37012..77e9e426 100644 --- a/contrib/app/sofast/load_saved_data.py +++ b/contrib/app/sofast/load_saved_data.py @@ -43,27 +43,20 @@ def load_ideal_facet_ensemble_from_hdf(file: str, focal_length: float) -> FacetE file, ) vs_facet_loc = Vxyz(data_ensemble['v_facet_locations']) - rs_facet_ensemble = [ - Rotation.from_rotvec(v) for v in data_ensemble['r_facet_ensemble'] - ] + rs_facet_ensemble = [Rotation.from_rotvec(v) for v in data_ensemble['r_facet_ensemble']] # Load ensemble definition data num_facets = len(vs_facet_loc) facets = [] for idx_facet in range(num_facets): - data = load_hdf5_datasets( - [f'DataSofastInput/optic_definition/facet_{idx_facet:03d}/v_facet_corners'], - file, - ) + data = load_hdf5_datasets([f'DataSofastInput/optic_definition/facet_{idx_facet:03d}/v_facet_corners'], file) # Create mirror region v_facet_corners = Vxy(data['v_facet_corners'][:2]) region_facet = RegionXY.from_vertices(v_facet_corners) # Create mirror - mirror = MirrorParametric.generate_symmetric_paraboloid( - focal_length, region_facet - ) + mirror = MirrorParametric.generate_symmetric_paraboloid(focal_length, region_facet) # Create facet facet = 
Facet(mirror) @@ -98,9 +91,7 @@ def load_facet_ensemble_from_hdf(file: str) -> FacetEnsemble: "rotation_defined" Ensemble. """ # Get number of facets - data_ensemble = load_hdf5_datasets( - ['DataSofastInput/optic_definition/ensemble/v_facet_locations'], file - ) + data_ensemble = load_hdf5_datasets(['DataSofastInput/optic_definition/ensemble/v_facet_locations'], file) num_facets = data_ensemble['v_facet_locations'].shape[1] facets = [] @@ -160,9 +151,7 @@ def load_ideal_facet_from_hdf(file: str, focal_length: float) -> Facet: Reference facet representation. Defined as "rotation_defined." """ # Load facet corners - data = load_hdf5_datasets( - ['DataSofastInput/optic_definition/facet_000/v_facet_corners'], file - ) + data = load_hdf5_datasets(['DataSofastInput/optic_definition/facet_000/v_facet_corners'], file) # Create mirror v_facet_corners = Vxy(data['v_facet_corners'][:2]) diff --git a/contrib/app/sofast/run_and_characterize_sofast_1_cam.py b/contrib/app/sofast/run_and_characterize_sofast_1_cam.py index fb5bd975..629e3c8d 100644 --- a/contrib/app/sofast/run_and_characterize_sofast_1_cam.py +++ b/contrib/app/sofast/run_and_characterize_sofast_1_cam.py @@ -28,9 +28,7 @@ def main(): # Define file locations data_dir = '../../sofast_2_system_calibration_files/' - file_image_projection = os.path.join( - data_dir, 'Image_Projection_optics_lab_landscape_rectangular.h5' - ) + file_image_projection = os.path.join(data_dir, 'Image_Projection_optics_lab_landscape_rectangular.h5') file_display = os.path.join(data_dir, 'Display_optics_lab_landscape_distorted2D.h5') file_camera = os.path.join(data_dir, 'Camera_optics_lab_landscape.h5') file_facet = os.path.join(data_dir, 'Facet_NSTTF.json') @@ -48,10 +46,7 @@ def main(): # Define surface fitting parameters surface_data = dict( - surface_type='parabolic', - initial_focal_lengths_xy=(100.0, 100.0), - robust_least_squares=False, - downsample=10, + surface_type='parabolic', initial_focal_lengths_xy=(100.0, 100.0), robust_least_squares=False, downsample=10 ) # Create fringe object @@ -83,19 +78,13 @@ def func_calibrate_exposure(): # Capture calibration data def func_capture_calibration_frames(): print('Capturing display-camera response calibration data') - system.run_display_camera_response_calibration( - res=10, run_next=system.run_next_in_queue - ) + system.run_display_camera_response_calibration(res=10, run_next=system.run_next_in_queue) # Process calibration data def func_process_calibration_data(): print('Processing calibration data') calibration_images = system.get_calibration_images() - calibrations.append( - ImageCalibrationScaling.from_data( - calibration_images[0], system.calibration_display_values - ) - ) + calibrations.append(ImageCalibrationScaling.from_data(calibration_images[0], system.calibration_display_values)) system.run_next_in_queue() # Load fringe object @@ -137,9 +126,7 @@ def func_close_all(): print('Saving Data') # Get Measurement object - measurement = system.get_measurements( - optic_measure_point, optic_screen_dist, optic_name - )[0] + measurement = system.get_measurements(optic_measure_point, optic_screen_dist, optic_name)[0] calibration = calibrations[0] # Process data diff --git a/contrib/app/sofast/run_and_characterize_sofast_2_cam.py b/contrib/app/sofast/run_and_characterize_sofast_2_cam.py index 8a9257f4..c40a98e4 100644 --- a/contrib/app/sofast/run_and_characterize_sofast_2_cam.py +++ b/contrib/app/sofast/run_and_characterize_sofast_2_cam.py @@ -28,18 +28,12 @@ def main(): # Define file locations data_dir = 
'../../sofast_2_system_calibration_files/' - file_image_projection = os.path.join( - data_dir, 'Image_Projection_optics_lab_landscape_rectangular.h5' - ) + file_image_projection = os.path.join(data_dir, 'Image_Projection_optics_lab_landscape_rectangular.h5') - file_display_0 = os.path.join( - data_dir, 'Display_optics_lab_landscape_distorted2D.h5' - ) + file_display_0 = os.path.join(data_dir, 'Display_optics_lab_landscape_distorted2D.h5') file_camera_0 = os.path.join(data_dir, 'Camera_optics_lab_landscape.h5') - file_display_1 = os.path.join( - data_dir, 'Display_optics_lab_landscape_distorted2D.h5' - ) + file_display_1 = os.path.join(data_dir, 'Display_optics_lab_landscape_distorted2D.h5') file_camera_1 = os.path.join(data_dir, 'Camera_optics_lab_landscape.h5') file_facet = os.path.join(data_dir, 'Facet_NSTTF.json') @@ -66,10 +60,7 @@ def main(): # Define surface fitting parameters surface_data = [ dict( - surface_type='parabolic', - initial_focal_lengths_xy=(100.0, 100.0), - robust_least_squares=False, - downsample=10, + surface_type='parabolic', initial_focal_lengths_xy=(100.0, 100.0), robust_least_squares=False, downsample=10 ) ] @@ -108,20 +99,14 @@ def func_calibrate_exposure(): # Capture calibration data def func_capture_calibration_frames(): print('Capturing display-camera response calibration data') - system.run_display_camera_response_calibration( - res=10, run_next=system.run_next_in_queue - ) + system.run_display_camera_response_calibration(res=10, run_next=system.run_next_in_queue) # Process calibration data def func_process_calibration_data(): print('Processing calibration data') calibration_images = system.get_calibration_images() for ims in calibration_images: - calibrations.append( - ImageCalibrationScaling.from_data( - ims, system.calibration_display_values - ) - ) + calibrations.append(ImageCalibrationScaling.from_data(ims, system.calibration_display_values)) system.run_next_in_queue() # Load fringe object @@ -167,9 +152,7 @@ def func_close_all(): print('Saving Data') # Get Measurement object - measurements = system.get_measurements( - optic_measure_point, optic_screen_dist, optic_name - ) + measurements = system.get_measurements(optic_measure_point, optic_screen_dist, optic_name) # Process data print('Processing data') diff --git a/contrib/app/ufacet-s/UfacetFlight.py b/contrib/app/ufacet-s/UfacetFlight.py index 96969f3f..8c10874b 100755 --- a/contrib/app/ufacet-s/UfacetFlight.py +++ b/contrib/app/ufacet-s/UfacetFlight.py @@ -49,16 +49,12 @@ class UfacetFlight: ] _flights: dict[str, "UfacetFlight"] = {} - def __init__( - self, name: str, extracted_frames_name_format: str, extracted_frames_dir: str - ): + def __init__(self, name: str, extracted_frames_name_format: str, extracted_frames_dir: str): """This class holds onto relevant information for a variety of UFACET flights.""" self._name = name self._frames_name_format_extracted = extracted_frames_name_format - self._frames_dir_extracted = os.path.join( - orp.opencsp_scratch_dir("ufacetnio"), extracted_frames_dir - ) + self._frames_dir_extracted = os.path.join(orp.opencsp_scratch_dir("ufacetnio"), extracted_frames_dir) self._num_frames_extracted: int = None self._frames_name_format_deduplicated = None @@ -147,9 +143,7 @@ def video_dir_name_ext(self, allow_ppt_compressed_videos=False): # check for a similar name else: exp = f"{video_dir}/{video_name}*{video_extension}" - lt.debug( - f"In UfacetFlight.video_dir_name_ext(): searching for videos matching the expression \"{exp}\"" - ) + lt.debug(f"In 
UfacetFlight.video_dir_name_ext(): searching for videos matching the expression \"{exp}\"") matches = list(glob.glob(exp)) to_remove, to_append_to_end = [], [] for m in matches: @@ -226,15 +220,10 @@ def frame_name_ext_deduplicated(self, frame_number: int): return self.frames_name_format_deduplicated() % frame_number def frame_path_name_ext_extracted(self, frame_number: int): - return os.path.join( - self.frames_dir_extracted(), self.frame_name_ext_extracted(frame_number) - ) + return os.path.join(self.frames_dir_extracted(), self.frame_name_ext_extracted(frame_number)) def frame_path_name_ext_deduplicated(self, frame_number: int): - return os.path.join( - self.frames_dir_deduplicated(), - self.frame_name_ext_deduplicated(frame_number), - ) + return os.path.join(self.frames_dir_deduplicated(), self.frame_name_ext_deduplicated(frame_number)) def _count_frames(self, path: str, name_format: str): if ft.directory_exists(path, follow_symlinks=True): diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/01_main_demonstrate_solar_field_plots.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/01_main_demonstrate_solar_field_plots.py index 98da3e87..24068e5f 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/01_main_demonstrate_solar_field_plots.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/01_main_demonstrate_solar_field_plots.py @@ -64,24 +64,17 @@ def draw_demonstration_figures( heliostat_name = '5E10' # View setup fig_record = fm.setup_figure_for_3d_data( - figure_control, - axis_control_m, - view_spec, - title=('Heliostat ' + heliostat_name), + figure_control, axis_control_m, view_spec, title=('Heliostat ' + heliostat_name) ) view = fig_record.view # Configuration setup heliostat = solar_field.lookup_heliostat(heliostat_name) heliostat.set_configuration(hc.face_south()) # Style setup - heliostat_styles = rce.RenderControlEnsemble( - rch.normal_facet_outlines_names(color='b') - ) + heliostat_styles = rce.RenderControlEnsemble(rch.normal_facet_outlines_names(color='b')) # Comment fig_record.comment.append("Demonstration of heliostat drawing.") - fig_record.comment.append( - "Facet outlines shown, with facet names and overall heliostat surface normal." 
- ) + fig_record.comment.append("Facet outlines shown, with facet names and overall heliostat surface normal.") # Draw heliostat.draw(view, heliostat_styles) view.show() @@ -92,10 +85,7 @@ def draw_demonstration_figures( heliostat_name = '5E10' # View setup fig_record = fm.setup_figure_for_3d_data( - figure_control, - axis_control_m, - view_spec, - title=('Heliostat ' + heliostat_name + ', with Highlighting'), + figure_control, axis_control_m, view_spec, title=('Heliostat ' + heliostat_name + ', with Highlighting') ) view = fig_record.view # Tracking setup @@ -103,20 +93,14 @@ def draw_demonstration_figures( heliostat.set_tracking(aimpoint_xyz, solar_field.origin_lon_lat, when_ymdhmsz) # Style setup default_heliostat_style = rch.normal_facet_outlines() - default_heliostat_style.facet_styles.add_special_name( - 16, rcf.corner_normals_outline_name(color='c') - ) - default_heliostat_style.facet_styles.add_special_names( - [1, 4, 7, 24, 25], rcf.normal_outline(color='r') - ) + default_heliostat_style.facet_styles.add_special_name(16, rcf.corner_normals_outline_name(color='c')) + default_heliostat_style.facet_styles.add_special_names([1, 4, 7, 24, 25], rcf.normal_outline(color='r')) heliostat_styles = rce.RenderControlEnsemble(default_heliostat_style) # Comment fig_record.comment.append("Demonstration of example heliostat annotations.") fig_record.comment.append("Black: Facet outlines.") fig_record.comment.append("Black: Overall heliostat surface normal.") - fig_record.comment.append( - "Red: Highlighted facets and their surface normals." - ) + fig_record.comment.append("Red: Highlighted facets and their surface normals.") fig_record.comment.append( "Cyan: Highlighted facet with facet name and facet surface normal drawn at corners." ) @@ -156,44 +140,30 @@ def draw_demonstration_figures( # Draw heliostat.draw(view, heliostat_styles) # Comment - fig_record.comment.append( - "Demonstration of various example heliostat drawing modes." - ) + fig_record.comment.append("Demonstration of various example heliostat drawing modes.") fig_record.comment.append("Black: Name only.") fig_record.comment.append("Red: Centroid only.") fig_record.comment.append("Green: Centroid and name.") fig_record.comment.append("Blue: Facet outlines.") - fig_record.comment.append( - "Cyan: Overall outline and overall surface normal." - ) - fig_record.comment.append( - "Magneta: Overall outline and overall surface normal, drawn at corners." - ) + fig_record.comment.append("Cyan: Overall outline and overall surface normal.") + fig_record.comment.append("Magneta: Overall outline and overall surface normal, drawn at corners.") fig_record.comment.append("Green: Facet outlines and overall surface normal.") fig_record.comment.append("Cyan: Facet outlines and facet surface normals.") - fig_record.comment.append( - "Black: Facet outlines and facet surface normals drawn at facet corners." - ) + fig_record.comment.append("Black: Facet outlines and facet surface normals drawn at facet corners.") view.show() # Solar field heliostat names. if draw_solar_field_h_names: # View setup - fig_record = fm.setup_figure_for_3d_data( - figure_control, axis_control_m, view_spec, title='Heliostat Names' - ) + fig_record = fm.setup_figure_for_3d_data(figure_control, axis_control_m, view_spec, title='Heliostat Names') view = fig_record.view # Tracking setup # Not required since we're not drawing heliostat shape. 
# Style setup solar_field_style = rcsf.heliostat_names(color='m') # Comment - fig_record.comment.append( - "Heliostat names, drawn at each heliostat's centroid." - ) - fig_record.comment.append( - "At NSTTF, centroids appear to be at the midpoint of the torque tube." - ) + fig_record.comment.append("Heliostat names, drawn at each heliostat's centroid.") + fig_record.comment.append("At NSTTF, centroids appear to be at the midpoint of the torque tube.") # Draw solar_field.draw(view, solar_field_style) view.show() @@ -201,9 +171,7 @@ def draw_demonstration_figures( # Solar field heliostat centroids. if draw_solar_field_h_centroids: # View setup - fig_record = fm.setup_figure_for_3d_data( - figure_control, axis_control_m, view_spec, title='Heliostat Centroids' - ) + fig_record = fm.setup_figure_for_3d_data(figure_control, axis_control_m, view_spec, title='Heliostat Centroids') view = fig_record.view # Tracking setup # Not required since we're not drawing heliostat shape. @@ -221,10 +189,7 @@ def draw_demonstration_figures( if draw_solar_field_h_centroids_names: # View setup fig_record = fm.setup_figure_for_3d_data( - figure_control, - axis_control_m, - view_spec, - title='Heliostat Labelled Centroids', + figure_control, axis_control_m, view_spec, title='Heliostat Labelled Centroids' ) view = fig_record.view # Tracking setup @@ -241,23 +206,17 @@ def draw_demonstration_figures( # (Plus aim point and legend.) if draw_solar_field_h_outlines: # View setup - fig_record = fm.setup_figure_for_3d_data( - figure_control, axis_control_m, view_spec, title='Heliostat Outlines' - ) + fig_record = fm.setup_figure_for_3d_data(figure_control, axis_control_m, view_spec, title='Heliostat Outlines') view = fig_record.view # Tracking setup - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) # Style setup solar_field_style = rcsf.heliostat_outlines(color='b') # Comment fig_record.comment.append("A simple way of rendering a solar field.") # Draw solar_field.draw(view, solar_field_style) - view.draw_xyz( - aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz' - ) + view.draw_xyz(aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz') view.show() # Annotated solar field. 
@@ -293,31 +252,19 @@ def draw_demonstration_figures( ) view = fig_record.view # Configuration setup - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) solar_field.set_heliostats_configuration(stowed_heliostats, hc.NSTTF_stow()) synch_configuration = hc.HeliostatConfiguration(az=synch_az, el=synch_el) - solar_field.set_heliostats_configuration( - synched_heliostats, synch_configuration - ) + solar_field.set_heliostats_configuration(synched_heliostats, synch_configuration) up_configuration = hc.HeliostatConfiguration(az=up_az, el=up_el) solar_field.set_heliostats_configuration(up_heliostats, up_configuration) # Style setup solar_field_style = rcsf.heliostat_outlines(color='b') - solar_field_style.heliostat_styles.add_special_names( - up_heliostats, rch.normal_outline(color='c') - ) - solar_field_style.heliostat_styles.add_special_names( - stowed_heliostats, rch.normal_outline(color='r') - ) - solar_field_style.heliostat_styles.add_special_names( - synched_heliostats, rch.normal_outline(color='g') - ) + solar_field_style.heliostat_styles.add_special_names(up_heliostats, rch.normal_outline(color='c')) + solar_field_style.heliostat_styles.add_special_names(stowed_heliostats, rch.normal_outline(color='r')) + solar_field_style.heliostat_styles.add_special_names(synched_heliostats, rch.normal_outline(color='g')) # Comment - fig_record.comment.append( - "A solar field situation with heliostats in varying status." - ) + fig_record.comment.append("A solar field situation with heliostats in varying status.") fig_record.comment.append("Blue heliostats are tracking.") fig_record.comment.append("Cyan heliostats are face up.") fig_record.comment.append("Red heliostats are in stow (out of service).") @@ -356,54 +303,25 @@ def draw_demonstration_figures( '7E6', '7E7', ] - tracking_heliostats = [ - '8E1', - '8E2', - '8E4', - '8E6', - '8E7', - '9E1', - '9E2', - '9E3', - '9E4', - '9E5', - '9E6', - '9E7', - ] + tracking_heliostats = ['8E1', '8E2', '8E4', '8E6', '8E7', '9E1', '9E2', '9E3', '9E4', '9E5', '9E6', '9E7'] # View setup - fig_record = fm.setup_figure_for_3d_data( - figure_control, axis_control_m, view_spec, title='Selected Heliostats' - ) + fig_record = fm.setup_figure_for_3d_data(figure_control, axis_control_m, view_spec, title='Selected Heliostats') view = fig_record.view # Configuration setup - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) solar_field.set_heliostats_configuration(stowed_heliostats, hc.NSTTF_stow()) synch_configuration = hc.HeliostatConfiguration(az=synch_az, el=synch_el) - solar_field.set_heliostats_configuration( - synched_heliostats, synch_configuration - ) + solar_field.set_heliostats_configuration(synched_heliostats, synch_configuration) up_configuration = hc.HeliostatConfiguration(az=up_az, el=up_el) solar_field.set_heliostats_configuration(up_heliostats, up_configuration) # Style setup solar_field_style = rcsf.heliostat_blanks() - solar_field_style.heliostat_styles.add_special_names( - up_heliostats, rch.normal_outline(color='c') - ) - solar_field_style.heliostat_styles.add_special_names( - stowed_heliostats, rch.normal_outline(color='r') - ) - solar_field_style.heliostat_styles.add_special_names( - synched_heliostats, rch.normal_outline(color='g') - ) - 
solar_field_style.heliostat_styles.add_special_names( - tracking_heliostats, rch.normal_outline(color='b') - ) + solar_field_style.heliostat_styles.add_special_names(up_heliostats, rch.normal_outline(color='c')) + solar_field_style.heliostat_styles.add_special_names(stowed_heliostats, rch.normal_outline(color='r')) + solar_field_style.heliostat_styles.add_special_names(synched_heliostats, rch.normal_outline(color='g')) + solar_field_style.heliostat_styles.add_special_names(tracking_heliostats, rch.normal_outline(color='b')) # Comment - fig_record.comment.append( - "A subset of heliostats selected, so that plot is effectively zoomed in." - ) + fig_record.comment.append("A subset of heliostats selected, so that plot is effectively zoomed in.") fig_record.comment.append("Blue heliostats are tracking.") fig_record.comment.append("Cyan heliostats are face up.") fig_record.comment.append("Red heliostats are in stow (out of service).") @@ -424,59 +342,39 @@ def draw_demonstration_figures( ) view = fig_record.view # Tracking setup - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) # Style setup solar_field_style = rcsf.heliostat_vector_field(color='b') # Comment - fig_record.comment.append( - "Each heliostat's surface normal, which can be viewed as a vector field." - ) + fig_record.comment.append("Each heliostat's surface normal, which can be viewed as a vector field.") # Draw solar_field.draw(view, solar_field_style) - view.draw_xyz( - aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz' - ) + view.draw_xyz(aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz') view.show() # Dense vector field. if draw_dense_vector_field: # View setup (xy only) fig_record = fm.setup_figure_for_3d_data( - figure_control, - axis_control_m, - vs.view_spec_xy(), - title='Dense Tracking Vector Field', + figure_control, axis_control_m, vs.view_spec_xy(), title='Dense Tracking Vector Field' ) view_xy = fig_record.view # Tracking setup - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) # Style setup solar_field_style = rcsf.heliostat_vector_field_outlines(color='grey') # Comment fig_record.comment.append("Dense vector field of tracking surface normals.") # Draw solar field and aim point. solar_field.draw(view_xy, solar_field_style) - view_xy.draw_xyz( - aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz' - ) + view_xy.draw_xyz(aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz') # Draw dense vector field. grid_xy = solar_field.heliostat_field_regular_grid_xy(40, 20) grid_xydxy = [ - [ - p, - sun_track.tracking_surface_normal_xy( - p + [0], aimpoint_xyz, solar_field.origin_lon_lat, when_ymdhmsz - ), - ] + [p, sun_track.tracking_surface_normal_xy(p + [0], aimpoint_xyz, solar_field.origin_lon_lat, when_ymdhmsz)] for p in grid_xy ] - view_xy.draw_pqdpq_list( - grid_xydxy, style=rcps.vector_field(color='b', vector_scale=5.0) - ) + view_xy.draw_pqdpq_list(grid_xydxy, style=rcps.vector_field(color='b', vector_scale=5.0)) # Finish. view.show() @@ -551,9 +449,7 @@ def draw_demonstration_figures( if save_figures: print('\n\nSaving figures...') # Output directory. 
- output_path = os.path.join( - '..', ('output_' + datetime.now().strftime('%Y_%m_%d_%H%M')) - ) + output_path = os.path.join('..', ('output_' + datetime.now().strftime('%Y_%m_%d_%H%M'))) if not (os.path.exists(output_path)): os.makedirs(output_path) fm.save_all_figures(output_path) diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/02_main_altitude_gaze_analysis_yz.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/02_main_altitude_gaze_analysis_yz.py index 064ac122..4db6b443 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/02_main_altitude_gaze_analysis_yz.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/02_main_altitude_gaze_analysis_yz.py @@ -55,9 +55,7 @@ # Load solar field data. file_field = './data/Solar_Field.csv' file_centroids_offsets = './data/Facets_Centroids.csv' - solar_field = sf.SolarField( - file_field=file_field, file_centroids_offsets=file_centroids_offsets - ) + solar_field = sf.SolarField(file_field=file_field, file_centroids_offsets=file_centroids_offsets) # Define tracking time. aimpoint = [60.0, 8.8, 28.9] @@ -130,21 +128,13 @@ # scan_parameters['camera'] = cam.sony_alpha_20mm_portrait() # Camera model. scan_parameters['camera'] = cam.ultra_wide_angle() # Camera model. # scan_parameters['camera'] = cam.mavic_zoom() # Camera model. - scan_parameters['section_plane_tolerance'] = ( - 3 # m. Lateral distance to include heliostats in section. - ) - scan_parameters['p_margin'] = ( - 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. - ) - scan_parameters['altitude_margin'] = ( - 2.5 # m. Clearance of highest possible heliostat point. - ) + scan_parameters['section_plane_tolerance'] = 3 # m. Lateral distance to include heliostats in section. + scan_parameters['p_margin'] = 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. + scan_parameters['altitude_margin'] = 2.5 # m. Clearance of highest possible heliostat point. scan_parameters['maximum_safe_altitude'] = ( 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC ) - scan_parameters['maximum_target_lookback'] = ( - 3 # Number of heliostats to look back for reflection targets. - ) + scan_parameters['maximum_target_lookback'] = 3 # Number of heliostats to look back for reflection targets. scan_parameters['gaze_tolerance'] = np.deg2rad( 1 ) # Uncertainty in gaze angle. True angle is +/- tolerance from nominal. @@ -156,22 +146,10 @@ E05_x = 43.9 # m. E06_x = 53.5 # m. E07_x = 63.4 # m. - segment_xy_E04 = [ - [E04_x, R05_y], - [E04_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E05 = [ - [E05_x, R05_y], - [E05_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E06 = [ - [E06_x, R05_y], - [E06_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E07 = [ - [E07_x, R05_y], - [E07_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E04 = [[E04_x, R05_y], [E04_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E05 = [[E05_x, R05_y], [E05_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E06 = [[E06_x, R05_y], [E06_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E07 = [[E07_x, R05_y], [E07_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN # Construct individual scan passes. # # pass_E04 = sp.ScanPass(solar_field, segment_xy_E04, scan_parameters) # ?? SCAFFOLDING RCB -- CRASHES @@ -190,9 +168,7 @@ # Write the flight plan file. 
# Output directory. - output_path = os.path.join( - '..', ('output_' + datetime.now().strftime('%Y_%m_%d_%H%M')) - ) + output_path = os.path.join('..', ('output_' + datetime.now().strftime('%Y_%m_%d_%H%M'))) if not (os.path.exists(output_path)): os.makedirs(output_path) flight_plan.save_to_lichi_csv(output_path, elevation_offset) @@ -208,9 +184,7 @@ # analysis_render_control.draw_single_heliostat_analysis_list = ['5E6', '6E6', '7E6', '8E6', '13E6', '14E6'] # analysis_render_control.draw_single_heliostat_analysis_list = ['10E6'] # analysis_render_control.draw_single_heliostat_analysis_list = ['12E6', '13E6'] - analysis_render_control.draw_single_heliostat_analysis_list = ( - pass_E06.heliostat_name_list - ) + analysis_render_control.draw_single_heliostat_analysis_list = pass_E06.heliostat_name_list analysis_render_control.draw_single_heliostat_constraints = False # analysis_render_control.draw_single_heliostat_constraints_heliostats = False # analysis_render_control.draw_single_heliostat_constraints_mnsa_ray = False @@ -240,9 +214,7 @@ analysis_render_control.draw_multi_heliostat_gaze_angle_example = False analysis_render_control.draw_multi_heliostat_gaze_angle_fill = False analysis_render_control.draw_multi_heliostat_gaze_angle_legend = False - analysis_render_control.draw_multi_heliostat_vertical_fov_required = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_multi_heliostat_vertical_fov_required = False # KEY SUMMARY analysis_render_control.draw_multi_heliostat_vertical_fov_required_legend = False analysis_render_control.draw_multi_heliostat_select_gaze = False # KEY SUMMARY # analysis_render_control.draw_multi_heliostat_select_gaze_per_heliostat = False diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/03_main_flight_plan_assembly.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/03_main_flight_plan_assembly.py index 0429f945..59052c20 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/03_main_flight_plan_assembly.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/03_main_flight_plan_assembly.py @@ -42,21 +42,13 @@ def construct_ufacet_scan(solar_field, lead_in, run_past): # scan_parameters['camera'] = cam.sony_alpha_20mm_portrait() # Camera model. scan_parameters['camera'] = cam.ultra_wide_angle() # Camera model. # scan_parameters['camera'] = cam.mavic_zoom() # Camera model. - scan_parameters['section_plane_tolerance'] = ( - 3 # m. Lateral distance to include heliostats in section. - ) - scan_parameters['p_margin'] = ( - 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. - ) - scan_parameters['altitude_margin'] = ( - 2.5 # m. Clearance of highest possible heliostat point. - ) + scan_parameters['section_plane_tolerance'] = 3 # m. Lateral distance to include heliostats in section. + scan_parameters['p_margin'] = 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. + scan_parameters['altitude_margin'] = 2.5 # m. Clearance of highest possible heliostat point. scan_parameters['maximum_safe_altitude'] = ( 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC ) - scan_parameters['maximum_target_lookback'] = ( - 3 # Number of heliostats to look back for reflection targets. - ) + scan_parameters['maximum_target_lookback'] = 3 # Number of heliostats to look back for reflection targets. scan_parameters['gaze_tolerance'] = np.deg2rad( 1 ) # Uncertainty in gaze angle. True angle is +/- tolerance from nominal. 
@@ -69,26 +61,11 @@ def construct_ufacet_scan(solar_field, lead_in, run_past): E06_x = 53.5 # m. E07_x = 63.4 # m. E08_x = 73.2 # m. - segment_xy_E04 = [ - [E04_x, R05_y], - [E04_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E05 = [ - [E05_x, R05_y], - [E05_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E06 = [ - [E06_x, R05_y], - [E06_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E07 = [ - [E07_x, R05_y], - [E07_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E08 = [ - [E08_x, R05_y], - [E08_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E04 = [[E04_x, R05_y], [E04_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E05 = [[E05_x, R05_y], [E05_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E06 = [[E06_x, R05_y], [E06_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E07 = [[E07_x, R05_y], [E07_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E08 = [[E08_x, R05_y], [E08_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN # Construct individual UFACET scan passes. # # ufacet_pass_E04 = usp.UfacetScanPass(solar_field, segment_xy_E04, scan_parameters) # ?? SCAFFOLDING RCB -- CRASHES @@ -97,17 +74,10 @@ def construct_ufacet_scan(solar_field, lead_in, run_past): ufacet_pass_E07 = usp.UfacetScanPass(solar_field, segment_xy_E07, scan_parameters) ufacet_pass_E08 = usp.UfacetScanPass(solar_field, segment_xy_E08, scan_parameters) - ufacet_scan_pass_list = [ - ufacet_pass_E05, - ufacet_pass_E06, - ufacet_pass_E07, - ufacet_pass_E08, - ] + ufacet_scan_pass_list = [ufacet_pass_E05, ufacet_pass_E06, ufacet_pass_E07, ufacet_pass_E08] # Construct the scan. - scan = Scan.construct_scan_given_UFACET_scan_passes( - ufacet_scan_pass_list, lead_in, run_past - ) + scan = Scan.construct_scan_given_UFACET_scan_passes(ufacet_scan_pass_list, lead_in, run_past) # Return. 
return scan @@ -123,9 +93,7 @@ def render_ufacet_scan(figure_control, scan): # analysis_render_control.draw_single_heliostat_analysis_list = ['10E6'] # analysis_render_control.draw_single_heliostat_analysis_list = ['12E6', '13E6'] single_heliostat_render_pass = scan.passes[0].ufacet_scan_pass() - analysis_render_control.draw_single_heliostat_analysis_list = ( - single_heliostat_render_pass.heliostat_name_list - ) + analysis_render_control.draw_single_heliostat_analysis_list = single_heliostat_render_pass.heliostat_name_list # analysis_render_control.draw_single_heliostat_analysis_list = ufacet_pass_E06.heliostat_name_list analysis_render_control.draw_single_heliostat_constraints = False # analysis_render_control.draw_single_heliostat_constraints_heliostats = False @@ -156,9 +124,7 @@ def render_ufacet_scan(figure_control, scan): analysis_render_control.draw_multi_heliostat_gaze_angle_example = False analysis_render_control.draw_multi_heliostat_gaze_angle_fill = False analysis_render_control.draw_multi_heliostat_gaze_angle_legend = False - analysis_render_control.draw_multi_heliostat_vertical_fov_required = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_multi_heliostat_vertical_fov_required = False # KEY SUMMARY analysis_render_control.draw_multi_heliostat_vertical_fov_required_legend = False analysis_render_control.draw_multi_heliostat_select_gaze = False # KEY SUMMARY # analysis_render_control.draw_multi_heliostat_select_gaze_per_heliostat = False @@ -180,9 +146,7 @@ def render_ufacet_scan(figure_control, scan): # Render the analysis. for scan_pass in scan.passes: - scan_pass.ufacet_scan_pass().draw_section_analysis( - figure_control, analysis_render_control - ) + scan_pass.ufacet_scan_pass().draw_section_analysis(figure_control, analysis_render_control) # ------------------------------------------------------------------------------------------------------- @@ -191,13 +155,7 @@ def render_ufacet_scan(figure_control, scan): def construct_raster_survey_scan( - solar_field, - scan_segment_spec, - n_horizontal, - n_vertical, - lead_in, - run_past, - fly_forward_backward, + solar_field, scan_segment_spec, n_horizontal, n_vertical, lead_in, run_past, fly_forward_backward ): # Construct segments spanning the region of interest. heliostat_xyz_list = solar_field.heliostat_origin_xyz_list @@ -260,9 +218,7 @@ def construct_raster_survey_scan( # Load solar field data. file_field = './data/Solar_Field.csv' file_centroids_offsets = './data/Facets_Centroids.csv' - solar_field = SolarField( - file_field=file_field, file_centroids_offsets=file_centroids_offsets - ) + solar_field = SolarField(file_field=file_field, file_centroids_offsets=file_centroids_offsets) # Define tracking time. # aimpoint = [60.0, 8.8, 28.9] @@ -363,13 +319,7 @@ def construct_raster_survey_scan( scan_segment_spec['speed'] = speed # Construct the scan. scan = construct_raster_survey_scan( - solar_field, - scan_segment_spec, - n_horizontal, - n_vertical, - lead_in, - run_past, - fly_forward_backward, + solar_field, scan_segment_spec, n_horizontal, n_vertical, lead_in, run_past, fly_forward_backward ) # Construct the flight plan. @@ -387,23 +337,13 @@ def construct_raster_survey_scan( # Flight over solar field draw style. rcfosf_default = rcfosf.default() - rcfosf_vfield = rcfosf.RenderControlFlightOverSolarField( - solar_field_style=rcsf.heliostat_vector_field_outlines() - ) + rcfosf_vfield = rcfosf.RenderControlFlightOverSolarField(solar_field_style=rcsf.heliostat_vector_field_outlines()) # Draw the flight plan. 
- fosf.draw_flight_over_solar_field( - figure_control, flight_over_solar_field, rcfosf_default, vs.view_spec_3d() - ) - fosf.draw_flight_over_solar_field( - figure_control, flight_over_solar_field, rcfosf_vfield, vs.view_spec_xy() - ) - fosf.draw_flight_over_solar_field( - figure_control, flight_over_solar_field, rcfosf_default, vs.view_spec_xz() - ) - fosf.draw_flight_over_solar_field( - figure_control, flight_over_solar_field, rcfosf_default, vs.view_spec_yz() - ) + fosf.draw_flight_over_solar_field(figure_control, flight_over_solar_field, rcfosf_default, vs.view_spec_3d()) + fosf.draw_flight_over_solar_field(figure_control, flight_over_solar_field, rcfosf_vfield, vs.view_spec_xy()) + fosf.draw_flight_over_solar_field(figure_control, flight_over_solar_field, rcfosf_default, vs.view_spec_xz()) + fosf.draw_flight_over_solar_field(figure_control, flight_over_solar_field, rcfosf_default, vs.view_spec_yz()) # # Summarize. # print('\n\nFigure Summary:') diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/04_main_ufacet_xy_analysis.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/04_main_ufacet_xy_analysis.py index a14a07b1..d0d27d06 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/04_main_ufacet_xy_analysis.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/04_main_ufacet_xy_analysis.py @@ -58,21 +58,13 @@ def construct_ufacet_scan_pass(solar_field, lead_in, run_past): # scan_parameters['camera'] = cam.sony_alpha_20mm_portrait() # Camera model. scan_parameters['camera'] = cam.ultra_wide_angle() # Camera model. # scan_parameters['camera'] = cam.mavic_zoom() # Camera model. - scan_parameters['section_plane_tolerance'] = ( - 3 # m. Lateral distance to include heliostats in section. - ) - scan_parameters['p_margin'] = ( - 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. - ) - scan_parameters['altitude_margin'] = ( - 2.5 # m. Clearance of highest possible heliostat point. - ) + scan_parameters['section_plane_tolerance'] = 3 # m. Lateral distance to include heliostats in section. + scan_parameters['p_margin'] = 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. + scan_parameters['altitude_margin'] = 2.5 # m. Clearance of highest possible heliostat point. scan_parameters['maximum_safe_altitude'] = ( 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC ) - scan_parameters['maximum_target_lookback'] = ( - 3 # Number of heliostats to look back for reflection targets. - ) + scan_parameters['maximum_target_lookback'] = 3 # Number of heliostats to look back for reflection targets. scan_parameters['gaze_tolerance'] = np.deg2rad( 1 ) # Uncertainty in gaze angle. True angle is +/- tolerance from nominal. @@ -85,26 +77,11 @@ def construct_ufacet_scan_pass(solar_field, lead_in, run_past): E06_x = 53.5 # m. E07_x = 63.4 # m. E08_x = 73.2 # m. - segment_xy_E04 = [ - [E04_x, R05_y], - [E04_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E05 = [ - [E05_x, R05_y], - [E05_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E06 = [ - [E06_x, R05_y], - [E06_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E07 = [ - [E07_x, R05_y], - [E07_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E08 = [ - [E08_x, R05_y], - [E08_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E04 = [[E04_x, R05_y], [E04_x, R14_y]] # ?? 
SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E05 = [[E05_x, R05_y], [E05_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E06 = [[E06_x, R05_y], [E06_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E07 = [[E07_x, R05_y], [E07_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E08 = [[E08_x, R05_y], [E08_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN # Construct individual UFACET scan passes. # # ufacet_pass_E04 = usp.UfacetScanPass(solar_field, segment_xy_E04, scan_parameters) # ?? SCAFFOLDING RCB -- CRASHES @@ -113,17 +90,10 @@ def construct_ufacet_scan_pass(solar_field, lead_in, run_past): ufacet_pass_E07 = usp.UfacetScanPass(solar_field, segment_xy_E07, scan_parameters) ufacet_pass_E08 = usp.UfacetScanPass(solar_field, segment_xy_E08, scan_parameters) - ufacet_scan_pass_list = [ - ufacet_pass_E05, - ufacet_pass_E06, - ufacet_pass_E07, - ufacet_pass_E08, - ] + ufacet_scan_pass_list = [ufacet_pass_E05, ufacet_pass_E06, ufacet_pass_E07, ufacet_pass_E08] # Construct the scan. - scan = Scan.construct_scan_given_UFACET_scan_passes( - ufacet_scan_pass_list, lead_in, run_past - ) + scan = Scan.construct_scan_given_UFACET_scan_passes(ufacet_scan_pass_list, lead_in, run_past) # Return. return scan @@ -138,21 +108,13 @@ def construct_ufacet_scan_passes(solar_field, lead_in, run_past): # scan_parameters['camera'] = cam.sony_alpha_20mm_portrait() # Camera model. scan_parameters['camera'] = cam.ultra_wide_angle() # Camera model. # scan_parameters['camera'] = cam.mavic_zoom() # Camera model. - scan_parameters['section_plane_tolerance'] = ( - 3 # m. Lateral distance to include heliostats in section. - ) - scan_parameters['p_margin'] = ( - 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. - ) - scan_parameters['altitude_margin'] = ( - 2.5 # m. Clearance of highest possible heliostat point. - ) + scan_parameters['section_plane_tolerance'] = 3 # m. Lateral distance to include heliostats in section. + scan_parameters['p_margin'] = 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. + scan_parameters['altitude_margin'] = 2.5 # m. Clearance of highest possible heliostat point. scan_parameters['maximum_safe_altitude'] = ( 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC ) - scan_parameters['maximum_target_lookback'] = ( - 3 # Number of heliostats to look back for reflection targets. - ) + scan_parameters['maximum_target_lookback'] = 3 # Number of heliostats to look back for reflection targets. scan_parameters['gaze_tolerance'] = np.deg2rad( 1 ) # Uncertainty in gaze angle. True angle is +/- tolerance from nominal. @@ -165,26 +127,11 @@ def construct_ufacet_scan_passes(solar_field, lead_in, run_past): E06_x = 53.5 # m. E07_x = 63.4 # m. E08_x = 73.2 # m. - segment_xy_E04 = [ - [E04_x, R05_y], - [E04_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E05 = [ - [E05_x, R05_y], - [E05_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E06 = [ - [E06_x, R05_y], - [E06_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E07 = [ - [E07_x, R05_y], - [E07_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E08 = [ - [E08_x, R05_y], - [E08_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E04 = [[E04_x, R05_y], [E04_x, R14_y]] # ?? 
SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E05 = [[E05_x, R05_y], [E05_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E06 = [[E06_x, R05_y], [E06_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E07 = [[E07_x, R05_y], [E07_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E08 = [[E08_x, R05_y], [E08_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN # Construct individual UFACET scan passes. # # ufacet_pass_E04 = usp.UfacetScanPass(solar_field, segment_xy_E04, scan_parameters) # ?? SCAFFOLDING RCB -- CRASHES @@ -193,17 +140,10 @@ def construct_ufacet_scan_passes(solar_field, lead_in, run_past): ufacet_pass_E07 = usp.UfacetScanPass(solar_field, segment_xy_E07, scan_parameters) ufacet_pass_E08 = usp.UfacetScanPass(solar_field, segment_xy_E08, scan_parameters) - ufacet_scan_pass_list = [ - ufacet_pass_E05, - ufacet_pass_E06, - ufacet_pass_E07, - ufacet_pass_E08, - ] + ufacet_scan_pass_list = [ufacet_pass_E05, ufacet_pass_E06, ufacet_pass_E07, ufacet_pass_E08] # Construct the scan. - scan = Scan.construct_scan_given_UFACET_scan_passes( - ufacet_scan_pass_list, lead_in, run_past - ) + scan = Scan.construct_scan_given_UFACET_scan_passes(ufacet_scan_pass_list, lead_in, run_past) # Return. return scan @@ -218,21 +158,13 @@ def construct_ufacet_scan(solar_field, lead_in, run_past): # scan_parameters['camera'] = cam.sony_alpha_20mm_portrait() # Camera model. scan_parameters['camera'] = cam.ultra_wide_angle() # Camera model. # scan_parameters['camera'] = cam.mavic_zoom() # Camera model. - scan_parameters['section_plane_tolerance'] = ( - 3 # m. Lateral distance to include heliostats in section. - ) - scan_parameters['p_margin'] = ( - 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. - ) - scan_parameters['altitude_margin'] = ( - 2.5 # m. Clearance of highest possible heliostat point. - ) + scan_parameters['section_plane_tolerance'] = 3 # m. Lateral distance to include heliostats in section. + scan_parameters['p_margin'] = 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. + scan_parameters['altitude_margin'] = 2.5 # m. Clearance of highest possible heliostat point. scan_parameters['maximum_safe_altitude'] = ( 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC ) - scan_parameters['maximum_target_lookback'] = ( - 3 # Number of heliostats to look back for reflection targets. - ) + scan_parameters['maximum_target_lookback'] = 3 # Number of heliostats to look back for reflection targets. scan_parameters['gaze_tolerance'] = np.deg2rad( 1 ) # Uncertainty in gaze angle. True angle is +/- tolerance from nominal. @@ -245,26 +177,11 @@ def construct_ufacet_scan(solar_field, lead_in, run_past): E06_x = 53.5 # m. E07_x = 63.4 # m. E08_x = 73.2 # m. - segment_xy_E04 = [ - [E04_x, R05_y], - [E04_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E05 = [ - [E05_x, R05_y], - [E05_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E06 = [ - [E06_x, R05_y], - [E06_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E07 = [ - [E07_x, R05_y], - [E07_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E08 = [ - [E08_x, R05_y], - [E08_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E04 = [[E04_x, R05_y], [E04_x, R14_y]] # ?? 
SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E05 = [[E05_x, R05_y], [E05_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E06 = [[E06_x, R05_y], [E06_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E07 = [[E07_x, R05_y], [E07_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E08 = [[E08_x, R05_y], [E08_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN # Construct individual UFACET scan passes. # # ufacet_pass_E04 = usp.UfacetScanPass(solar_field, segment_xy_E04, scan_parameters) # ?? SCAFFOLDING RCB -- CRASHES @@ -273,17 +190,10 @@ def construct_ufacet_scan(solar_field, lead_in, run_past): ufacet_pass_E07 = usp.UfacetScanPass(solar_field, segment_xy_E07, scan_parameters) ufacet_pass_E08 = usp.UfacetScanPass(solar_field, segment_xy_E08, scan_parameters) - ufacet_scan_pass_list = [ - ufacet_pass_E05, - ufacet_pass_E06, - ufacet_pass_E07, - ufacet_pass_E08, - ] + ufacet_scan_pass_list = [ufacet_pass_E05, ufacet_pass_E06, ufacet_pass_E07, ufacet_pass_E08] # Construct the scan. - scan = Scan.construct_scan_given_UFACET_scan_passes( - ufacet_scan_pass_list, lead_in, run_past - ) + scan = Scan.construct_scan_given_UFACET_scan_passes(ufacet_scan_pass_list, lead_in, run_past) # Return. return scan @@ -351,9 +261,7 @@ def setup_render_control_scan_section_analysis(): analysis_render_control.draw_multi_heliostat_gaze_angle_example = False analysis_render_control.draw_multi_heliostat_gaze_angle_fill = False analysis_render_control.draw_multi_heliostat_gaze_angle_legend = False - analysis_render_control.draw_multi_heliostat_vertical_fov_required = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_multi_heliostat_vertical_fov_required = False # KEY SUMMARY analysis_render_control.draw_multi_heliostat_vertical_fov_required_legend = False analysis_render_control.draw_multi_heliostat_select_gaze = False # KEY SUMMARY # analysis_render_control.draw_multi_heliostat_select_gaze_per_heliostat = False @@ -381,9 +289,7 @@ def render_ufacet_scan(figure_control, scan): # Render the analysis. for scan_pass in scan.passes: - scan_pass.ufacet_scan_pass().draw_section_analysis( - figure_control, analysis_render_control - ) + scan_pass.ufacet_scan_pass().draw_section_analysis(figure_control, analysis_render_control) # ------------------------------------------------------------------------------------------------------- @@ -450,11 +356,11 @@ def render_ufacet_scan(figure_control, scan): n_vertical = 6 # Number of vertical passes. # UFACET scan parameters - candidate_margin_w = ( - 10.00 # m. Margin on either side of section plane to bring in heliostats. - ) + candidate_margin_w = 10.00 # m. Margin on either side of section plane to bring in heliostats. # Should be larger than side-to-side heliostat distance. - discard_threshold_p = 9.00 # m. Threshold to discarb heliostats that are close togethre ona section, presumably abreast. + discard_threshold_p = ( + 9.00 # m. Threshold to discarb heliostats that are close togethre ona section, presumably abreast. + ) # Should be smaller than minimum heliostat row spacing. # Scan flight parameters (both raster and UFACET). @@ -467,9 +373,7 @@ def render_ufacet_scan(figure_control, scan): # curve_keys_x = [-43.9] # ?? SCAFFOLDING RCB -- TEMPORARY # curve_keys_x = [92.7] # ?? 
SCAFFOLDING RCB -- TEMPORARY curve_keys_y = [136.9] * len(curve_keys_x) - curve_key_xy_list = [ - [key_x, key_y] for key_x, key_y in zip(curve_keys_x, curve_keys_y) - ] + curve_key_xy_list = [[key_x, key_y] for key_x, key_y in zip(curve_keys_x, curve_keys_y)] # UFACET (x,y) analysis. list_of_ideal_xy_lists, list_of_best_fit_segment_xys = pusxya.ufacet_xy_analysis( @@ -492,34 +396,19 @@ def render_ufacet_scan(figure_control, scan): # UFACET section analysis. section_list = pussc.construct_ufacet_sections( - solar_field, - list_of_best_fit_segment_xys, - candidate_margin_w, - discard_threshold_p, + solar_field, list_of_best_fit_segment_xys, candidate_margin_w, discard_threshold_p ) # Draw UFACET section analysis. render_control_scan_section_setup = setup_render_control_scan_section_setup() pusscr.draw_construct_ufacet_sections( - figure_control, - solar_field, - section_list, - vs.view_spec_3d(), - render_control_scan_section_setup, + figure_control, solar_field, section_list, vs.view_spec_3d(), render_control_scan_section_setup ) pusscr.draw_construct_ufacet_sections( - figure_control, - solar_field, - section_list, - vs.view_spec_xy(), - render_control_scan_section_setup, + figure_control, solar_field, section_list, vs.view_spec_xy(), render_control_scan_section_setup ) pusscr.draw_construct_ufacet_sections( - figure_control, - solar_field, - section_list, - None, - render_control_scan_section_setup, + figure_control, solar_field, section_list, None, render_control_scan_section_setup ) # Use section view. # # Construct the scan. @@ -581,9 +470,7 @@ def render_ufacet_scan(figure_control, scan): if save_figures: print('\n\nSaving figures...') # Output directory. - output_path = os.path.join( - '..', ('output_' + datetime.now().strftime('%Y_%m_%d_%H%M')) - ) + output_path = os.path.join('..', ('output_' + datetime.now().strftime('%Y_%m_%d_%H%M'))) if not (os.path.exists(output_path)): os.makedirs(output_path) fm.save_all_figures(output_path) diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/05_main_ufacet_scan_pass_build.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/05_main_ufacet_scan_pass_build.py index 8fe851af..ae0f14fa 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/05_main_ufacet_scan_pass_build.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/05_main_ufacet_scan_pass_build.py @@ -114,9 +114,7 @@ def setup_render_control_scan_section_analysis(): analysis_render_control.draw_multi_heliostat_gaze_angle_example = False analysis_render_control.draw_multi_heliostat_gaze_angle_fill = False analysis_render_control.draw_multi_heliostat_gaze_angle_legend = False - analysis_render_control.draw_multi_heliostat_vertical_fov_required = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_multi_heliostat_vertical_fov_required = False # KEY SUMMARY analysis_render_control.draw_multi_heliostat_vertical_fov_required_legend = False analysis_render_control.draw_multi_heliostat_select_gaze = False # KEY SUMMARY # analysis_render_control.draw_multi_heliostat_select_gaze_per_heliostat = False @@ -172,27 +170,17 @@ def setup_render_control_scan_section_analysis(): # Per-run input parameters. # scan_type = 'Raster' - raster_scan_parameter_file = ( - 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY - ) + raster_scan_parameter_file = 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY scan_type = 'UFACET' - ufacet_scan_parameter_file = ( - 'DUMMY FILL IN LATER' # ?? 
SCAFFOLDING RCB -- TEMPORARY - ) + ufacet_scan_parameter_file = 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY elevation_offset = 0.0 # m. # Define solar field. solar_field_spec = {} solar_field_spec['name'] = 'Sandia NSTTF' solar_field_spec['short_name'] = 'NSTTF' - solar_field_spec['field_origin_lon_lat'] = ( - nll.LON_NSTTF_ORIGIN, - nll.LAT_NSTTF_ORIGIN, - ) - solar_field_spec['field_origin_lon_lat'] = ( - nll.LON_NSTTF_ORIGIN, - nll.LAT_NSTTF_ORIGIN, - ) + solar_field_spec['field_origin_lon_lat'] = (nll.LON_NSTTF_ORIGIN, nll.LAT_NSTTF_ORIGIN) + solar_field_spec['field_origin_lon_lat'] = (nll.LON_NSTTF_ORIGIN, nll.LAT_NSTTF_ORIGIN) solar_field_spec['field_heliostat_file'] = './data/NSTTF_Heliostats.csv' solar_field_spec['field_facet_centroids_file'] = './data/NSTTF_Facet_Centroids.csv' diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/06_main_planner_trial_study.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/06_main_planner_trial_study.py index ef6bfc1a..1e5f819e 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/06_main_planner_trial_study.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/06_main_planner_trial_study.py @@ -93,9 +93,7 @@ def setup_render_control_scan_section_analysis(): # analysis_render_control.draw_single_heliostat_constraints_assessed_normals = False # analysis_render_control.draw_single_heliostat_constraints_detail = False # analysis_render_control.draw_single_heliostat_constraints_all_targets = False - analysis_render_control.draw_single_heliostat_constraints_summary = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_single_heliostat_constraints_summary = False # KEY SUMMARY # analysis_render_control.draw_single_heliostat_constraints_gaze_example = False # analysis_render_control.draw_single_heliostat_constraints_gaze_example_C = C_draw analysis_render_control.draw_single_heliostat_constraints_legend = False @@ -117,9 +115,7 @@ def setup_render_control_scan_section_analysis(): analysis_render_control.draw_multi_heliostat_gaze_angle_example = False analysis_render_control.draw_multi_heliostat_gaze_angle_fill = False analysis_render_control.draw_multi_heliostat_gaze_angle_legend = False - analysis_render_control.draw_multi_heliostat_vertical_fov_required = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_multi_heliostat_vertical_fov_required = False # KEY SUMMARY analysis_render_control.draw_multi_heliostat_vertical_fov_required_legend = False # analysis_render_control.draw_multi_heliostat_select_gaze = False # KEY SUMMARY # analysis_render_control.draw_multi_heliostat_select_gaze_per_heliostat = False @@ -180,14 +176,10 @@ def setup_render_control_scan_section_analysis(): # Scan control parameters. # Raster. # scan_type = 'Raster' - raster_scan_parameter_file = ( - 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY - ) + raster_scan_parameter_file = 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY # UFACET. scan_type = 'UFACET' - ufacet_scan_parameter_file = ( - 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY - ) + ufacet_scan_parameter_file = 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY # Define UFACET control flags. ufacet_control_parameters = {} # Seed points. @@ -198,14 +190,10 @@ def setup_render_control_scan_section_analysis(): # ufacet_curve_keys_x = [92.7] # ?? SCAFFOLDING RCB -- TEMPORARY # ufacet_curve_keys_x = [112.2] # ?? 
SCAFFOLDING RCB -- TEMPORARY ufacet_curve_keys_y = [136.9] * len(ufacet_curve_keys_x) - ufacet_curve_key_xy_list = [ - [key_x, key_y] for key_x, key_y in zip(ufacet_curve_keys_x, ufacet_curve_keys_y) - ] + ufacet_curve_key_xy_list = [[key_x, key_y] for key_x, key_y in zip(ufacet_curve_keys_x, ufacet_curve_keys_y)] ufacet_control_parameters['curve_key_xy_list'] = ufacet_curve_key_xy_list # Maximum altitude. - ufacet_control_parameters['maximum_altitude'] = ( - 25.0 # m. Maximum altitude, roughly AGL, including slope effects. - ) + ufacet_control_parameters['maximum_altitude'] = 25.0 # m. Maximum altitude, roughly AGL, including slope effects. # Gaze control. ufacet_control_parameters['gaze_type'] = 'constant' # 'constant' or 'linear' ufacet_control_parameters['delta_eta'] = np.deg2rad( @@ -216,14 +204,8 @@ def setup_render_control_scan_section_analysis(): solar_field_spec = {} solar_field_spec['name'] = 'Sandia NSTTF' solar_field_spec['short_name'] = 'NSTTF' - solar_field_spec['field_origin_lon_lat'] = ( - nll.LON_NSTTF_ORIGIN, - nll.LAT_NSTTF_ORIGIN, - ) - solar_field_spec['field_origin_lon_lat'] = ( - nll.LON_NSTTF_ORIGIN, - nll.LAT_NSTTF_ORIGIN, - ) + solar_field_spec['field_origin_lon_lat'] = (nll.LON_NSTTF_ORIGIN, nll.LAT_NSTTF_ORIGIN) + solar_field_spec['field_origin_lon_lat'] = (nll.LON_NSTTF_ORIGIN, nll.LAT_NSTTF_ORIGIN) solar_field_spec['field_heliostat_file'] = './data/NSTTF_Heliostats.csv' solar_field_spec['field_facet_centroids_file'] = './data/NSTTF_Facet_Centroids.csv' diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/07_main_generate_flight_plan_suite.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/07_main_generate_flight_plan_suite.py index ef6bfc1a..1e5f819e 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/07_main_generate_flight_plan_suite.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/07_main_generate_flight_plan_suite.py @@ -93,9 +93,7 @@ def setup_render_control_scan_section_analysis(): # analysis_render_control.draw_single_heliostat_constraints_assessed_normals = False # analysis_render_control.draw_single_heliostat_constraints_detail = False # analysis_render_control.draw_single_heliostat_constraints_all_targets = False - analysis_render_control.draw_single_heliostat_constraints_summary = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_single_heliostat_constraints_summary = False # KEY SUMMARY # analysis_render_control.draw_single_heliostat_constraints_gaze_example = False # analysis_render_control.draw_single_heliostat_constraints_gaze_example_C = C_draw analysis_render_control.draw_single_heliostat_constraints_legend = False @@ -117,9 +115,7 @@ def setup_render_control_scan_section_analysis(): analysis_render_control.draw_multi_heliostat_gaze_angle_example = False analysis_render_control.draw_multi_heliostat_gaze_angle_fill = False analysis_render_control.draw_multi_heliostat_gaze_angle_legend = False - analysis_render_control.draw_multi_heliostat_vertical_fov_required = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_multi_heliostat_vertical_fov_required = False # KEY SUMMARY analysis_render_control.draw_multi_heliostat_vertical_fov_required_legend = False # analysis_render_control.draw_multi_heliostat_select_gaze = False # KEY SUMMARY # analysis_render_control.draw_multi_heliostat_select_gaze_per_heliostat = False @@ -180,14 +176,10 @@ def setup_render_control_scan_section_analysis(): # Scan control parameters. # Raster. 
# scan_type = 'Raster' - raster_scan_parameter_file = ( - 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY - ) + raster_scan_parameter_file = 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY # UFACET. scan_type = 'UFACET' - ufacet_scan_parameter_file = ( - 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY - ) + ufacet_scan_parameter_file = 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY # Define UFACET control flags. ufacet_control_parameters = {} # Seed points. @@ -198,14 +190,10 @@ def setup_render_control_scan_section_analysis(): # ufacet_curve_keys_x = [92.7] # ?? SCAFFOLDING RCB -- TEMPORARY # ufacet_curve_keys_x = [112.2] # ?? SCAFFOLDING RCB -- TEMPORARY ufacet_curve_keys_y = [136.9] * len(ufacet_curve_keys_x) - ufacet_curve_key_xy_list = [ - [key_x, key_y] for key_x, key_y in zip(ufacet_curve_keys_x, ufacet_curve_keys_y) - ] + ufacet_curve_key_xy_list = [[key_x, key_y] for key_x, key_y in zip(ufacet_curve_keys_x, ufacet_curve_keys_y)] ufacet_control_parameters['curve_key_xy_list'] = ufacet_curve_key_xy_list # Maximum altitude. - ufacet_control_parameters['maximum_altitude'] = ( - 25.0 # m. Maximum altitude, roughly AGL, including slope effects. - ) + ufacet_control_parameters['maximum_altitude'] = 25.0 # m. Maximum altitude, roughly AGL, including slope effects. # Gaze control. ufacet_control_parameters['gaze_type'] = 'constant' # 'constant' or 'linear' ufacet_control_parameters['delta_eta'] = np.deg2rad( @@ -216,14 +204,8 @@ def setup_render_control_scan_section_analysis(): solar_field_spec = {} solar_field_spec['name'] = 'Sandia NSTTF' solar_field_spec['short_name'] = 'NSTTF' - solar_field_spec['field_origin_lon_lat'] = ( - nll.LON_NSTTF_ORIGIN, - nll.LAT_NSTTF_ORIGIN, - ) - solar_field_spec['field_origin_lon_lat'] = ( - nll.LON_NSTTF_ORIGIN, - nll.LAT_NSTTF_ORIGIN, - ) + solar_field_spec['field_origin_lon_lat'] = (nll.LON_NSTTF_ORIGIN, nll.LAT_NSTTF_ORIGIN) + solar_field_spec['field_origin_lon_lat'] = (nll.LON_NSTTF_ORIGIN, nll.LAT_NSTTF_ORIGIN) solar_field_spec['field_heliostat_file'] = './data/NSTTF_Heliostats.csv' solar_field_spec['field_facet_centroids_file'] = './data/NSTTF_Facet_Centroids.csv' diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/08_main_generate_half_and_half_flight_plan.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/08_main_generate_half_and_half_flight_plan.py index b737e650..391abe43 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/08_main_generate_half_and_half_flight_plan.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/08_main_generate_half_and_half_flight_plan.py @@ -93,9 +93,7 @@ def setup_render_control_scan_section_analysis(): # analysis_render_control.draw_single_heliostat_constraints_assessed_normals = False # analysis_render_control.draw_single_heliostat_constraints_detail = False # analysis_render_control.draw_single_heliostat_constraints_all_targets = False - analysis_render_control.draw_single_heliostat_constraints_summary = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_single_heliostat_constraints_summary = False # KEY SUMMARY # analysis_render_control.draw_single_heliostat_constraints_gaze_example = False # analysis_render_control.draw_single_heliostat_constraints_gaze_example_C = C_draw analysis_render_control.draw_single_heliostat_constraints_legend = False @@ -117,9 +115,7 @@ def setup_render_control_scan_section_analysis(): analysis_render_control.draw_multi_heliostat_gaze_angle_example = False 
analysis_render_control.draw_multi_heliostat_gaze_angle_fill = False analysis_render_control.draw_multi_heliostat_gaze_angle_legend = False - analysis_render_control.draw_multi_heliostat_vertical_fov_required = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_multi_heliostat_vertical_fov_required = False # KEY SUMMARY analysis_render_control.draw_multi_heliostat_vertical_fov_required_legend = False # analysis_render_control.draw_multi_heliostat_select_gaze = False # KEY SUMMARY # analysis_render_control.draw_multi_heliostat_select_gaze_per_heliostat = False @@ -180,14 +176,10 @@ def setup_render_control_scan_section_analysis(): # Scan control parameters. # Raster. # scan_type = 'Raster' - raster_scan_parameter_file = ( - 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY - ) + raster_scan_parameter_file = 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY # UFACET. scan_type = 'UFACET' - ufacet_scan_parameter_file = ( - 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY - ) + ufacet_scan_parameter_file = 'DUMMY FILL IN LATER' # ?? SCAFFOLDING RCB -- TEMPORARY # Define UFACET control flags. ufacet_control_parameters = {} # Seed points. @@ -198,15 +190,11 @@ def setup_render_control_scan_section_analysis(): # ufacet_curve_keys_x = [92.7] # ?? SCAFFOLDING RCB -- TEMPORARY # ufacet_curve_keys_x = [112.2] # ?? SCAFFOLDING RCB -- TEMPORARY ufacet_curve_keys_y = [136.9] * len(ufacet_curve_keys_x) - ufacet_curve_key_xy_list = [ - [key_x, key_y] for key_x, key_y in zip(ufacet_curve_keys_x, ufacet_curve_keys_y) - ] + ufacet_curve_key_xy_list = [[key_x, key_y] for key_x, key_y in zip(ufacet_curve_keys_x, ufacet_curve_keys_y)] ufacet_control_parameters['curve_key_xy_list'] = ufacet_curve_key_xy_list # Maximum altitude. # ufacet_control_parameters['maximum_altitude'] = 25.0 # m. Maximum altitude, roughly AGL, including slope effects. - ufacet_control_parameters['maximum_altitude'] = ( - 18.0 # m. Maximum altitude, roughly AGL, including slope effects. - ) + ufacet_control_parameters['maximum_altitude'] = 18.0 # m. Maximum altitude, roughly AGL, including slope effects. # Gaze control. ufacet_control_parameters['gaze_type'] = 'constant' # 'constant' or 'linear' ufacet_control_parameters['delta_eta'] = np.deg2rad( @@ -217,14 +205,8 @@ def setup_render_control_scan_section_analysis(): solar_field_spec = {} solar_field_spec['name'] = 'Sandia NSTTF' solar_field_spec['short_name'] = 'NSTTF' - solar_field_spec['field_origin_lon_lat'] = ( - nll.LON_NSTTF_ORIGIN, - nll.LAT_NSTTF_ORIGIN, - ) - solar_field_spec['field_origin_lon_lat'] = ( - nll.LON_NSTTF_ORIGIN, - nll.LAT_NSTTF_ORIGIN, - ) + solar_field_spec['field_origin_lon_lat'] = (nll.LON_NSTTF_ORIGIN, nll.LAT_NSTTF_ORIGIN) + solar_field_spec['field_origin_lon_lat'] = (nll.LON_NSTTF_ORIGIN, nll.LAT_NSTTF_ORIGIN) solar_field_spec['field_heliostat_file'] = './data/NSTTF_Heliostats.csv' solar_field_spec['field_facet_centroids_file'] = './data/NSTTF_Facet_Centroids.csv' diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/93_quick_nsttf_survey.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/93_quick_nsttf_survey.py index d441a188..c1cf9137 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/93_quick_nsttf_survey.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/93_quick_nsttf_survey.py @@ -35,9 +35,7 @@ # Load solar field data. 
file_field = './data/Solar_Field.csv' file_centroids_offsets = './data/Facets_Centroids.csv' - solar_field = SolarField( - file_field=file_field, file_centroids_offsets=file_centroids_offsets - ) + solar_field = SolarField(file_field=file_field, file_centroids_offsets=file_centroids_offsets) # Define tracking time. aimpoint = [60.0, 8.8, 28.9] @@ -110,21 +108,13 @@ # scan_parameters['camera'] = cam.sony_alpha_20mm_portrait() # Camera model. scan_parameters['camera'] = cam.ultra_wide_angle() # Camera model. # scan_parameters['camera'] = cam.mavic_zoom() # Camera model. - scan_parameters['section_plane_tolerance'] = ( - 3 # m. Lateral distance to include heliostats in section. - ) - scan_parameters['p_margin'] = ( - 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. - ) - scan_parameters['altitude_margin'] = ( - 2.5 # m. Clearance of highest possible heliostat point. - ) + scan_parameters['section_plane_tolerance'] = 3 # m. Lateral distance to include heliostats in section. + scan_parameters['p_margin'] = 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. + scan_parameters['altitude_margin'] = 2.5 # m. Clearance of highest possible heliostat point. scan_parameters['maximum_safe_altitude'] = ( 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC ) - scan_parameters['maximum_target_lookback'] = ( - 3 # Number of heliostats to look back for reflection targets. - ) + scan_parameters['maximum_target_lookback'] = 3 # Number of heliostats to look back for reflection targets. scan_parameters['gaze_tolerance'] = np.deg2rad( 1 ) # Uncertainty in gaze angle. True angle is +/- tolerance from nominal. @@ -136,22 +126,10 @@ E05_x = 43.9 # m. E06_x = 53.5 # m. E07_x = 63.4 # m. - segment_xy_E04 = [ - [E04_x, R05_y], - [E04_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E05 = [ - [E05_x, R05_y], - [E05_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E06 = [ - [E06_x, R05_y], - [E06_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN - segment_xy_E07 = [ - [E07_x, R05_y], - [E07_x, R14_y], - ] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E04 = [[E04_x, R05_y], [E04_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E05 = [[E05_x, R05_y], [E05_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E06 = [[E06_x, R05_y], [E06_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN + segment_xy_E07 = [[E07_x, R05_y], [E07_x, R14_y]] # ?? SCAFFOLDING RCB -- REPLACE WITH FLIGHT PLAN # Construct individual scan passes. # # pass_E04 = sp.ScanPass(solar_field, segment_xy_E04, scan_parameters) # ?? SCAFFOLDING RCB -- CRASHES @@ -170,9 +148,7 @@ # Write the flight plan file. # Output directory. 
- output_path = os.path.join( - '..', ('output_' + datetime.now().strftime('%Y_%m_%d_%H%M')) - ) + output_path = os.path.join('..', ('output_' + datetime.now().strftime('%Y_%m_%d_%H%M'))) if not (os.path.exists(output_path)): os.makedirs(output_path) flight_plan.save_to_lichi_csv(output_path, elevation_offset) diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/FlightOverSolarField.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/FlightOverSolarField.py index 38e26f0a..7080b6bb 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/FlightOverSolarField.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/FlightOverSolarField.py @@ -12,9 +12,7 @@ class FlightOverSolarField: Represents a flight over a solar field, for rendering and analysis. """ - def __init__( - self, solar_field, flight_plan # SolarField class object. - ): # FlightPlan class object. + def __init__(self, solar_field, flight_plan): # SolarField class object. # FlightPlan class object. super(FlightOverSolarField, self).__init__() self.solar_field = solar_field @@ -34,18 +32,14 @@ def draw(self, view, flight_over_solar_field_style): # -def draw_flight_over_solar_field( - figure_control, flight_over_solar_field, flight_over_solar_field_style, view_spec -): +def draw_flight_over_solar_field(figure_control, flight_over_solar_field, flight_over_solar_field_style, view_spec): # Assumes that solar field and flight plan are already set up with heliosat configurations, waypoints, etc. # Construct title. title = flight_over_solar_field.flight_plan.name name = flight_over_solar_field.flight_plan.short_name # Setup figure. - fig_record = fm.setup_figure_for_3d_data( - figure_control, rca.meters(), view_spec, title=title, name=name - ) + fig_record = fm.setup_figure_for_3d_data(figure_control, rca.meters(), view_spec, title=title, name=name) view = fig_record.view # Comment. fig_record.comment.append(title) diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/FlightPlan.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/FlightPlan.py index b147d617..6415afec 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/FlightPlan.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/FlightPlan.py @@ -45,9 +45,7 @@ def set_waypoints_from_scans(self, scans): # ?? SCAFFOLDING RCB -- MAKE WHETHER TO DO THIS CONTROLLABLE. # Add preliminary waypoint, to ensure gimbal is established for first scan pass. if len(waypoint_list) == 0: - print( - 'ERROR: In FlightPlan.set_waypoints_from_scans(), unexpected empty waypoint list encountered.' - ) + print('ERROR: In FlightPlan.set_waypoints_from_scans(), unexpected empty waypoint list encountered.') assert False first_scan_waypt = waypoint_list[0] first_scan_x = first_scan_waypt.xyz[0] @@ -73,9 +71,7 @@ def set_waypoint_numbers(self): def waypoints(self): if self._waypoint_list == None: - print( - 'ERROR: In FlightPlan.waypoints(), attempt to fetch unset _waypoint_list.' - ) + print('ERROR: In FlightPlan.waypoints(), attempt to fetch unset _waypoint_list.') assert False return self._waypoint_list @@ -119,17 +115,13 @@ def waypoints(self): # i += 1 # output_stream.close() - def save_to_litchi_csv( - self, output_path, elevation_offset # Directory to write to. - ): # m. + def save_to_litchi_csv(self, output_path, elevation_offset): # Directory to write to. # m. # Construct input template file path. 
template_dir_path = os.path.join('..', 'U_Code_data', self.locale) template_base_name = 'Litchi_Template' locale_file_str = ft.convert_string_to_file_body(self.locale) launch_file_str = ft.convert_string_to_file_body(self.launch_name) - template_file_name = ( - template_base_name + '_' + locale_file_str + '_' + launch_file_str + '.csv' - ) + template_file_name = template_base_name + '_' + locale_file_str + '_' + launch_file_str + '.csv' template_path_file = os.path.join(template_dir_path, template_file_name) # Read input template. template_lines = [] @@ -140,9 +132,7 @@ def save_to_litchi_csv( # Check input. if len(template_lines) < 3: - print( - 'ERROR: In FlightPlan.save_to_litchi_csv(), fewer than three lines in template.' - ) + print('ERROR: In FlightPlan.save_to_litchi_csv(), fewer than three lines in template.') assert False # Find indices of key column headings. @@ -178,9 +168,7 @@ def save_to_litchi_csv( data_list = data_template_list.copy() data_list[longitude_idx] = '{0:.8f}'.format(waypoint.lon) data_list[latitude_idx] = '{0:.8f}'.format(waypoint.lat) - data_list[altitude_idx] = '{0:.3f}'.format( - waypoint.xyz[2] + elevation_offset - ) + data_list[altitude_idx] = '{0:.3f}'.format(waypoint.xyz[2] + elevation_offset) data_list[heading_idx] = '{0:.6f}'.format(waypoint.heading_deg()) data_list[gimbal_pitch_idx] = '{0:.6f}'.format(waypoint.gimbal_pitch_deg()) data_line = ','.join(data_list) @@ -219,15 +207,11 @@ def draw(self, view, flight_plan_style): # -def construct_flight_plan_from_scan( - name, short_name, launch_name, scan -): # Scan object. +def construct_flight_plan_from_scan(name, short_name, launch_name, scan): # Scan object. return construct_flight_plan_from_scans(name, short_name, launch_name, [scan]) -def construct_flight_plan_from_scans( - name, short_name, launch_name, scans -): # List of scan opbjects. +def construct_flight_plan_from_scans(name, short_name, launch_name, scans): # List of scan opbjects. # Notify progress. 
print('Constructing flight plan...') diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/Heliostat.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/Heliostat.py index d5b9caa4..dedc5381 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/Heliostat.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/Heliostat.py @@ -53,61 +53,41 @@ def __init__( self.top_left_facet = self.facets[self.facet_dict[str(1)]] self.top_right_facet = self.facets[self.facet_dict[str(num_cols)]] self.bottom_right_facet = self.facets[self.facet_dict[str(num_facets)]] - self.bottom_left_facet = self.facets[ - self.facet_dict[str(num_facets - num_cols + 1)] - ] + self.bottom_left_facet = self.facets[self.facet_dict[str(num_facets - num_cols + 1)]] # Heliostat Corners [offsets in terms of heliostat's centroid] self.top_left_corner_offset = [ - x + y - for x, y in zip( - self.top_left_facet.centroid_offset, - self.top_left_facet.top_left_corner_offset, - ) + x + y for x, y in zip(self.top_left_facet.centroid_offset, self.top_left_facet.top_left_corner_offset) ] self.top_right_corner_offset = [ - x + y - for x, y in zip( - self.top_right_facet.centroid_offset, - self.top_right_facet.top_right_corner_offset, - ) + x + y for x, y in zip(self.top_right_facet.centroid_offset, self.top_right_facet.top_right_corner_offset) ] self.bottom_right_corner_offset = [ x + y - for x, y in zip( - self.bottom_right_facet.centroid_offset, - self.bottom_right_facet.bottom_right_corner_offset, - ) + for x, y in zip(self.bottom_right_facet.centroid_offset, self.bottom_right_facet.bottom_right_corner_offset) ] self.bottom_left_corner_offset = [ x + y - for x, y in zip( - self.bottom_left_facet.centroid_offset, - self.bottom_left_facet.bottom_left_corner_offset, - ) + for x, y in zip(self.bottom_left_facet.centroid_offset, self.bottom_left_facet.bottom_left_corner_offset) ] # Centroid - self.centroid = [ - origin[0], - origin[1], - origin[2] + pivot_offset, - ] # Origin is at torque tube center. - - self.az = np.deg2rad( - 180 - ) # (az,el) = (180,90) degrees corresponds to pointing straight up, - self.el = np.deg2rad( - 90 - ) # as if transitioned by tilting up from face south orientation. + self.centroid = [origin[0], origin[1], origin[2] + pivot_offset] # Origin is at torque tube center. + + self.az = np.deg2rad(180) # (az,el) = (180,90) degrees corresponds to pointing straight up, + self.el = np.deg2rad(90) # as if transitioned by tilting up from face south orientation. self.surface_normal = [0, 0, 1] # self.rx_rotation = np.identity(3) self.rz_rotation = np.identity(3) self.set_corner_positions_in_space() # Tracking - self._aimpoint_xyz = None # (x,y,y) in m. Do not access this member externally; use aimpoint_xyz() function instead. - self._when_ymdhmsz = None # (y,m,d,h,m,s,z). Do not access this member externally; use when_ymdhmsz() function instead. + self._aimpoint_xyz = ( + None # (x,y,y) in m. Do not access this member externally; use aimpoint_xyz() function instead. + ) + self._when_ymdhmsz = ( + None # (y,m,d,h,m,s,z). Do not access this member externally; use when_ymdhmsz() function instead. 
+ ) def facets_read_file(self, file): with open(file) as csvfile: @@ -124,12 +104,7 @@ def facets_read_file(self, file): name, x, y, z = str(row[0]), float(row[1]), float(row[2]), float(row[3]) # creating facet - facet = Facet( - name=name, - centroid_offset=[x, y, z], - width=self.facet_width, - height=self.facet_height, - ) + facet = Facet(name=name, centroid_offset=[x, y, z], width=self.facet_width, height=self.facet_height) # storing facets.append(facet) @@ -142,17 +117,13 @@ def facets_read_file(self, file): def aimpoint_xyz(self): if self._aimpoint_xyz == None: - print( - 'ERROR: In Heliostat.aimpoint_xyz(), attempt to fetch unset _aimpoint_xyz.' - ) + print('ERROR: In Heliostat.aimpoint_xyz(), attempt to fetch unset _aimpoint_xyz.') assert False return self._aimpoint_xyz def when_ymdhmsz(self): if self._when_ymdhmsz == None: - print( - 'ERROR: In Heliostat.when_ymdhmsz(), attempt to fetch unset _when_ymdhmsz.' - ) + print('ERROR: In Heliostat.when_ymdhmsz(), attempt to fetch unset _when_ymdhmsz.') assert False return self._when_ymdhmsz @@ -168,18 +139,14 @@ def surface_normal_ray(self, base, length): ray = [tail, head] return ray - def compute_tracking_configuration( - self, aimpoint_xyz, location_lon_lat, when_ymdhmsz - ): + def compute_tracking_configuration(self, aimpoint_xyz, location_lon_lat, when_ymdhmsz): # Heliostat centroid coordinates. # Coordinates are (x,z) center, z=0 is at torque tube height. h_tube = np.array(self.origin) h = h_tube # Later, add correction for center facet offset. # Compute heliostat surface normal which tracks the sun to the aimpoint. - n_xyz = sun_track.tracking_surface_normal_xyz( - h, aimpoint_xyz, location_lon_lat, when_ymdhmsz - ) + n_xyz = sun_track.tracking_surface_normal_xyz(h, aimpoint_xyz, location_lon_lat, when_ymdhmsz) # Compute heliostat configuration. return hc.heliostat_configuration_given_surface_normal_xyz(n_xyz) @@ -197,12 +164,7 @@ def compute_stow_configuration(self): def corners(self): # Assumes that heliostat coordinates have been set, and the corners have been set. # Later we can add a more meaningful check for this. - return [ - self.top_left_corner, - self.top_right_corner, - self.bottom_right_corner, - self.bottom_left_corner, - ] + return [self.top_left_corner, self.top_right_corner, self.bottom_right_corner, self.bottom_left_corner] # MODIFICATION @@ -211,9 +173,7 @@ def set_tracking(self, aimpoint_xyz, location_lon_lat, when_ymdhmsz): self._aimpoint_xyz = aimpoint_xyz self._when_ymdhmsz = when_ymdhmsz # Set tracking configuration. - h_config = self.compute_tracking_configuration( - aimpoint_xyz, location_lon_lat, when_ymdhmsz - ) + h_config = self.compute_tracking_configuration(aimpoint_xyz, location_lon_lat, when_ymdhmsz) self.set_configuration(h_config, clear_tracking=False) def set_stow(self): @@ -255,14 +215,10 @@ def set_configuration(self, h_config, clear_tracking=True): vector = np.array([0, 0, self.pivot_offset]) vector_offset = Rz_rotation.dot(Rx_rotation).dot(vector) - centroid = ( - np.array(self.origin) + vector_offset - ) # Origin is at torque tube center. + centroid = np.array(self.origin) + vector_offset # Origin is at torque tube center. hel_rotation = Rz_rotation.dot(Rx_rotation) - surface_normal = hel_rotation.dot( - [0, 0, 1] - ) # Before rotation, heliostat is face up. + surface_normal = hel_rotation.dot([0, 0, 1]) # Before rotation, heliostat is face up. self.el = el self.az = az @@ -276,18 +232,10 @@ def set_corner_positions_in_space(self): # Sets corner positions given heliostat configuration. 
hel_centroid = np.array(self.centroid) hel_rotation = self.rz_rotation.dot(self.rx_rotation) - self.top_left_corner = hel_centroid + hel_rotation.dot( - np.array(self.top_left_corner_offset) - ) - self.top_right_corner = hel_centroid + hel_rotation.dot( - np.array(self.top_right_corner_offset) - ) - self.bottom_right_corner = hel_centroid + hel_rotation.dot( - np.array(self.bottom_right_corner_offset) - ) - self.bottom_left_corner = hel_centroid + hel_rotation.dot( - np.array(self.bottom_left_corner_offset) - ) + self.top_left_corner = hel_centroid + hel_rotation.dot(np.array(self.top_left_corner_offset)) + self.top_right_corner = hel_centroid + hel_rotation.dot(np.array(self.top_right_corner_offset)) + self.bottom_right_corner = hel_centroid + hel_rotation.dot(np.array(self.bottom_right_corner_offset)) + self.bottom_left_corner = hel_centroid + hel_rotation.dot(np.array(self.bottom_left_corner_offset)) # RENDERING @@ -303,12 +251,7 @@ def draw(self, view, heliostat_styles): # Outline. if heliostat_style.draw_outline: - corners = [ - self.top_left_corner, - self.top_right_corner, - self.bottom_right_corner, - self.bottom_left_corner, - ] + corners = [self.top_left_corner, self.top_right_corner, self.bottom_right_corner, self.bottom_left_corner] view.draw_xyz_list(corners, close=True, style=heliostat_style.outline_style) # Facets. @@ -316,40 +259,24 @@ def draw(self, view, heliostat_styles): hel_rotation = self.rz_rotation.dot(self.rx_rotation) hel_centroid = self.centroid for facet in self.facets: - facet.draw( - view, heliostat_style.facet_styles, hel_centroid, hel_rotation - ) + facet.draw(view, heliostat_style.facet_styles, hel_centroid, hel_rotation) # Surface normal. if heliostat_style.draw_surface_normal: # Construct ray. - surface_normal_ray = self.surface_normal_ray( - self.centroid, heliostat_style.surface_normal_length - ) + surface_normal_ray = self.surface_normal_ray(self.centroid, heliostat_style.surface_normal_length) # Draw ray and its base. - view.draw_xyz( - self.centroid, style=heliostat_style.surface_normal_base_style - ) - view.draw_xyz_list( - surface_normal_ray, style=heliostat_style.surface_normal_style - ) + view.draw_xyz(self.centroid, style=heliostat_style.surface_normal_base_style) + view.draw_xyz_list(surface_normal_ray, style=heliostat_style.surface_normal_style) # Surface normal drawn at corners. # (Not the surface normal at the corner. Facet curvature is not shown.) if heliostat_style.draw_surface_normal_at_corners: # Construct rays. - top_left_ray = self.surface_normal_ray( - self.top_left_corner, heliostat_style.corner_normal_length - ) - top_right_ray = self.surface_normal_ray( - self.top_right_corner, heliostat_style.corner_normal_length - ) - bottom_left_ray = self.surface_normal_ray( - self.bottom_left_corner, heliostat_style.corner_normal_length - ) - bottom_right_ray = self.surface_normal_ray( - self.bottom_right_corner, heliostat_style.corner_normal_length - ) + top_left_ray = self.surface_normal_ray(self.top_left_corner, heliostat_style.corner_normal_length) + top_right_ray = self.surface_normal_ray(self.top_right_corner, heliostat_style.corner_normal_length) + bottom_left_ray = self.surface_normal_ray(self.bottom_left_corner, heliostat_style.corner_normal_length) + bottom_right_ray = self.surface_normal_ray(self.bottom_right_corner, heliostat_style.corner_normal_length) rays = [top_left_ray, top_right_ray, bottom_left_ray, bottom_right_ray] # Draw each ray and its base. 
for base, ray in zip(corners, rays): @@ -358,6 +285,4 @@ def draw(self, view, heliostat_styles): # Name. if heliostat_style.draw_name: - view.draw_xyz_text( - self.centroid, self.name, style=heliostat_style.name_style - ) + view.draw_xyz_text(self.centroid, self.name, style=heliostat_style.name_style) diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/RenderControlScanSectionAnalysis.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/RenderControlScanSectionAnalysis.py index 78e45d15..cedc0a54 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/RenderControlScanSectionAnalysis.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/RenderControlScanSectionAnalysis.py @@ -91,175 +91,69 @@ def __init__( self.draw_single_heliostat_analysis = draw_single_heliostat_analysis self.draw_single_heliostat_analysis_list = draw_single_heliostat_analysis_list self.draw_single_heliostat_constraints = draw_single_heliostat_constraints - self.draw_single_heliostat_constraints_heliostats = ( - draw_single_heliostat_constraints_heliostats - ) - self.draw_single_heliostat_constraints_mnsa_ray = ( - draw_single_heliostat_constraints_mnsa_ray - ) - self.draw_single_heliostat_constraints_mxsa_ray = ( - draw_single_heliostat_constraints_mxsa_ray - ) - self.draw_single_heliostat_constraints_key_points = ( - draw_single_heliostat_constraints_key_points - ) - self.draw_single_heliostat_constraints_assessed_normals = ( - draw_single_heliostat_constraints_assessed_normals - ) - self.draw_single_heliostat_constraints_detail = ( - draw_single_heliostat_constraints_detail - ) - self.draw_single_heliostat_constraints_all_targets = ( - draw_single_heliostat_constraints_all_targets - ) - self.draw_single_heliostat_constraints_summary = ( - draw_single_heliostat_constraints_summary - ) - self.draw_single_heliostat_constraints_gaze_example = ( - draw_single_heliostat_constraints_gaze_example - ) - self.draw_single_heliostat_constraints_gaze_example_C = ( - draw_single_heliostat_constraints_gaze_example_C - ) - self.draw_single_heliostat_constraints_legend = ( - draw_single_heliostat_constraints_legend - ) + self.draw_single_heliostat_constraints_heliostats = draw_single_heliostat_constraints_heliostats + self.draw_single_heliostat_constraints_mnsa_ray = draw_single_heliostat_constraints_mnsa_ray + self.draw_single_heliostat_constraints_mxsa_ray = draw_single_heliostat_constraints_mxsa_ray + self.draw_single_heliostat_constraints_key_points = draw_single_heliostat_constraints_key_points + self.draw_single_heliostat_constraints_assessed_normals = draw_single_heliostat_constraints_assessed_normals + self.draw_single_heliostat_constraints_detail = draw_single_heliostat_constraints_detail + self.draw_single_heliostat_constraints_all_targets = draw_single_heliostat_constraints_all_targets + self.draw_single_heliostat_constraints_summary = draw_single_heliostat_constraints_summary + self.draw_single_heliostat_constraints_gaze_example = draw_single_heliostat_constraints_gaze_example + self.draw_single_heliostat_constraints_gaze_example_C = draw_single_heliostat_constraints_gaze_example_C + self.draw_single_heliostat_constraints_legend = draw_single_heliostat_constraints_legend self.draw_single_heliostat_gaze_angle = draw_single_heliostat_gaze_angle - self.draw_single_heliostat_gaze_angle_mnsa = ( - draw_single_heliostat_gaze_angle_mnsa - ) - self.draw_single_heliostat_gaze_angle_mxsa = ( - draw_single_heliostat_gaze_angle_mxsa - ) - self.draw_single_heliostat_gaze_angle_critical = ( - 
draw_single_heliostat_gaze_angle_critical - ) - self.draw_single_heliostat_gaze_angle_example = ( - draw_single_heliostat_gaze_angle_example - ) - self.draw_single_heliostat_gaze_angle_fill = ( - draw_single_heliostat_gaze_angle_fill - ) - self.draw_single_heliostat_gaze_angle_legend = ( - draw_single_heliostat_gaze_angle_legend - ) + self.draw_single_heliostat_gaze_angle_mnsa = draw_single_heliostat_gaze_angle_mnsa + self.draw_single_heliostat_gaze_angle_mxsa = draw_single_heliostat_gaze_angle_mxsa + self.draw_single_heliostat_gaze_angle_critical = draw_single_heliostat_gaze_angle_critical + self.draw_single_heliostat_gaze_angle_example = draw_single_heliostat_gaze_angle_example + self.draw_single_heliostat_gaze_angle_fill = draw_single_heliostat_gaze_angle_fill + self.draw_single_heliostat_gaze_angle_legend = draw_single_heliostat_gaze_angle_legend self.draw_single_heliostat_select_gaze = draw_single_heliostat_select_gaze - self.draw_single_heliostat_select_gaze_shifted = ( - draw_single_heliostat_select_gaze_shifted - ) - self.draw_single_heliostat_select_gaze_envelope = ( - draw_single_heliostat_select_gaze_envelope - ) - self.draw_single_heliostat_select_gaze_shrunk = ( - draw_single_heliostat_select_gaze_shrunk - ) - self.draw_single_heliostat_select_gaze_clipped = ( - draw_single_heliostat_select_gaze_clipped - ) - self.draw_single_heliostat_select_gaze_selected = ( - draw_single_heliostat_select_gaze_selected - ) - self.draw_single_heliostat_select_gaze_mnsa = ( - draw_single_heliostat_select_gaze_mnsa - ) - self.draw_single_heliostat_select_gaze_mxsa = ( - draw_single_heliostat_select_gaze_mxsa - ) - self.draw_single_heliostat_select_gaze_critical = ( - draw_single_heliostat_select_gaze_critical - ) - self.draw_single_heliostat_select_gaze_fill = ( - draw_single_heliostat_select_gaze_fill - ) - self.draw_single_heliostat_select_gaze_legend = ( - draw_single_heliostat_select_gaze_legend - ) + self.draw_single_heliostat_select_gaze_shifted = draw_single_heliostat_select_gaze_shifted + self.draw_single_heliostat_select_gaze_envelope = draw_single_heliostat_select_gaze_envelope + self.draw_single_heliostat_select_gaze_shrunk = draw_single_heliostat_select_gaze_shrunk + self.draw_single_heliostat_select_gaze_clipped = draw_single_heliostat_select_gaze_clipped + self.draw_single_heliostat_select_gaze_selected = draw_single_heliostat_select_gaze_selected + self.draw_single_heliostat_select_gaze_mnsa = draw_single_heliostat_select_gaze_mnsa + self.draw_single_heliostat_select_gaze_mxsa = draw_single_heliostat_select_gaze_mxsa + self.draw_single_heliostat_select_gaze_critical = draw_single_heliostat_select_gaze_critical + self.draw_single_heliostat_select_gaze_fill = draw_single_heliostat_select_gaze_fill + self.draw_single_heliostat_select_gaze_legend = draw_single_heliostat_select_gaze_legend self.draw_multi_heliostat_gaze_angle = draw_multi_heliostat_gaze_angle - self.draw_multi_heliostat_gaze_angle_per_heliostat = ( - draw_multi_heliostat_gaze_angle_per_heliostat - ) - self.draw_multi_heliostat_gaze_angle_envelope = ( - draw_multi_heliostat_gaze_angle_envelope - ) + self.draw_multi_heliostat_gaze_angle_per_heliostat = draw_multi_heliostat_gaze_angle_per_heliostat + self.draw_multi_heliostat_gaze_angle_envelope = draw_multi_heliostat_gaze_angle_envelope self.draw_multi_heliostat_gaze_angle_mnsa = draw_multi_heliostat_gaze_angle_mnsa self.draw_multi_heliostat_gaze_angle_mxsa = draw_multi_heliostat_gaze_angle_mxsa - self.draw_multi_heliostat_gaze_angle_critical = ( - 
draw_multi_heliostat_gaze_angle_critical - ) - self.draw_multi_heliostat_gaze_angle_example = ( - draw_multi_heliostat_gaze_angle_example - ) + self.draw_multi_heliostat_gaze_angle_critical = draw_multi_heliostat_gaze_angle_critical + self.draw_multi_heliostat_gaze_angle_example = draw_multi_heliostat_gaze_angle_example self.draw_multi_heliostat_gaze_angle_fill = draw_multi_heliostat_gaze_angle_fill - self.draw_multi_heliostat_gaze_angle_legend = ( - draw_multi_heliostat_gaze_angle_legend - ) - self.draw_multi_heliostat_vertical_fov_required = ( - draw_multi_heliostat_vertical_fov_required - ) - self.draw_multi_heliostat_vertical_fov_required_mnsa = ( - draw_multi_heliostat_vertical_fov_required_mnsa - ) - self.draw_multi_heliostat_vertical_fov_required_mxsa = ( - draw_multi_heliostat_vertical_fov_required_mxsa - ) - self.draw_multi_heliostat_vertical_fov_required_critical = ( - draw_multi_heliostat_vertical_fov_required_critical - ) - self.draw_multi_heliostat_vertical_fov_required_camera = ( - draw_multi_heliostat_vertical_fov_required_camera - ) - self.draw_multi_heliostat_vertical_fov_required_legend = ( - draw_multi_heliostat_vertical_fov_required_legend - ) + self.draw_multi_heliostat_gaze_angle_legend = draw_multi_heliostat_gaze_angle_legend + self.draw_multi_heliostat_vertical_fov_required = draw_multi_heliostat_vertical_fov_required + self.draw_multi_heliostat_vertical_fov_required_mnsa = draw_multi_heliostat_vertical_fov_required_mnsa + self.draw_multi_heliostat_vertical_fov_required_mxsa = draw_multi_heliostat_vertical_fov_required_mxsa + self.draw_multi_heliostat_vertical_fov_required_critical = draw_multi_heliostat_vertical_fov_required_critical + self.draw_multi_heliostat_vertical_fov_required_camera = draw_multi_heliostat_vertical_fov_required_camera + self.draw_multi_heliostat_vertical_fov_required_legend = draw_multi_heliostat_vertical_fov_required_legend self.draw_multi_heliostat_select_gaze = draw_multi_heliostat_select_gaze - self.draw_multi_heliostat_select_gaze_shifted = ( - draw_multi_heliostat_select_gaze_shifted - ) - self.draw_multi_heliostat_select_gaze_envelope = ( - draw_multi_heliostat_select_gaze_envelope - ) - self.draw_multi_heliostat_select_gaze_shrunk = ( - draw_multi_heliostat_select_gaze_shrunk - ) - self.draw_multi_heliostat_select_gaze_clipped = ( - draw_multi_heliostat_select_gaze_clipped - ) - self.draw_multi_heliostat_select_gaze_selected = ( - draw_multi_heliostat_select_gaze_selected - ) - self.draw_multi_heliostat_select_gaze_mnsa = ( - draw_multi_heliostat_select_gaze_mnsa - ) - self.draw_multi_heliostat_select_gaze_mxsa = ( - draw_multi_heliostat_select_gaze_mxsa - ) - self.draw_multi_heliostat_select_gaze_critical = ( - draw_multi_heliostat_select_gaze_critical - ) - self.draw_multi_heliostat_select_gaze_fill = ( - draw_multi_heliostat_select_gaze_fill - ) - self.draw_multi_heliostat_select_gaze_legend = ( - draw_multi_heliostat_select_gaze_legend - ) + self.draw_multi_heliostat_select_gaze_shifted = draw_multi_heliostat_select_gaze_shifted + self.draw_multi_heliostat_select_gaze_envelope = draw_multi_heliostat_select_gaze_envelope + self.draw_multi_heliostat_select_gaze_shrunk = draw_multi_heliostat_select_gaze_shrunk + self.draw_multi_heliostat_select_gaze_clipped = draw_multi_heliostat_select_gaze_clipped + self.draw_multi_heliostat_select_gaze_selected = draw_multi_heliostat_select_gaze_selected + self.draw_multi_heliostat_select_gaze_mnsa = draw_multi_heliostat_select_gaze_mnsa + self.draw_multi_heliostat_select_gaze_mxsa = 
draw_multi_heliostat_select_gaze_mxsa + self.draw_multi_heliostat_select_gaze_critical = draw_multi_heliostat_select_gaze_critical + self.draw_multi_heliostat_select_gaze_fill = draw_multi_heliostat_select_gaze_fill + self.draw_multi_heliostat_select_gaze_legend = draw_multi_heliostat_select_gaze_legend self.draw_multi_heliostat_result = draw_multi_heliostat_result - self.draw_multi_heliostat_result_heliostats = ( - draw_multi_heliostat_result_heliostats - ) + self.draw_multi_heliostat_result_heliostats = draw_multi_heliostat_result_heliostats self.draw_multi_heliostat_result_mnsa_ray = draw_multi_heliostat_result_mnsa_ray self.draw_multi_heliostat_result_mxsa_ray = draw_multi_heliostat_result_mxsa_ray - self.draw_multi_heliostat_result_selected_cacg_line = ( - draw_multi_heliostat_result_selected_cacg_line - ) - self.draw_multi_heliostat_result_length_margin = ( - draw_multi_heliostat_result_length_margin - ) - self.draw_multi_heliostat_result_selected_cacg_segment = ( - draw_multi_heliostat_result_selected_cacg_segment - ) - self.draw_multi_heliostat_result_start_end_loci = ( - draw_multi_heliostat_result_start_end_loci - ) + self.draw_multi_heliostat_result_selected_cacg_line = draw_multi_heliostat_result_selected_cacg_line + self.draw_multi_heliostat_result_length_margin = draw_multi_heliostat_result_length_margin + self.draw_multi_heliostat_result_selected_cacg_segment = draw_multi_heliostat_result_selected_cacg_segment + self.draw_multi_heliostat_result_start_end_loci = draw_multi_heliostat_result_start_end_loci self.draw_multi_heliostat_result_legend = draw_multi_heliostat_result_legend self.draw_single_heliostat_etaC_dict = draw_single_heliostat_etaC_dict diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/RenderControlWayPoint.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/RenderControlWayPoint.py index d7cef128..7382d2e8 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/RenderControlWayPoint.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/RenderControlWayPoint.py @@ -22,9 +22,7 @@ def __init__( draw_position=True, position_style=rcps.marker(marker='.'), draw_stop=True, - stop_style=rcps.marker( - color='r', marker='x', markersize=7 - ), # Same color as heading. + stop_style=rcps.marker(color='r', marker='x', markersize=7), # Same color as heading. draw_heading=True, heading_scale=3, heading_style=rcps.outline(color='r'), @@ -39,10 +37,7 @@ def __init__( # outline_style = rcps.outline(), draw_idx=True, idx_style=rctxt.RenderControlText( - color='k', - fontsize='small', - horizontalalignment='right', - verticalalignment='top', + color='k', fontsize='small', horizontalalignment='right', verticalalignment='top' ), ): super(RenderControlWayPoint, self).__init__() diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/UfacetScanPass.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/UfacetScanPass.py index 69def21b..aa0feb33 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/UfacetScanPass.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/UfacetScanPass.py @@ -31,18 +31,14 @@ def __init__( # Constructed members. self.setup_section() # Sets up projection, heliostat names, and section context. self.pass_constraints = psusa.section_analysis( - self.section_context, - self.heliostat_name_list, - self.assess_heliostat_name_list, + self.section_context, self.heliostat_name_list, self.assess_heliostat_name_list ) def setup_section(self): # Construct section corresponding to this pass. 
# ?? SCAFFOLDING RCB -- ELIMINATE DUPLICATION, CONFUSION. # ?? SCAFFOLDING RCB -- RENAME, SO THAT "SECTION" IS NOT OVER-USED.. - self.section_context = ( - self.ufacet_scan_parameters.copy() - ) # Copy because we will add to this. + self.section_context = self.ufacet_scan_parameters.copy() # Copy because we will add to this. self.section_context['solar_field'] = self.solar_field self.section_context['view_spec'] = self.section[ 'view_spec' @@ -51,9 +47,7 @@ def setup_section(self): self.heliostat_name_list = self.section['selected_heliostat_name_list'] # Identify heliostats to assess along this pass. - self.assess_heliostat_name_list = ( - self.heliostat_name_list - ) # ?? SCAFFOLDING RCB -- ELIMINATE THIS DISTINCTION + self.assess_heliostat_name_list = self.heliostat_name_list # ?? SCAFFOLDING RCB -- ELIMINATE THIS DISTINCTION # Define the clipping box for rendering. p_min = 1e9 @@ -80,9 +74,7 @@ def waypoints(self): # Fetch path parameters. locale = self.ufacet_scan_parameters['locale'] view_spec = self.section['view_spec'] - pass_segment = self.pass_constraints[ - 'selected_cacg_segment' - ] # "cacg" == "constant altitude, constant gaze" + pass_segment = self.pass_constraints['selected_cacg_segment'] # "cacg" == "constant altitude, constant gaze" # Construct start and end (x,y,z) points. start_pq = pass_segment[0] # Lead-in distance added later. end_pq = pass_segment[1] # Run-past distance added later. @@ -103,11 +95,7 @@ def waypoints(self): start_eta = selected_cacg_etaC[0] end_eta = selected_cacg_etaC[0] else: - print( - 'ERROR: In UfacetScanPass.waypoints(), unexpected gaze_type="' - + str(gaze_type) - + '" encountered.' - ) + print('ERROR: In UfacetScanPass.waypoints(), unexpected gaze_type="' + str(gaze_type) + '" encountered.') assert False # Variable Gaze. # # ?? SCAFFOLDING RCB -- INCORRECT; SHOULD COMPUTE OPTIMUM COMPROMISE IN ANALYSIS. @@ -131,9 +119,7 @@ def waypoints(self): + ' m/sec.' ) # ?? RCB SCAFFOLDING -- STUB # Construct way points. - start_wpt = wp.WayPoint( - locale, start_xyz, theta, start_eta, stop=False, speed=speed - ) + start_wpt = wp.WayPoint(locale, start_xyz, theta, start_eta, stop=False, speed=speed) end_wpt = wp.WayPoint(locale, end_xyz, theta, end_eta, stop=False, speed=speed) # Return. 
return [start_wpt, end_wpt] diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/define_render_control.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/define_render_control.py index fa273b76..19961c84 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/define_render_control.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/define_render_control.py @@ -62,9 +62,7 @@ def setup_render_control_scan_section_analysis(): # analysis_render_control.draw_single_heliostat_constraints_assessed_normals = False # analysis_render_control.draw_single_heliostat_constraints_detail = False # analysis_render_control.draw_single_heliostat_constraints_all_targets = False - analysis_render_control.draw_single_heliostat_constraints_summary = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_single_heliostat_constraints_summary = False # KEY SUMMARY # analysis_render_control.draw_single_heliostat_constraints_gaze_example = False # analysis_render_control.draw_single_heliostat_constraints_gaze_example_C = C_draw analysis_render_control.draw_single_heliostat_constraints_legend = False @@ -86,9 +84,7 @@ def setup_render_control_scan_section_analysis(): analysis_render_control.draw_multi_heliostat_gaze_angle_example = False analysis_render_control.draw_multi_heliostat_gaze_angle_fill = False analysis_render_control.draw_multi_heliostat_gaze_angle_legend = False - analysis_render_control.draw_multi_heliostat_vertical_fov_required = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_multi_heliostat_vertical_fov_required = False # KEY SUMMARY analysis_render_control.draw_multi_heliostat_vertical_fov_required_legend = False analysis_render_control.draw_multi_heliostat_select_gaze = False # KEY SUMMARY # analysis_render_control.draw_multi_heliostat_select_gaze_per_heliostat = False @@ -156,9 +152,7 @@ def setup_render_control_scan_section_analysis_section_plus_flight_4view(): analysis_render_control.draw_multi_heliostat_gaze_angle_example = False analysis_render_control.draw_multi_heliostat_gaze_angle_fill = False analysis_render_control.draw_multi_heliostat_gaze_angle_legend = False - analysis_render_control.draw_multi_heliostat_vertical_fov_required = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_multi_heliostat_vertical_fov_required = False # KEY SUMMARY analysis_render_control.draw_multi_heliostat_vertical_fov_required_legend = False # analysis_render_control.draw_multi_heliostat_select_gaze = False # KEY SUMMARY # analysis_render_control.draw_multi_heliostat_select_gaze_per_heliostat = False @@ -204,9 +198,7 @@ def setup_render_control_scan_section_analysis_flight_4view(): # analysis_render_control.draw_single_heliostat_constraints_assessed_normals = False # analysis_render_control.draw_single_heliostat_constraints_detail = False # analysis_render_control.draw_single_heliostat_constraints_all_targets = False - analysis_render_control.draw_single_heliostat_constraints_summary = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_single_heliostat_constraints_summary = False # KEY SUMMARY # analysis_render_control.draw_single_heliostat_constraints_gaze_example = False # analysis_render_control.draw_single_heliostat_constraints_gaze_example_C = C_draw analysis_render_control.draw_single_heliostat_constraints_legend = False @@ -228,9 +220,7 @@ def setup_render_control_scan_section_analysis_flight_4view(): analysis_render_control.draw_multi_heliostat_gaze_angle_example = False 
analysis_render_control.draw_multi_heliostat_gaze_angle_fill = False analysis_render_control.draw_multi_heliostat_gaze_angle_legend = False - analysis_render_control.draw_multi_heliostat_vertical_fov_required = ( - False # KEY SUMMARY - ) + analysis_render_control.draw_multi_heliostat_vertical_fov_required = False # KEY SUMMARY analysis_render_control.draw_multi_heliostat_vertical_fov_required_legend = False analysis_render_control.draw_multi_heliostat_select_gaze = False # KEY SUMMARY # analysis_render_control.draw_multi_heliostat_select_gaze_per_heliostat = False diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/define_scan_nsttf.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/define_scan_nsttf.py index 60a9b344..1da7f483 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/define_scan_nsttf.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/define_scan_nsttf.py @@ -35,20 +35,10 @@ def define_scan_NSTTF_half_and_half(solar_field_short_name): solar_field_spec = {} solar_field_spec['name'] = 'Sandia NSTTF' solar_field_spec['short_name'] = 'NSTTF' - solar_field_spec['field_origin_lon_lat'] = ( - lln.LON_NSTTF_ORIGIN, - lln.LAT_NSTTF_ORIGIN, - ) - solar_field_spec['field_origin_lon_lat'] = ( - lln.LON_NSTTF_ORIGIN, - lln.LAT_NSTTF_ORIGIN, - ) - solar_field_spec['field_heliostat_file'] = ( - '../U_Code_data/NSTTF/NSTTF_Heliostats_origin_at_torque_tube.csv' - ) - solar_field_spec['field_facet_centroids_file'] = ( - '../U_Code_data/NSTTF/NSTTF_Facet_Centroids.csv' - ) + solar_field_spec['field_origin_lon_lat'] = (lln.LON_NSTTF_ORIGIN, lln.LAT_NSTTF_ORIGIN) + solar_field_spec['field_origin_lon_lat'] = (lln.LON_NSTTF_ORIGIN, lln.LAT_NSTTF_ORIGIN) + solar_field_spec['field_heliostat_file'] = '../U_Code_data/NSTTF/NSTTF_Heliostats_origin_at_torque_tube.csv' + solar_field_spec['field_facet_centroids_file'] = '../U_Code_data/NSTTF/NSTTF_Facet_Centroids.csv' # Define UFACET control flags. ufacet_control_parameters = {} @@ -56,15 +46,11 @@ def define_scan_NSTTF_half_and_half(solar_field_short_name): # Seed points. ufacet_curve_keys_x = np.linspace(-131.7, 131.7, 28) ufacet_curve_keys_y = [136.9] * len(ufacet_curve_keys_x) - ufacet_curve_key_xy_list = [ - [key_x, key_y] for key_x, key_y in zip(ufacet_curve_keys_x, ufacet_curve_keys_y) - ] + ufacet_curve_key_xy_list = [[key_x, key_y] for key_x, key_y in zip(ufacet_curve_keys_x, ufacet_curve_keys_y)] ufacet_control_parameters['curve_key_xy_list'] = ufacet_curve_key_xy_list # Maximum altitude. # Half-and-Half - ufacet_control_parameters['maximum_altitude'] = ( - 18.0 # m. Maximum altitude, roughly AGL, including slope effects. - ) + ufacet_control_parameters['maximum_altitude'] = 18.0 # m. Maximum altitude, roughly AGL, including slope effects. # Gaze control. ufacet_control_parameters['gaze_type'] = 'constant' # 'constant' or 'linear' ufacet_control_parameters['delta_eta'] = np.deg2rad( @@ -323,14 +309,7 @@ def define_scan_NSTTF_half_and_half(solar_field_short_name): up_azelhnames = None # Return. 
- return ( - solar_field_spec, - ufacet_control_parameters, - aimpoint_xyz, - when_ymdhmsz, - synch_azelhnames, - up_azelhnames, - ) + return (solar_field_spec, ufacet_control_parameters, aimpoint_xyz, when_ymdhmsz, synch_azelhnames, up_azelhnames) def define_scan_NSTTF_demo(solar_field_short_name): @@ -343,20 +322,10 @@ def define_scan_NSTTF_demo(solar_field_short_name): solar_field_spec = {} solar_field_spec['name'] = 'Sandia NSTTF' solar_field_spec['short_name'] = 'NSTTF' - solar_field_spec['field_origin_lon_lat'] = ( - lln.LON_NSTTF_ORIGIN, - lln.LAT_NSTTF_ORIGIN, - ) - solar_field_spec['field_origin_lon_lat'] = ( - lln.LON_NSTTF_ORIGIN, - lln.LAT_NSTTF_ORIGIN, - ) - solar_field_spec['field_heliostat_file'] = ( - '../U_Code_data/NSTTF/NSTTF_Heliostats_origin_at_torque_tube.csv' - ) - solar_field_spec['field_facet_centroids_file'] = ( - '../U_Code_data/NSTTF/NSTTF_Facet_Centroids.csv' - ) + solar_field_spec['field_origin_lon_lat'] = (lln.LON_NSTTF_ORIGIN, lln.LAT_NSTTF_ORIGIN) + solar_field_spec['field_origin_lon_lat'] = (lln.LON_NSTTF_ORIGIN, lln.LAT_NSTTF_ORIGIN) + solar_field_spec['field_heliostat_file'] = '../U_Code_data/NSTTF/NSTTF_Heliostats_origin_at_torque_tube.csv' + solar_field_spec['field_facet_centroids_file'] = '../U_Code_data/NSTTF/NSTTF_Facet_Centroids.csv' # Define UFACET control flags. ufacet_control_parameters = {} @@ -364,14 +333,10 @@ def define_scan_NSTTF_demo(solar_field_short_name): # Seed points. ufacet_curve_keys_x = np.linspace(-131.7, 131.7, 28) ufacet_curve_keys_y = [136.9] * len(ufacet_curve_keys_x) - ufacet_curve_key_xy_list = [ - [key_x, key_y] for key_x, key_y in zip(ufacet_curve_keys_x, ufacet_curve_keys_y) - ] + ufacet_curve_key_xy_list = [[key_x, key_y] for key_x, key_y in zip(ufacet_curve_keys_x, ufacet_curve_keys_y)] ufacet_control_parameters['curve_key_xy_list'] = ufacet_curve_key_xy_list # Maximum altitude. - ufacet_control_parameters['maximum_altitude'] = ( - 25.0 # m. Maximum altitude, roughly AGL, including slope effects. - ) + ufacet_control_parameters['maximum_altitude'] = 25.0 # m. Maximum altitude, roughly AGL, including slope effects. # Gaze control. ufacet_control_parameters['gaze_type'] = 'constant' # 'constant' or 'linear' ufacet_control_parameters['delta_eta'] = np.deg2rad( @@ -395,11 +360,7 @@ def define_scan_NSTTF_demo(solar_field_short_name): # Demo. synch_az = np.deg2rad(205) synch_el = np.deg2rad(30) - synch_azelhnames = [ - synch_az, - synch_el, - ['8W3', '8W4', '8W5', '7W3', '7W4', '6W5', '6W3', '6W4', '6W5'], - ] + synch_azelhnames = [synch_az, synch_el, ['8W3', '8W4', '8W5', '7W3', '7W4', '6W5', '6W3', '6W4', '6W5']] # Define upward-facing heliostat orientation. # Demo. @@ -408,40 +369,23 @@ def define_scan_NSTTF_demo(solar_field_short_name): up_azelhnames = [up_az, up_el, ['7E6', '12W7']] # Return. - return ( - solar_field_spec, - ufacet_control_parameters, - aimpoint_xyz, - when_ymdhmsz, - synch_azelhnames, - up_azelhnames, - ) + return (solar_field_spec, ufacet_control_parameters, aimpoint_xyz, when_ymdhmsz, synch_azelhnames, up_azelhnames) def define_scan_NSTTF_full_field(solar_field_short_name): """ Simple case where the full field is set to tracking a single aim point. """ - basedir = os.path.join( - root_path.opencsp.dir(), INSERT_CORRECT_DIRECTORY_PATH_HERE - ) # TODO: Fill-in correct path. + basedir = os.path.join(root_path.opencsp.dir(), INSERT_CORRECT_DIRECTORY_PATH_HERE) # TODO: Fill-in correct path. # Solar field spec. 
solar_field_spec = {} solar_field_spec['name'] = 'Sandia NSTTF' solar_field_spec['short_name'] = 'NSTTF' - solar_field_spec['field_origin_lon_lat'] = ( - lln.LON_NSTTF_ORIGIN, - lln.LAT_NSTTF_ORIGIN, - ) - solar_field_spec['field_origin_lon_lat'] = ( - lln.LON_NSTTF_ORIGIN, - lln.LAT_NSTTF_ORIGIN, - ) + solar_field_spec['field_origin_lon_lat'] = (lln.LON_NSTTF_ORIGIN, lln.LAT_NSTTF_ORIGIN) + solar_field_spec['field_origin_lon_lat'] = (lln.LON_NSTTF_ORIGIN, lln.LAT_NSTTF_ORIGIN) solar_field_spec['field_heliostat_file'] = os.path.join(basedir, 'Solar_Field.csv') - solar_field_spec['field_facet_centroids_file'] = os.path.join( - basedir, 'Facets_Centroids.csv' - ) + solar_field_spec['field_facet_centroids_file'] = os.path.join(basedir, 'Facets_Centroids.csv') # Define UFACET control flags. ufacet_control_parameters = {} @@ -449,14 +393,10 @@ def define_scan_NSTTF_full_field(solar_field_short_name): # Seed points. ufacet_curve_keys_x = np.linspace(-131.7, 131.7, 28) ufacet_curve_keys_y = [136.9] * len(ufacet_curve_keys_x) - ufacet_curve_key_xy_list = [ - [key_x, key_y] for key_x, key_y in zip(ufacet_curve_keys_x, ufacet_curve_keys_y) - ] + ufacet_curve_key_xy_list = [[key_x, key_y] for key_x, key_y in zip(ufacet_curve_keys_x, ufacet_curve_keys_y)] ufacet_control_parameters['curve_key_xy_list'] = ufacet_curve_key_xy_list # Maximum altitude. - ufacet_control_parameters['maximum_altitude'] = ( - 25.0 # m. Maximum altitude, roughly AGL, including slope effects. - ) + ufacet_control_parameters['maximum_altitude'] = 25.0 # m. Maximum altitude, roughly AGL, including slope effects. # Gaze control. ufacet_control_parameters['gaze_type'] = 'constant' # 'constant' or 'linear' ufacet_control_parameters['delta_eta'] = np.deg2rad( @@ -490,14 +430,7 @@ def define_scan_NSTTF_full_field(solar_field_short_name): up_azelhnames = None # Return. - return ( - solar_field_spec, - ufacet_control_parameters, - aimpoint_xyz, - when_ymdhmsz, - synch_azelhnames, - up_azelhnames, - ) + return (solar_field_spec, ufacet_control_parameters, aimpoint_xyz, when_ymdhmsz, synch_azelhnames, up_azelhnames) def define_scan_NSTTF(solar_field_short_name, nsttf_configuration): diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/generate_NSTTF_ufacet_plans_1.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/generate_NSTTF_ufacet_plans_1.py index 60279617..d837f8c3 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/generate_NSTTF_ufacet_plans_1.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/generate_NSTTF_ufacet_plans_1.py @@ -20,11 +20,7 @@ def generate_NSTTF_ufacet_plans( - solar_field_short_name, - nsttf_configuration, - elevation_offset, - save_flight_plan, - save_figures, + solar_field_short_name, nsttf_configuration, elevation_offset, save_flight_plan, save_figures ): # Figure layout. # tile_array=(2,2) @@ -42,9 +38,7 @@ def generate_NSTTF_ufacet_plans( render_control_scan_xy_analysis = drc.setup_render_control_scan_xy_analysis() render_control_scan_section_setup = drc.setup_render_control_scan_section_setup() # render_control_scan_section_analysis = setup_render_control_scan_section_analysis() - render_control_scan_section_analysis = ( - drc.setup_render_control_scan_section_analysis_section_plus_flight_4view() - ) + render_control_scan_section_analysis = drc.setup_render_control_scan_section_analysis_section_plus_flight_4view() # render_control_scan_section_analysis = drc.setup_render_control_scan_section_analysis_flight_4view() # Figure control. 
@@ -54,9 +48,7 @@ def generate_NSTTF_ufacet_plans( # Scan control parameters. scan_type = 'UFACET' - ufacet_scan_parameter_file = ( - solar_field_short_name # ?? SCAFFOLDING RCB -- TEMPORARY - ) + ufacet_scan_parameter_file = solar_field_short_name # ?? SCAFFOLDING RCB -- TEMPORARY raster_scan_parameter_file = None # Define scan. @@ -189,9 +181,7 @@ def generate_NSTTF_ufacet_plans( when_ymdhmsz_2[when_hour_idx] = trial_spec[trial_spec_hour_idx] when_ymdhmsz_2[when_minute_idx] = trial_spec[trial_spec_minute_idx] aimpoint_xyz_2[2] = trial_spec[trial_spec_z_aim_idx] - ufacet_control_parameters['maximum_altitude'] = trial_spec[ - trial_spec_z_max_idx - ] + ufacet_control_parameters['maximum_altitude'] = trial_spec[trial_spec_z_max_idx] pars.scan_plan_trial( tile_array, solar_field_spec, @@ -214,10 +204,6 @@ def generate_NSTTF_ufacet_plans( def generate_NSTTF_ufacet_plans_1(save_flight_plan, save_figures): elevation_offset = 0 - generate_NSTTF_ufacet_plans( - 'NSTTF', 'Full Field', elevation_offset, save_flight_plan, save_figures - ) + generate_NSTTF_ufacet_plans('NSTTF', 'Full Field', elevation_offset, save_flight_plan, save_figures) # generate_NSTTF_ufacet_plans('NSTTF', 'Demo', elevation_offset, save_flight_plan, save_figures) - generate_NSTTF_ufacet_plans( - 'NSTTF', 'Half-and-Half', elevation_offset, save_flight_plan, save_figures - ) + generate_NSTTF_ufacet_plans('NSTTF', 'Half-and-Half', elevation_offset, save_flight_plan, save_figures) diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_and_render_scan.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_and_render_scan.py index ac552908..b66dc6e8 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_and_render_scan.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_and_render_scan.py @@ -53,11 +53,7 @@ def plan_and_render_scan( elif scan_type == 'Vanity': scan_short_name = solar_field.short_name + '_' + scan_type[0] else: - print( - 'ERROR: In plan_and_render_scan(), unexpected scan_type="' - + str(scan_type) - + '" encountered.' - ) + print('ERROR: In plan_and_render_scan(), unexpected scan_type="' + str(scan_type) + '" encountered.') assert False # We're not using this control flag, so eliminate it to streamline file names. # if scan_type == 'UFACET': @@ -66,47 +62,30 @@ def plan_and_render_scan( # if ufacet_control_parameters['delta_eta'] != 0: # scan_name += ', Deta={0:.1f}deg'.format(np.rad2deg(ufacet_control_parameters['delta_eta'])) # scan_short_name += '_Deta={0:.1f}'.format(np.rad2deg(ufacet_control_parameters['delta_eta'])) - output_path = os.path.join( - render_control_top_level.figure_output_path, scan_short_name - ) + output_path = os.path.join(render_control_top_level.figure_output_path, scan_short_name) # Construct the scan. print('Constructing scan...') if scan_type == 'UFACET': # Construct UFACET scan. scan, scan_parameters, ufacet_scan_construction = psu.construct_ufacet_scan( - solar_field, - aimpoint_xyz, - when_ymdhmsz, - ufacet_scan_parameter_file, - ufacet_control_parameters, + solar_field, aimpoint_xyz, when_ymdhmsz, ufacet_scan_parameter_file, ufacet_control_parameters ) elif scan_type == 'Raster': # Construct raster survey scan. - scan, scan_parameters = psr.construct_raster_scan( - solar_field, raster_scan_parameter_file - ) + scan, scan_parameters = psr.construct_raster_scan(solar_field, raster_scan_parameter_file) elif scan_type == 'Vanity': # Construct raster survey scan. 
scan, scan_parameters = psv.construct_vanity_scan( - solar_field, - vanity_scan_parameter_file, - vanity_heliostat_name, - vanity_heliostat_azimuth, + solar_field, vanity_scan_parameter_file, vanity_heliostat_name, vanity_heliostat_azimuth ) else: - print( - 'ERROR: In plan_and_render_scan, unexpected scan.type = ' - + str(scan.type) - + ' encountered.' - ) + print('ERROR: In plan_and_render_scan, unexpected scan.type = ' + str(scan.type) + ' encountered.') assert False # Construct the flight plan. - flight_plan = fp.construct_flight_plan_from_scan( - scan_name, scan_short_name, launch_name, scan - ) + flight_plan = fp.construct_flight_plan_from_scan(scan_name, scan_short_name, launch_name, scan) # Construct object representing the flight over the solar field. flight_over_solar_field = fosf.FlightOverSolarField(solar_field, flight_plan) @@ -125,9 +104,7 @@ def plan_and_render_scan( print('Drawing UFACET (x,y) analysis...') curve_key_xy_list = ufacet_scan_construction['curve_key_xy_list'] list_of_ideal_xy_lists = ufacet_scan_construction['list_of_ideal_xy_lists'] - list_of_best_fit_segment_xys = ufacet_scan_construction[ - 'list_of_best_fit_segment_xys' - ] + list_of_best_fit_segment_xys = ufacet_scan_construction['list_of_best_fit_segment_xys'] psuxyar.draw_ufacet_xy_analysis( figure_control, solar_field, @@ -146,20 +123,14 @@ def plan_and_render_scan( section_list = ufacet_scan_construction['section_list'] # psuscr.draw_construct_ufacet_sections(figure_control, solar_field, section_list, vs.view_spec_3d(), render_control_scan_section_setup) psuscr.draw_construct_ufacet_sections( - figure_control, - solar_field, - section_list, - vs.view_spec_xy(), - render_control_scan_section_setup, + figure_control, solar_field, section_list, vs.view_spec_xy(), render_control_scan_section_setup ) # psuscr.draw_construct_ufacet_sections(figure_control, solar_field, section_list, None, render_control_scan_section_setup) # Use section view. if render_control_top_level.draw_ufacet_scan: # Draw the scan. print('Drawing UFACET scan...') - psur.draw_ufacet_scan( - figure_control, scan, render_control_scan_section_analysis - ) + psur.draw_ufacet_scan(figure_control, scan, render_control_scan_section_analysis) # Draw the flight plan. if render_control_top_level.draw_flight_plan: @@ -169,9 +140,7 @@ def plan_and_render_scan( if render_control_top_level.xy_solar_field_style != None: xy_solar_field_style = render_control_top_level.xy_solar_field_style rcfosf_default = rcfosf.default() - rcfosf_vfield = rcfosf.RenderControlFlightOverSolarField( - solar_field_style=xy_solar_field_style - ) + rcfosf_vfield = rcfosf.RenderControlFlightOverSolarField(solar_field_style=xy_solar_field_style) # Draw. view_3d = fosf.draw_flight_over_solar_field( figure_control, flight_over_solar_field, rcfosf_default, vs.view_spec_3d() @@ -246,9 +215,7 @@ def scan_plan_trial( figure_control = rcfg.RenderControlFigure(tile_array=tile_array, tile_square=False) # Initialize solar field. - solar_field = sf.setup_solar_field( - solar_field_spec, aimpoint_xyz, when_ymdhmsz, synch_azelhnames, up_azelhnames - ) + solar_field = sf.setup_solar_field(solar_field_spec, aimpoint_xyz, when_ymdhmsz, synch_azelhnames, up_azelhnames) if scan_type == 'Vanity': solar_field.set_full_field_stow() # ?? 
SCAFFOLDING RCB -- MAKE GENERAL diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_parameters.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_parameters.py index f79652b1..d2532e6e 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_parameters.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_parameters.py @@ -27,9 +27,7 @@ def construct_scan_parameters(scan_parameter_file): # Scan flight. scan_parameters['lead_in'] = 18 # m. # ** Overriden by vanity flights. ** scan_parameters['run_past'] = 9 # m. # ** Overriden by vanity flights. ** - scan_parameters['fly_forward_backward'] = ( - False # ** Overriden by vanity flights, raster flights. ** - ) + scan_parameters['fly_forward_backward'] = False # ** Overriden by vanity flights, raster flights. ** # Return. return scan_parameters diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_raster.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_raster.py index 09ef4726..157a4c69 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_raster.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_raster.py @@ -14,14 +14,10 @@ def construct_raster_scan(solar_field, raster_scan_parameter_file): print('Constructing raster scan...') # Fetch scan parameters. - raster_scan_parameters = psrp.construct_raster_scan_parameters( - raster_scan_parameter_file - ) + raster_scan_parameters = psrp.construct_raster_scan_parameters(raster_scan_parameter_file) # Construct the scan. - scan = sf.construct_solar_field_heliostat_survey_scan( - solar_field, raster_scan_parameters - ) + scan = sf.construct_solar_field_heliostat_survey_scan(solar_field, raster_scan_parameters) # Return. # Return the scan parameters, because they include information for converting the scan into a flight. diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_raster_parameters.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_raster_parameters.py index 7ec00c64..761f3b2e 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_raster_parameters.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_raster_parameters.py @@ -28,11 +28,7 @@ def check_eta(eta): + str(np.rad2deg(eta)) + 'encountered.' ) - print( - ' We reject positive gaze angles exceeding ' - + str(np.rad2deg(eta_max)) - + ' degrees.' - ) + print(' We reject positive gaze angles exceeding ' + str(np.rad2deg(eta_max)) + ' degrees.') assert False eta_min = np.rad2deg(-90.0) if eta < eta_min: @@ -41,11 +37,7 @@ def check_eta(eta): + str(np.rad2deg(eta)) + 'encountered.' ) - print( - ' We reject gaze angles less than ' - + str(np.rad2deg(eta_min)) - + ' degrees.' - ) + print(' We reject gaze angles less than ' + str(np.rad2deg(eta_min)) + ' degrees.') assert False @@ -58,9 +50,7 @@ def construct_raster_scan_parameters(raster_scan_parameter_file): eta = np.deg2rad(-35.0) # Arbitrary test value. scan_parameters['n_horizontal'] = 10 # Number of horizontal passes. scan_parameters['n_vertical'] = 6 # Number of vertical passes. - scan_parameters['eta'] = ( - eta # rad, Gaze angle, measured relative to horizontal (positive ==> up). - ) + scan_parameters['eta'] = eta # rad, Gaze angle, measured relative to horizontal (positive ==> up). scan_parameters['relative_z'] = 20 # m. scan_parameters['speed'] = 10 # m/sec. # Check result and return. 
diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet.py index 3a798662..49cc5dc5 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet.py @@ -15,11 +15,7 @@ def construct_ufacet_scan( - solar_field, - aimpoint_xyz, - when_ymdhmsz, - ufacet_scan_parameter_file, - ufacet_control_parameters, + solar_field, aimpoint_xyz, when_ymdhmsz, ufacet_scan_parameter_file, ufacet_control_parameters ): # Notify progress. print('Constructing UFACET scan...') @@ -39,29 +35,19 @@ def construct_ufacet_scan( ) # UFACET section analysis. - section_list = psusc.construct_ufacet_sections( - solar_field, list_of_best_fit_segment_xys, ufacet_scan_parameters - ) + section_list = psusc.construct_ufacet_sections(solar_field, list_of_best_fit_segment_xys, ufacet_scan_parameters) # Construct individual UFACET scan passes. - scan_pass_list = usp.construct_ufacet_passes( - solar_field, section_list, ufacet_scan_parameters - ) + scan_pass_list = usp.construct_ufacet_passes(solar_field, section_list, ufacet_scan_parameters) # Construct the scan. - scan = Scan.construct_scan_given_UFACET_scan_passes( - scan_pass_list, ufacet_scan_parameters - ) + scan = Scan.construct_scan_given_UFACET_scan_passes(scan_pass_list, ufacet_scan_parameters) # Store results. ufacet_scan_construction = {} - ufacet_scan_construction['curve_key_xy_list'] = ufacet_control_parameters[ - 'curve_key_xy_list' - ] + ufacet_scan_construction['curve_key_xy_list'] = ufacet_control_parameters['curve_key_xy_list'] ufacet_scan_construction['list_of_ideal_xy_lists'] = list_of_ideal_xy_lists - ufacet_scan_construction['list_of_best_fit_segment_xys'] = ( - list_of_best_fit_segment_xys - ) + ufacet_scan_construction['list_of_best_fit_segment_xys'] = list_of_best_fit_segment_xys ufacet_scan_construction['section_list'] = section_list ufacet_scan_construction['scan_pass_list'] = scan_pass_list diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_parameters.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_parameters.py index 153920e4..974ba2a5 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_parameters.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_parameters.py @@ -10,9 +10,7 @@ import opencsp.app.ufacets.flight_planner_ufacet.U_Code.lib.plan_scan_parameters as psp -def construct_ufacet_scan_parameters( - ufacet_scan_parameter_file, ufacet_control_parameters -): +def construct_ufacet_scan_parameters(ufacet_scan_parameter_file, ufacet_control_parameters): # General scan parameters. scan_parameters = psp.construct_scan_parameters(ufacet_scan_parameter_file) @@ -31,15 +29,11 @@ def construct_ufacet_scan_parameters( scan_parameters['p_margin'] = ( 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. ) - scan_parameters['altitude_margin'] = ( - 2.5 # m. Clearance above highest possible heliostat point. - ) + scan_parameters['altitude_margin'] = 2.5 # m. Clearance above highest possible heliostat point. scan_parameters['maximum_safe_altitude'] = ( 90.0 # meters. Driven by safey considerations. Control limit may be tighter. ) - scan_parameters['maximum_target_lookback'] = ( - 3 # Number of heliostats to look back for reflection targets. 
- ) + scan_parameters['maximum_target_lookback'] = 3 # Number of heliostats to look back for reflection targets. scan_parameters['gaze_tolerance'] = np.deg2rad( 1 ) # Uncertainty in gaze angle. True angle is +/- tolerance from nominal. @@ -53,19 +47,12 @@ def construct_ufacet_scan_parameters( # Add control parameters. for key in ufacet_control_parameters.keys(): if key in scan_parameters.keys(): - print( - 'ERROR: In construct_ufacet_scan_parameters(1), duplicate key="' - + str(key) - + '" encountered.' - ) + print('ERROR: In construct_ufacet_scan_parameters(1), duplicate key="' + str(key) + '" encountered.') assert False scan_parameters[key] = ufacet_control_parameters[key] # Ensure that maximum altitude does not exceed the maximum safe altitude. - if ( - scan_parameters['maximum_altitude'] - > scan_parameters['maximum_safe_altitude'] - ): + if scan_parameters['maximum_altitude'] > scan_parameters['maximum_safe_altitude']: print( 'NOTE: In construct_ufacet_scan_parameters(), input maximum altitude = ' + str(scan_parameters['maximum_altitude']) @@ -73,9 +60,7 @@ def construct_ufacet_scan_parameters( + str(scan_parameters['maximum_safe_altitude']) + ' m. Clamping to safe limit.' ) - scan_parameters['maximum_altitude'] = scan_parameters[ - 'maximum_safe_altitude' - ] + scan_parameters['maximum_altitude'] = scan_parameters['maximum_safe_altitude'] # Return. return scan_parameters diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_render.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_render.py index 72dc45f1..1843870d 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_render.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_render.py @@ -9,6 +9,4 @@ def draw_ufacet_scan(figure_control, scan, render_control_scan_section_analysis): # Render the analysis. for scan_pass in scan.passes: - scan_pass.ufacet_scan_pass().draw_section_analysis( - figure_control, render_control_scan_section_analysis - ) + scan_pass.ufacet_scan_pass().draw_section_analysis(figure_control, render_control_scan_section_analysis) diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_analysis.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_analysis.py index 95312974..174bf618 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_analysis.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_analysis.py @@ -38,9 +38,7 @@ def construct_minimum_safe_altitude_line(section_context, heliostat_name_list): for heliostat_name in heliostat_name_list: heliostat = solar_field.lookup_heliostat(heliostat_name) heliostat_pq = vs.xyz2pq(heliostat.origin, view_spec) - signed_distance = g2d.homogeneous_line_signed_distance_to_xy( - heliostat_pq, origin_line - ) + signed_distance = g2d.homogeneous_line_signed_distance_to_xy(heliostat_pq, origin_line) if signed_distance > maximum_signed_distance: maximum_signed_distance = signed_distance @@ -96,23 +94,17 @@ def construct_flight_path_family(section_context): # B is the y component of the average-ground surface normal. # C is distance from origin to path line, in meters. If path is level, z is altitude. # The path corresponds to the homogeneous line equation Ax + By + C = 0. - A = -above_all_origin_line[ - 0 - ] # Flip orientation, so increasing C implies increasing altitude. 
+ A = -above_all_origin_line[0] # Flip orientation, so increasing C implies increasing altitude. B = -above_all_origin_line[1] # C = -above_all_origin_line[2] # # The coefficient C is the flight path height above the global (x,y,z) origin. # Setting C_min = C would specify a flight path roughly through the heliostat origins. # Instead we set C_min to correspond to the first available safe flight altitude. C_mnsa = C + mnsa_height_above_origin # m. - C_mxsa = ( - maximum_safe_altitude # m. Inexact, because this doesn't account for slope. - ) + C_mxsa = maximum_safe_altitude # m. Inexact, because this doesn't account for slope. C_min = C_mnsa # m. C_max = C_mxsa # m. (C_max - C_min) is arbitrary, and simply controls range covereed in computation. - C_step = ( - 1.0 # m C_step is arbitrary, and controls altitude resolution of analysis. - ) + C_step = 1.0 # m C_step is arbitrary, and controls altitude resolution of analysis. # Save path family. section_context['path_family_A'] = A @@ -169,9 +161,7 @@ def construct_reflected_point(mirror_pq, nu, target_pq): # (measured ccw from x-axis). dp_mirror_to_target = target_pq[0] - mirror_pq[0] dq_mirror_to_target = target_pq[1] - mirror_pq[1] - theta = math.atan2( - dq_mirror_to_target, dp_mirror_to_target - ) # Angle ccw from p axis to ray [mirror --> target]. + theta = math.atan2(dq_mirror_to_target, dp_mirror_to_target) # Angle ccw from p axis to ray [mirror --> target]. iota = theta - nu # Incidence angle. rho = nu - iota # Reflected ray angle from p axis, measured ccw. length = 10 @@ -193,9 +183,7 @@ def ray_eta(ray): return math.atan2(dy, dx) -def single_heliostat_gaze_angle_analysis( - section_context, ab_pq, at_pq, s_locus, e_locus, constraints -): +def single_heliostat_gaze_angle_analysis(section_context, ab_pq, at_pq, s_locus, e_locus, constraints): # Fetch family of flight paths. A = section_context['path_family_A'] B = section_context['path_family_B'] @@ -214,12 +202,8 @@ def single_heliostat_gaze_angle_analysis( # Loop. while C <= C_max: path = [A, B, C] - path_s_pq = g2d.intersect_lines( - path, g2d.homogeneous_line(s_locus[0], s_locus[1]) - ) - path_e_pq = g2d.intersect_lines( - path, g2d.homogeneous_line(e_locus[0], e_locus[1]) - ) + path_s_pq = g2d.intersect_lines(path, g2d.homogeneous_line(s_locus[0], s_locus[1])) + path_e_pq = g2d.intersect_lines(path, g2d.homogeneous_line(e_locus[0], e_locus[1])) ray_min_eta = [path_e_pq, ab_pq] min_eta = ray_eta(ray_min_eta) ray_max_eta = [path_s_pq, at_pq] @@ -236,24 +220,16 @@ def single_heliostat_gaze_angle_analysis( C += C_step # Save constraints. - constraints['path_s_pq_list'] = ( - path_s_pq_list # Path sacn region start point, as a function of C. - ) - constraints['path_e_pq_list'] = ( - path_e_pq_list # Path sacn region start point, as a function of C. - ) + constraints['path_s_pq_list'] = path_s_pq_list # Path sacn region start point, as a function of C. + constraints['path_e_pq_list'] = path_e_pq_list # Path sacn region start point, as a function of C. constraints['ray_min_eta_list'] = ( ray_min_eta_list # Ray pointing from path start point to assessed heliostat top edge, function of C. ) constraints['ray_max_eta_list'] = ( ray_max_eta_list # Ray pointing from path end point to assessed heliostat bottom edge, function of C. ) - constraints['min_etaC_list'] = ( - min_etaC_list # Lower bound on required gaze angle interval, as a function of C. - ) - constraints['max_etaC_list'] = ( - max_etaC_list # Upper bound on required gaze angle interval, as a function of C. 
- ) + constraints['min_etaC_list'] = min_etaC_list # Lower bound on required gaze angle interval, as a function of C. + constraints['max_etaC_list'] = max_etaC_list # Upper bound on required gaze angle interval, as a function of C. # Return. return constraints @@ -283,22 +259,16 @@ def single_heliostat_gaze_angle_selection(section_context, constraints): # Construct the feasible gaze angles. envelope_min_etaC_list = [] envelope_max_etaC_list = [] - for shifted_min_etaC, shifted_max_etaC in zip( - shifted_min_etaC_list, shifted_max_etaC_list - ): + for shifted_min_etaC, shifted_max_etaC in zip(shifted_min_etaC_list, shifted_max_etaC_list): if shifted_min_etaC[0] >= shifted_max_etaC[0]: - envelope_min_etaC_list.append( - shifted_max_etaC - ) # Lines crossed, so we swap min.max sense. + envelope_min_etaC_list.append(shifted_max_etaC) # Lines crossed, so we swap min.max sense. envelope_max_etaC_list.append(shifted_min_etaC) # # Shrink by gaze tolerance. gaze_tolerance = section_context['gaze_tolerance'] shrunk_min_etaC_list = [] shrunk_max_etaC_list = [] - for envelope_min_etaC, envelope_max_etaC in zip( - envelope_min_etaC_list, envelope_max_etaC_list - ): + for envelope_min_etaC, envelope_max_etaC in zip(envelope_min_etaC_list, envelope_max_etaC_list): C = envelope_min_etaC[1] envelope_min_eta = envelope_min_etaC[0] envelope_max_eta = envelope_max_etaC[0] @@ -313,9 +283,7 @@ def single_heliostat_gaze_angle_selection(section_context, constraints): eta_max = section_context['eta_max'] clipped_min_etaC_list = [] clipped_max_etaC_list = [] - for envelope_min_etaC, envelope_max_etaC in zip( - shrunk_min_etaC_list, shrunk_max_etaC_list - ): + for envelope_min_etaC, envelope_max_etaC in zip(shrunk_min_etaC_list, shrunk_max_etaC_list): C = envelope_min_etaC[1] envelope_min_eta = envelope_min_etaC[0] envelope_max_eta = envelope_max_etaC[0] @@ -338,21 +306,10 @@ def single_heliostat_gaze_angle_selection(section_context, constraints): C_critical = constraints['C_critical'] selected_cacg_C = max(C_clipped, C_critical) else: - print( - 'WARNING: In single_heliostat_gaze_angle_selection(), infeasible case encountered.' - ) - print( - ' len(envelope_min_etaC_list) = ', - len(envelope_min_etaC_list), - ) - print( - ' len(shrunk_min_etaC_list) = ', - len(shrunk_min_etaC_list), - ) - print( - ' len(clipped_min_etaC_list) = ', - len(clipped_min_etaC_list), - ) + print('WARNING: In single_heliostat_gaze_angle_selection(), infeasible case encountered.') + print(' len(envelope_min_etaC_list) = ', len(envelope_min_etaC_list)) + print(' len(shrunk_min_etaC_list) = ', len(shrunk_min_etaC_list)) + print(' len(clipped_min_etaC_list) = ', len(clipped_min_etaC_list)) # Set values that will force the following code to clip the altitude and gaze angle. selected_cacg_eta = np.deg2rad(-200.0) # Extreme value. selected_cacg_C = 100000.0 # m. Signal value. @@ -367,9 +324,7 @@ def single_heliostat_gaze_angle_selection(section_context, constraints): # max_C = section_context['maximum_altitude'] if selected_cacg_C > max_C: - print( - 'NOTE: In single_heliostat_gaze_angle_selection(), clipping to max altitude.' - ) + print('NOTE: In single_heliostat_gaze_angle_selection(), clipping to max altitude.') # Pick altitude, then lookup eta that ensures mirror will be seen intially facing all sky. if len(shifted_max_etaC_list) < 2: print( @@ -378,9 +333,7 @@ def single_heliostat_gaze_angle_selection(section_context, constraints): assert False selected_cacg_C = max_C # m. 
if shifted_max_etaC_list[0][1] > selected_cacg_C: - print( - 'ERROR: In single_heliostat_gaze_angle_selection(), shifted_max_etaC_list starts above C.' - ) + print('ERROR: In single_heliostat_gaze_angle_selection(), shifted_max_etaC_list starts above C.') assert False # Search for point on eta = f(C) curve. selected_cacg_eta = math.inf @@ -397,19 +350,13 @@ def single_heliostat_gaze_angle_selection(section_context, constraints): break prev_C = C if selected_cacg_eta == math.inf: - print( - 'ERROR: In single_heliostat_gaze_angle_selection(), unexpected selected_cacg_eta == math.inf.' - ) + print('ERROR: In single_heliostat_gaze_angle_selection(), unexpected selected_cacg_eta == math.inf.') assert False if prev_C > selected_cacg_C: - print( - 'ERROR: In single_heliostat_gaze_angle_selection(), unexpected prev_C > selected_cacg_C.' - ) + print('ERROR: In single_heliostat_gaze_angle_selection(), unexpected prev_C > selected_cacg_C.') assert False if C < selected_cacg_C: - print( - 'ERROR: In single_heliostat_gaze_angle_selection(), unexpected C < selected_cacg_C.' - ) + print('ERROR: In single_heliostat_gaze_angle_selection(), unexpected C < selected_cacg_C.') assert False # Shrink. # Recall max and min swapped. @@ -437,9 +384,7 @@ def single_heliostat_gaze_angle_selection(section_context, constraints): constraints['shrunk_max_etaC_list'] = shrunk_max_etaC_list constraints['clipped_min_etaC_list'] = clipped_min_etaC_list constraints['clipped_max_etaC_list'] = clipped_max_etaC_list - constraints['selected_cacg_etaC'] = ( - selected_cacg_etaC # "cacg" == "constant altitude, constant gaze" - ) + constraints['selected_cacg_etaC'] = selected_cacg_etaC # "cacg" == "constant altitude, constant gaze" # Return. return constraints @@ -474,9 +419,7 @@ def single_heliostat_section_analysis( h_b_name = heliostat_name_list[h_a_idx + 1] # Background corner. else: h_b_name = None - at_pq, ab_pq, t_pq_list, bb_pq = fetch_key_points( - section_context, h_a_name, h_t_name_list, h_b_name - ) + at_pq, ab_pq, t_pq_list, bb_pq = fetch_key_points(section_context, h_a_name, h_t_name_list, h_b_name) # If there are no target points, createa a fictitious target to simplify downstream computation. if len(t_pq_list) == 0: @@ -486,18 +429,13 @@ def single_heliostat_section_analysis( ) assert False dp = at_pq[0] - bb_pq[0] - fictitous_t_pq = [ - (ab_pq[0] + dp), - at_pq[1], - ] # Imitate [assessed --> background] relative postion. + fictitous_t_pq = [(ab_pq[0] + dp), at_pq[1]] # Imitate [assessed --> background] relative postion. t_pq_list.append(fictitous_t_pq) # Assessed surface normal. dp_abat = at_pq[0] - ab_pq[0] dq_abat = at_pq[1] - ab_pq[1] - tau = math.atan2( - dq_abat, dp_abat - ) # Angle from p axis to assessed heliostat tangent. + tau = math.atan2(dq_abat, dp_abat) # Angle from p axis to assessed heliostat tangent. nu = tau + (np.pi / 2) # Angle from p axis to assessed heliostat surface normal. # PASS START ANALYSIS @@ -523,9 +461,7 @@ def single_heliostat_section_analysis( # CONSTRAINT: Back-side reflection must not start yet. # t1s_ub = "target reflection start, upper bound." An upper bound on p. # tsm_ub = "target reflection start with margin, upper bound." An upper bound on p. - ts_ub_list = [ - [ab_pq, construct_reflected_point(ab_pq, nu, t_pq)] for t_pq in t_pq_list - ] + ts_ub_list = [[ab_pq, construct_reflected_point(ab_pq, nu, t_pq)] for t_pq in t_pq_list] if len(ts_ub_list) >= 1: ts_ub = ts_ub_list[0] # ts_ub_list is sorted with dominant first. 
tsm_ub = g2d.shift_x(ts_ub, -p_margin) @@ -567,25 +503,19 @@ def single_heliostat_section_analysis( dp_dom = dom1_pq[0] - dom0_pq[0] dq_dom = dom1_pq[1] - dom0_pq[1] mu = math.atan2(dq_dom, dp_dom) - length = ( - 10 # Arbitrary positive number. Unimportant because it will be extended. - ) + length = 10 # Arbitrary positive number. Unimportant because it will be extended. dom2_pq = [sca_pq[0] + (length * np.cos(mu)), sca_pq[1] + (length * np.sin(mu))] # Construct the start locus. s_locus = [sca_pq, dom2_pq] else: - print( - 'ERROR: In single_heliostat_section_analysis(), No-target start locus case not supported.\n' - ) + print('ERROR: In single_heliostat_section_analysis(), No-target start locus case not supported.\n') assert False # PASS END ANALYSIS # CONSTRAINT: Back-side reflection must have reached top of assessed heliostat. # t1e_lb = "target 1 reflection end, lower bound." A lower bound on p. - te_lb_list = [ - [at_pq, construct_reflected_point(at_pq, nu, t_pq)] for t_pq in t_pq_list - ] + te_lb_list = [[at_pq, construct_reflected_point(at_pq, nu, t_pq)] for t_pq in t_pq_list] if len(te_lb_list) >= 1: te_lb = te_lb_list[0] # ts_ub_list is sorted with dominant first. tem_lb = g2d.shift_x(te_lb, p_margin) @@ -606,9 +536,7 @@ def single_heliostat_section_analysis( # Its starting point is the assessed heliostat top edge, which is obviously below the minimum safe altitude. eca_pq = g2d.intersect_rays(mnsa_ray, te_lb) else: - print( - 'ERROR: In single_heliostat_section_analysis(), No-target end critical altitude case not supported (1).' - ) + print('ERROR: In single_heliostat_section_analysis(), No-target end critical altitude case not supported (1).') assert False # Construct the end locus. @@ -627,9 +555,7 @@ def single_heliostat_section_analysis( # Construct the end locus. e_locus = [eca_pq, dom2_pq] else: - print( - 'ERROR: In single_heliostat_section_analysis(), No-target, end critical altitude case not supported (2).' - ) + print('ERROR: In single_heliostat_section_analysis(), No-target, end critical altitude case not supported (2).') assert False # Critical altitude. @@ -642,32 +568,20 @@ def single_heliostat_section_analysis( # Save the constraints. # Save before gaze angle analysis, because some gaze angle analysis routines might want to fetch contsraints. constraints = {} - constraints['h_a_idx'] = ( - h_a_idx # Assessed heliostat index in assess_heliostat_name_list. - ) + constraints['h_a_idx'] = h_a_idx # Assessed heliostat index in assess_heliostat_name_list. constraints['h_a_name'] = h_a_name # Assessed heliostat name. - constraints['h_t_name_list'] = ( - h_t_name_list # Reflected target heliostat name list. - ) + constraints['h_t_name_list'] = h_t_name_list # Reflected target heliostat name list. constraints['h_b_name'] = h_b_name # Background heliostat name. constraints['at_pq'] = at_pq # Top corner of assessed heliostat. constraints['ab_pq'] = ab_pq # Bottom corner of assessed heliostat. - constraints['t_pq_list'] = ( - t_pq_list # List of reflection target points. Might include a fictitious point. - ) + constraints['t_pq_list'] = t_pq_list # List of reflection target points. Might include a fictitious point. constraints['bb_pq'] = bb_pq # Bottom corner of background heliostat. - constraints['nu'] = ( - nu # Angle from p axis to assessed heliostat surface normal, measured ccw. - ) + constraints['nu'] = nu # Angle from p axis to assessed heliostat surface normal, measured ccw. constraints['abv_lb'] = abv_lb # Assessed bottom visibility, p lower bound. 
- constraints['abvm_lb'] = ( - abvm_lb # Assessed bottom visibility margin, p lower bound. - ) + constraints['abvm_lb'] = abvm_lb # Assessed bottom visibility margin, p lower bound. constraints['atv_lb'] = atv_lb # Assessed top visibility, p lower bound. constraints['atvm_lb'] = atvm_lb # Assessed top visibility margin, p lower bound. - constraints['ts_ub_list'] = ( - ts_ub_list # Target reflection start list, p upper bound. - ) + constraints['ts_ub_list'] = ts_ub_list # Target reflection start list, p upper bound. constraints['ts_ub'] = ts_ub # Target reflection start, p upper bound. constraints['tsm_ub'] = tsm_ub # Target reflection margin, p upper bound. constraints['sca_pq'] = sca_pq # Path start critical altitude point. @@ -680,14 +594,10 @@ def single_heliostat_section_analysis( constraints['e_locus'] = e_locus # Valid pass end points. constraints['C_start'] = C_start # Altitude of start critical point. constraints['C_end'] = C_end # Altitude of end critical point. - constraints['C_critical'] = ( - C_critical # Critical altitude, considering both start and end. - ) + constraints['C_critical'] = C_critical # Critical altitude, considering both start and end. # GAZE ANGLE ANALYSIS - constraints = single_heliostat_gaze_angle_analysis( - section_context, ab_pq, at_pq, s_locus, e_locus, constraints - ) + constraints = single_heliostat_gaze_angle_analysis(section_context, ab_pq, at_pq, s_locus, e_locus, constraints) constraints = single_heliostat_gaze_angle_selection(section_context, constraints) # Return. @@ -708,14 +618,10 @@ def multi_heliostat_gaze_angle_analysis(pass_constraints): pass_max_etaC_list = h_a_max_etaC_list.copy() else: if len(pass_min_etaC_list) != len(h_a_min_etaC_list): - print( - 'ERROR: In, multi_heliostat_gaze_angle_analysis(), mismatched min_etaC lengths encountered.' - ) + print('ERROR: In, multi_heliostat_gaze_angle_analysis(), mismatched min_etaC lengths encountered.') assert False if len(pass_max_etaC_list) != len(h_a_max_etaC_list): - print( - 'ERROR: In, multi_heliostat_gaze_angle_analysis(), mismatched max_etaC lengths encountered.' - ) + print('ERROR: In, multi_heliostat_gaze_angle_analysis(), mismatched max_etaC lengths encountered.') assert False for pass_min_etaC, h_a_min_etaC, idx in zip( pass_min_etaC_list, h_a_min_etaC_list, range(0, len(pass_min_etaC_list)) @@ -758,22 +664,16 @@ def multi_heliostat_gaze_angle_selection(section_context, pass_constraints): # Construct the feasible gaze angles. envelope_min_etaC_list = [] envelope_max_etaC_list = [] - for shifted_min_etaC, shifted_max_etaC in zip( - shifted_min_etaC_list, shifted_max_etaC_list - ): + for shifted_min_etaC, shifted_max_etaC in zip(shifted_min_etaC_list, shifted_max_etaC_list): if shifted_min_etaC[0] >= shifted_max_etaC[0]: - envelope_min_etaC_list.append( - shifted_max_etaC - ) # Lines crossed, so we swap min.max sense. + envelope_min_etaC_list.append(shifted_max_etaC) # Lines crossed, so we swap min.max sense. envelope_max_etaC_list.append(shifted_min_etaC) # # Shrink by gaze tolerance. 
gaze_tolerance = section_context['gaze_tolerance'] shrunk_min_etaC_list = [] shrunk_max_etaC_list = [] - for envelope_min_etaC, envelope_max_etaC in zip( - envelope_min_etaC_list, envelope_max_etaC_list - ): + for envelope_min_etaC, envelope_max_etaC in zip(envelope_min_etaC_list, envelope_max_etaC_list): C = envelope_min_etaC[1] envelope_min_eta = envelope_min_etaC[0] envelope_max_eta = envelope_max_etaC[0] @@ -788,9 +688,7 @@ def multi_heliostat_gaze_angle_selection(section_context, pass_constraints): eta_max = section_context['eta_max'] clipped_min_etaC_list = [] clipped_max_etaC_list = [] - for envelope_min_etaC, envelope_max_etaC in zip( - shrunk_min_etaC_list, shrunk_max_etaC_list - ): + for envelope_min_etaC, envelope_max_etaC in zip(shrunk_min_etaC_list, shrunk_max_etaC_list): C = envelope_min_etaC[1] envelope_min_eta = envelope_min_etaC[0] envelope_max_eta = envelope_max_etaC[0] @@ -819,21 +717,10 @@ def multi_heliostat_gaze_angle_selection(section_context, pass_constraints): max_C_critical = h_a_C_critical selected_cacg_C = max(C_clipped, max_C_critical) else: - print( - 'WARNING: In multi_heliostat_gaze_angle_selection(), infeasible case encountered.' - ) - print( - ' len(envelope_min_etaC_list) = ', - len(envelope_min_etaC_list), - ) - print( - ' len(shrunk_min_etaC_list) = ', - len(shrunk_min_etaC_list), - ) - print( - ' len(clipped_min_etaC_list) = ', - len(clipped_min_etaC_list), - ) + print('WARNING: In multi_heliostat_gaze_angle_selection(), infeasible case encountered.') + print(' len(envelope_min_etaC_list) = ', len(envelope_min_etaC_list)) + print(' len(shrunk_min_etaC_list) = ', len(shrunk_min_etaC_list)) + print(' len(clipped_min_etaC_list) = ', len(clipped_min_etaC_list)) # Set values that will force the following code to clip the altitude and gaze angle. selected_cacg_eta = np.deg2rad(-90.0) # Extreme value. selected_cacg_C = 90.0 # m. Signal value. @@ -848,9 +735,7 @@ def multi_heliostat_gaze_angle_selection(section_context, pass_constraints): # max_C = section_context['maximum_altitude'] if selected_cacg_C > max_C: - print( - 'NOTE: In multi_heliostat_gaze_angle_selection(), clipping to max altitude.' - ) + print('NOTE: In multi_heliostat_gaze_angle_selection(), clipping to max altitude.') # Pick altitude, then lookup eta that ensures mirror will be seen intially facing all sky. if len(shifted_max_etaC_list) < 2: print( @@ -859,9 +744,7 @@ def multi_heliostat_gaze_angle_selection(section_context, pass_constraints): assert False selected_cacg_C = max_C # m. if shifted_max_etaC_list[0][1] > selected_cacg_C: - print( - 'ERROR: In multi_heliostat_gaze_angle_selection(), shifted_max_etaC_list starts above C.' - ) + print('ERROR: In multi_heliostat_gaze_angle_selection(), shifted_max_etaC_list starts above C.') assert False # Search for point on eta = f(C) curve. selected_cacg_eta = math.inf @@ -873,19 +756,13 @@ def multi_heliostat_gaze_angle_selection(section_context, pass_constraints): break prev_C = C if selected_cacg_eta == math.inf: - print( - 'ERROR: In multi_heliostat_gaze_angle_selection(), unexpected selected_cacg_eta == math.inf.' - ) + print('ERROR: In multi_heliostat_gaze_angle_selection(), unexpected selected_cacg_eta == math.inf.') assert False if prev_C > selected_cacg_C: - print( - 'ERROR: In multi_heliostat_gaze_angle_selection(), unexpected prev_C > selected_cacg_C.' 
- ) + print('ERROR: In multi_heliostat_gaze_angle_selection(), unexpected prev_C > selected_cacg_C.') assert False if C < selected_cacg_C: - print( - 'ERROR: In multi_heliostat_gaze_angle_selection(), unexpected C < selected_cacg_C.' - ) + print('ERROR: In multi_heliostat_gaze_angle_selection(), unexpected C < selected_cacg_C.') assert False # Shrink. # Recall max and min swapped. @@ -913,21 +790,15 @@ def multi_heliostat_gaze_angle_selection(section_context, pass_constraints): pass_constraints['clipped_max_etaC_list'] = clipped_max_etaC_list pass_constraints['shrunk_min_etaC_list'] = shrunk_min_etaC_list pass_constraints['shrunk_max_etaC_list'] = shrunk_max_etaC_list - pass_constraints['selected_cacg_etaC'] = ( - selected_cacg_etaC # "cacg" == "constant altitude, constant gaze" - ) + pass_constraints['selected_cacg_etaC'] = selected_cacg_etaC # "cacg" == "constant altitude, constant gaze" # Return. return pass_constraints -def multi_heliostat_construct_flight_path( - section_context, assess_heliostat_name_list, pass_constraints -): +def multi_heliostat_construct_flight_path(section_context, assess_heliostat_name_list, pass_constraints): # Fetch the selected gaze angle and altitude. - selected_cacg_etaC = pass_constraints[ - 'selected_cacg_etaC' - ] # "cacg" == "constant altitude, constant gaze" + selected_cacg_etaC = pass_constraints['selected_cacg_etaC'] # "cacg" == "constant altitude, constant gaze" # Fetch family of flight paths. A = section_context['path_family_A'] @@ -995,9 +866,7 @@ def assemble_single_heliostat_etaC_dict(pass_constraints): per_heliostat_constraints = pass_constraints['per_heliostat_constraints'] for h_a_name in per_heliostat_constraints.keys(): h_a_constraints = per_heliostat_constraints[h_a_name] - selected_cacg_etaC = h_a_constraints[ - 'selected_cacg_etaC' - ] # "cacg" == "constant altitude, constant gaze" + selected_cacg_etaC = h_a_constraints['selected_cacg_etaC'] # "cacg" == "constant altitude, constant gaze" selected_cacg_etaC_dict[h_a_name] = selected_cacg_etaC # Save result. pass_constraints['selected_cacg_etaC_dict'] = selected_cacg_etaC_dict @@ -1007,18 +876,10 @@ def assemble_single_heliostat_etaC_dict(pass_constraints): def section_analysis(section_context, heliostat_name_list, assess_heliostat_name_list): # Notify progress. - print( - 'Constructing UFACET section ' - + heliostat_name_list[0] - + '-' - + heliostat_name_list[-1] - + ' analysis...' - ) + print('Constructing UFACET section ' + heliostat_name_list[0] + '-' + heliostat_name_list[-1] + ' analysis...') # Construct minimum safe altitude line for this section. - section_context = construct_minimum_safe_altitude_line( - section_context, heliostat_name_list - ) + section_context = construct_minimum_safe_altitude_line(section_context, heliostat_name_list) # Construct family of flight paths, indexed by altitude. section_context = construct_flight_path_family(section_context) @@ -1026,18 +887,14 @@ def section_analysis(section_context, heliostat_name_list, assess_heliostat_name # Per-heliostat constraint analysis. per_heliostat_constraints = {} for assess_heliostat_name in assess_heliostat_name_list: - constraints = single_heliostat_section_analysis( - section_context, heliostat_name_list, assess_heliostat_name - ) + constraints = single_heliostat_section_analysis(section_context, heliostat_name_list, assess_heliostat_name) per_heliostat_constraints[assess_heliostat_name] = constraints # Full pass analysis. 
pass_constraints = {} pass_constraints['per_heliostat_constraints'] = per_heliostat_constraints pass_constraints = multi_heliostat_gaze_angle_analysis(pass_constraints) - pass_constraints = multi_heliostat_gaze_angle_selection( - section_context, pass_constraints - ) + pass_constraints = multi_heliostat_gaze_angle_selection(section_context, pass_constraints) pass_constraints = multi_heliostat_vertical_fov_analysis(pass_constraints) pass_constraints = multi_heliostat_construct_flight_path( section_context, assess_heliostat_name_list, pass_constraints diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_analysis_render.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_analysis_render.py index c471bb64..b50397ee 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_analysis_render.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_analysis_render.py @@ -35,33 +35,19 @@ def draw_key_points(view, at_pq, ab_pq, t_pq_list, bb_pq): view.draw_pq(at_pq, style=rcps.marker(color=color_at), label='at: Assessed top') view.draw_pq(ab_pq, style=rcps.marker(color=color_ab), label='ab: Assessed bottom') view.draw_pq_text( - at_pq, - 'at', - style=rct.RenderControlText( - color=color_at, horizontalalignment='left', verticalalignment='top' - ), + at_pq, 'at', style=rct.RenderControlText(color=color_at, horizontalalignment='left', verticalalignment='top') ) view.draw_pq_text( - ab_pq, - 'ab', - style=rct.RenderControlText( - color=color_ab, horizontalalignment='left', verticalalignment='top' - ), + ab_pq, 'ab', style=rct.RenderControlText(color=color_ab, horizontalalignment='left', verticalalignment='top') ) t_idx = 1 for t_pq in t_pq_list: - view.draw_pq( - t_pq, - style=rcps.marker(color=color_t), - label='t{0:d}: Target {0:d} top'.format(t_idx), - ) + view.draw_pq(t_pq, style=rcps.marker(color=color_t), label='t{0:d}: Target {0:d} top'.format(t_idx)) view.draw_pq_text( t_pq, 't{0:d}'.format(t_idx), - style=rct.RenderControlText( - color=color_t, horizontalalignment='left', verticalalignment='top' - ), + style=rct.RenderControlText(color=color_t, horizontalalignment='left', verticalalignment='top'), ) t_idx += 1 @@ -69,38 +55,22 @@ def draw_key_points(view, at_pq, ab_pq, t_pq_list, bb_pq): view.draw_pq_text( bb_pq, 'bb', - style=rct.RenderControlText( - color=color_bb, horizontalalignment='left', verticalalignment='top' - ), - ) - view.draw_pq( - bb_pq, style=rcps.marker(color=color_bb), label='bb: Background bottom' + style=rct.RenderControlText(color=color_bb, horizontalalignment='left', verticalalignment='top'), ) + view.draw_pq(bb_pq, style=rcps.marker(color=color_bb), label='bb: Background bottom') def draw_sca_point(view, sca_pq, color): - view.draw_pq( - sca_pq, style=rcps.marker(color=color), label='sca: Start critical altitude' - ) + view.draw_pq(sca_pq, style=rcps.marker(color=color), label='sca: Start critical altitude') view.draw_pq_text( - sca_pq, - 'sca', - style=rct.RenderControlText( - color=color, horizontalalignment='left', verticalalignment='top' - ), + sca_pq, 'sca', style=rct.RenderControlText(color=color, horizontalalignment='left', verticalalignment='top') ) def draw_eca_point(view, eca_pq, color): - view.draw_pq( - eca_pq, style=rcps.marker(color=color), label='eca: End critical altitude' - ) + view.draw_pq(eca_pq, style=rcps.marker(color=color), label='eca: End critical altitude') view.draw_pq_text( - eca_pq, - 'eca', - 
style=rct.RenderControlText( - color=color, horizontalalignment='left', verticalalignment='top' - ), + eca_pq, 'eca', style=rct.RenderControlText(color=color, horizontalalignment='left', verticalalignment='top') ) @@ -113,9 +83,7 @@ def draw_surface_normal(view, ab_pq, nu, color): def draw_constraint_lower_bound(section_context, view, ray, color, label): - extended_ray = g2d.extend_ray( - ray, section_context['clip_pq_box'], fail_if_null_result=False - ) + extended_ray = g2d.extend_ray(ray, section_context['clip_pq_box'], fail_if_null_result=False) # This is rendering code, so if the ray is outside the bounding box, we want to do the best we can and keep going. if extended_ray == None: extended_ray = ray @@ -128,9 +96,7 @@ def draw_constraint_lower_bound(section_context, view, ray, color, label): def draw_constraint_upper_bound(section_context, view, ray, color, label): - extended_ray = g2d.extend_ray( - ray, section_context['clip_pq_box'], fail_if_null_result=False - ) + extended_ray = g2d.extend_ray(ray, section_context['clip_pq_box'], fail_if_null_result=False) # This is rendering code, so if the ray is outside the bounding box, we want to do the best we can and keep going. if extended_ray == None: extended_ray = ray @@ -142,15 +108,10 @@ def draw_constraint_upper_bound(section_context, view, ray, color, label): view.draw_pq_list([head_pq, dir_pq], style=rcps.outline(color=color)) -def draw_heliostat_section( - figure_control, section_context, heliostat_name_list, analysis_render_control -): +def draw_heliostat_section(figure_control, section_context, heliostat_name_list, analysis_render_control): # Setup view. fig_record = fm.setup_figure_for_3d_data( - figure_control, - rca.meters(), - section_context['view_spec'], - title='N-S Pass Section', + figure_control, rca.meters(), section_context['view_spec'], title='N-S Pass Section' ) view = fig_record.view fig_record.comment.append("Path segment analysis section.") @@ -167,15 +128,11 @@ def draw_heliostat_section( # Draw safe altitude lines. if analysis_render_control.draw_context_mnsa_ray: view.draw_pq_list( - section_context['mnsa_ray'], - style=rcps.RenderControlPointSeq( - linestyle='--', color='brown', marker=None - ), + section_context['mnsa_ray'], style=rcps.RenderControlPointSeq(linestyle='--', color='brown', marker=None) ) if analysis_render_control.draw_context_mxsa_ray: view.draw_pq_list( - section_context['mxsa_ray'], - style=rcps.RenderControlPointSeq(linestyle='--', color='r', marker=None), + section_context['mxsa_ray'], style=rcps.RenderControlPointSeq(linestyle='--', color='r', marker=None) ) # Finish the figure. @@ -186,12 +143,7 @@ def draw_heliostat_section( def draw_single_heliostat_constraint_analysis( - figure_control, - section_context, - heliostat_name_list, - assess_heliostat_name, - constraints, - analysis_render_control, + figure_control, section_context, heliostat_name_list, assess_heliostat_name, constraints, analysis_render_control ): # Setup view.
fig_record = fm.setup_figure_for_3d_data( @@ -199,12 +151,7 @@ def draw_single_heliostat_constraint_analysis( rca.meters(), section_context['view_spec'], title=( - assess_heliostat_name - + ' in ' - + heliostat_name_list[0] - + '-' - + heliostat_name_list[-1] - + ' Pass Section' + assess_heliostat_name + ' in ' + heliostat_name_list[0] + '-' + heliostat_name_list[-1] + ' Pass Section' ), ) view = fig_record.view @@ -216,9 +163,7 @@ def draw_single_heliostat_constraint_analysis( heliostat = section_context['solar_field'].lookup_heliostat(heliostat_name) # Style setup if heliostat_name == assess_heliostat_name: - heliostat_style = rch.name_outline( - color='m', horizontalalignment='left', verticalalignment='top' - ) + heliostat_style = rch.name_outline(color='m', horizontalalignment='left', verticalalignment='top') else: heliostat_style = rch.outline() heliostat_styles = rce.RenderControlEnsemble(heliostat_style) @@ -234,14 +179,10 @@ def draw_single_heliostat_constraint_analysis( bb_pq = constraints['bb_pq'] nu = constraints['nu'] abv_lb = constraints['abv_lb'] # Assessed bottom visibility, p lower bound. - abvm_lb = constraints[ - 'abvm_lb' - ] # Assessed bottom visibility margin, p lower bound. + abvm_lb = constraints['abvm_lb'] # Assessed bottom visibility margin, p lower bound. atv_lb = constraints['atv_lb'] # Assessed top visibility, p lower bound. atvm_lb = constraints['atvm_lb'] # Assessed top visibility margin, p lower bound. - ts_ub_list = constraints[ - 'ts_ub_list' - ] # Target reflection start list, p upper bound. + ts_ub_list = constraints['ts_ub_list'] # Target reflection start list, p upper bound. ts_ub = constraints['ts_ub'] # Target reflection start, p upper bound. tsm_ub = constraints['tsm_ub'] # Target reflection margin, p upper bound. sca_pq = constraints['sca_pq'] # Path start critical altitude point. @@ -255,17 +196,9 @@ def draw_single_heliostat_constraint_analysis( # Draw safe altitude lines. if analysis_render_control.draw_single_heliostat_constraints_mnsa_ray: - view.draw_pq_list( - mnsa_ray, - style=rcps.RenderControlPointSeq( - linestyle='--', color='brown', marker=None - ), - ) + view.draw_pq_list(mnsa_ray, style=rcps.RenderControlPointSeq(linestyle='--', color='brown', marker=None)) if analysis_render_control.draw_single_heliostat_constraints_mxsa_ray: - view.draw_pq_list( - mxsa_ray, - style=rcps.RenderControlPointSeq(linestyle='--', color='r', marker=None), - ) + view.draw_pq_list(mxsa_ray, style=rcps.RenderControlPointSeq(linestyle='--', color='r', marker=None)) # Draw key points. if analysis_render_control.draw_single_heliostat_constraints_key_points: @@ -281,120 +214,64 @@ def draw_single_heliostat_constraint_analysis( # Start pass (at or above, at or before). 
if abv_lb: draw_constraint_lower_bound( - section_context, - view, - abv_lb, - color='r', - label='Assessed bottom visibility, p lower bound', + section_context, view, abv_lb, color='r', label='Assessed bottom visibility, p lower bound' ) if abvm_lb: draw_constraint_lower_bound( - section_context, - view, - abvm_lb, - color='pink', - label='Assessed bottom visibility, p lower bound', + section_context, view, abvm_lb, color='pink', label='Assessed bottom visibility, p lower bound' ) if atv_lb: draw_constraint_lower_bound( - section_context, - view, - atv_lb, - color='b', - label='Assessed top visibility, p lower bound', + section_context, view, atv_lb, color='b', label='Assessed top visibility, p lower bound' ) if atvm_lb: draw_constraint_lower_bound( - section_context, - view, - atvm_lb, - color='skyblue', - label='Top visibility margin, p lower bound', + section_context, view, atvm_lb, color='skyblue', label='Top visibility margin, p lower bound' ) if len(ts_ub_list) > 0: # The first target dominates. draw_constraint_upper_bound( - section_context, - view, - ts_ub, - color='g', - label='Target reflection start, p upper bound', + section_context, view, ts_ub, color='g', label='Target reflection start, p upper bound' ) if len(ts_ub_list) > 1: - if ( - analysis_render_control.draw_single_heliostat_constraints_all_targets - ): + if analysis_render_control.draw_single_heliostat_constraints_all_targets: for ts_ub2 in ts_ub_list[1:]: # Differentiate from ts_ub. draw_constraint_upper_bound( - section_context, - view, - ts_ub2, - color='g', - label='Target reflection start, p upper bound', + section_context, view, ts_ub2, color='g', label='Target reflection start, p upper bound' ) if tsm_ub: draw_constraint_upper_bound( - section_context, - view, - tsm_ub, - color='c', - label='Target reflection margin, p upper bound', + section_context, view, tsm_ub, color='c', label='Target reflection margin, p upper bound' ) if len(te_lb_list) > 0: # The first target dominates. draw_constraint_lower_bound( - section_context, - view, - te_lb, - color='g', - label='Target reflection end, p lower bound', + section_context, view, te_lb, color='g', label='Target reflection end, p lower bound' ) if len(te_lb_list) > 1: - if ( - analysis_render_control.draw_single_heliostat_constraints_all_targets - ): + if analysis_render_control.draw_single_heliostat_constraints_all_targets: for te_lb2 in te_lb_list[1:]: # Differentiate from te_lb. # These are nearly superimposed. draw_constraint_lower_bound( - section_context, - view, - te_lb2, - color='g', - label='Target reflection end, p lower bound', + section_context, view, te_lb2, color='g', label='Target reflection end, p lower bound' ) if tem_lb: draw_constraint_upper_bound( - section_context, - view, - tem_lb, - color='olive', - label='Target reflection margin, p upper bound', + section_context, view, tem_lb, color='olive', label='Target reflection margin, p upper bound' ) draw_constraint_upper_bound( - section_context, - view, - pln_ub, - color='c', - label='Mirror plane, p upper bound', + section_context, view, pln_ub, color='c', label='Mirror plane, p upper bound' ) # Reflection end dominates. 
if analysis_render_control.draw_single_heliostat_constraints_summary: draw_sca_point(view, sca_pq, 'r') draw_eca_point(view, eca_pq, 'b') - draw_constraint_upper_bound( - section_context, view, s_locus, color='r', label='Valid pass start points' - ) - draw_constraint_lower_bound( - section_context, view, e_locus, color='b', label='Valid pass end points' - ) + draw_constraint_upper_bound(section_context, view, s_locus, color='r', label='Valid pass start points') + draw_constraint_lower_bound(section_context, view, e_locus, color='b', label='Valid pass end points') # Draw example gaze constraints. if analysis_render_control.draw_single_heliostat_constraints_gaze_example: C_example = analysis_render_control.gaze_example_C(section_context) - C_example = mt.clamp( - C_example, - section_context['path_family_C_min'], - section_context['path_family_C_max'], - ) + C_example = mt.clamp(C_example, section_context['path_family_C_min'], section_context['path_family_C_max']) for path_s_pq, path_e_pq, ray_min_eta, ray_max_eta, min_etaC, max_etaC in zip( constraints['path_s_pq_list'], constraints['path_e_pq_list'], @@ -405,23 +282,10 @@ def draw_single_heliostat_constraint_analysis( ): if min_etaC[1] > C_example: # Inexact match. view.draw_pq_list( - [path_s_pq, path_e_pq], - style=rcps.RenderControlPointSeq( - linestyle='--', color='g', marker=None - ), - ) - view.draw_pq_list( - ray_min_eta, - style=rcps.RenderControlPointSeq( - linestyle=':', color='b', marker=None - ), - ) - view.draw_pq_list( - ray_max_eta, - style=rcps.RenderControlPointSeq( - linestyle=':', color='r', marker=None - ), + [path_s_pq, path_e_pq], style=rcps.RenderControlPointSeq(linestyle='--', color='g', marker=None) ) + view.draw_pq_list(ray_min_eta, style=rcps.RenderControlPointSeq(linestyle=':', color='b', marker=None)) + view.draw_pq_list(ray_max_eta, style=rcps.RenderControlPointSeq(linestyle=':', color='r', marker=None)) break # Finish the figure. @@ -441,25 +305,15 @@ def draw_single_heliostat_gaze_angle_analysis( # Setup figure. fig_record = fm.setup_figure_for_3d_data( figure_control, - rca.RenderControlAxis( - x_label='eta (deg)', y_label='C (m)', z_label='', grid=True - ), + rca.RenderControlAxis(x_label='eta (deg)', y_label='C (m)', z_label='', grid=True), vs.view_spec_xy(), title=(h_a_name + ' Gaze Angle Analysis'), ) view = fig_record.view # Draw bounding curves. - view.draw_pq_list( - a.p2deg(min_etaC_list), - style=rcps.data_curve(color='b'), - label='{0:s} eta_min'.format(h_a_name), - ) - view.draw_pq_list( - a.p2deg(max_etaC_list), - style=rcps.data_curve(color='r'), - label='{0:s} eta_max'.format(h_a_name), - ) + view.draw_pq_list(a.p2deg(min_etaC_list), style=rcps.data_curve(color='b'), label='{0:s} eta_min'.format(h_a_name)) + view.draw_pq_list(a.p2deg(max_etaC_list), style=rcps.data_curve(color='r'), label='{0:s} eta_max'.format(h_a_name)) # Start and end points for altitude lines. 
draw_eta_min = min([pq[0] for pq in min_etaC_list]) - np.deg2rad(5.0) @@ -477,9 +331,7 @@ def draw_single_heliostat_gaze_angle_analysis( if analysis_render_control.draw_single_heliostat_gaze_angle_mnsa: view.draw_pq_list( a.p2deg([[draw_eta_min, C_mnsa], [draw_eta_max, C_mnsa]]), - style=rcps.RenderControlPointSeq( - linestyle='--', color='brown', marker=None - ), + style=rcps.RenderControlPointSeq(linestyle='--', color='brown', marker=None), label='Minimum Safe', ) @@ -488,27 +340,19 @@ def draw_single_heliostat_gaze_angle_analysis( C_critical = constraints['C_critical'] view.draw_pq_list( a.p2deg([[draw_eta_min, C_critical], [draw_eta_max, C_critical]]), - style=rcps.RenderControlPointSeq( - linestyle='--', color='orange', marker=None - ), + style=rcps.RenderControlPointSeq(linestyle='--', color='orange', marker=None), label='Critical', ) # Example C value. if analysis_render_control.draw_single_heliostat_gaze_angle_example: C_example = analysis_render_control.gaze_example_C(section_context) - C_example = mt.clamp( - C_example, - section_context['path_family_C_min'], - section_context['path_family_C_max'], - ) + C_example = mt.clamp(C_example, section_context['path_family_C_min'], section_context['path_family_C_max']) for min_etaC, max_etaC in zip(min_etaC_list, max_etaC_list): if min_etaC[1] > C_example: # Inexact match. view.draw_pq_list( a.p2deg([min_etaC, max_etaC]), - style=rcps.RenderControlPointSeq( - linestyle='--', color='c', marker=None - ), + style=rcps.RenderControlPointSeq(linestyle='--', color='c', marker=None), ) break @@ -516,10 +360,7 @@ def draw_single_heliostat_gaze_angle_analysis( if analysis_render_control.draw_single_heliostat_gaze_angle_fill: for min_etaC, max_etaC in zip(min_etaC_list, max_etaC_list): view.draw_pq_list( - a.p2deg([min_etaC, max_etaC]), - style=rcps.RenderControlPointSeq( - linestyle='--', color='c', marker=None - ), + a.p2deg([min_etaC, max_etaC]), style=rcps.RenderControlPointSeq(linestyle='--', color='c', marker=None) ) # Finish the figure. @@ -529,15 +370,11 @@ def draw_single_heliostat_gaze_angle_analysis( return view -def draw_single_heliostat_select_gaze( - figure_control, section_context, h_a_name, constraints, analysis_render_control -): +def draw_single_heliostat_select_gaze(figure_control, section_context, h_a_name, constraints, analysis_render_control): # Setup figure. fig_record = fm.setup_figure_for_3d_data( figure_control, - rca.RenderControlAxis( - x_label='eta (deg)', y_label='C (m)', z_label='', grid=True - ), + rca.RenderControlAxis(x_label='eta (deg)', y_label='C (m)', z_label='', grid=True), vs.view_spec_xy(), title=(h_a_name + ' Gaze Angle and Altitude Selection'), ) @@ -563,14 +400,10 @@ def draw_single_heliostat_select_gaze( envelope_min_etaC_list = constraints['envelope_min_etaC_list'] envelope_max_etaC_list = constraints['envelope_max_etaC_list'] view.draw_pq_list( - a.p2deg(envelope_min_etaC_list), - style=rcps.data_curve(color='b'), - label='Gaze Envelope eta_min', + a.p2deg(envelope_min_etaC_list), style=rcps.data_curve(color='b'), label='Gaze Envelope eta_min' ) view.draw_pq_list( - a.p2deg(envelope_max_etaC_list), - style=rcps.data_curve(color='r'), - label='Gaze Envelope eta_max', + a.p2deg(envelope_max_etaC_list), style=rcps.data_curve(color='r'), label='Gaze Envelope eta_max' ) # Draw gaze angle envelope after shrinking for uncertainty. 
@@ -578,14 +411,10 @@ def draw_single_heliostat_select_gaze( shrunk_min_etaC_list = constraints['shrunk_min_etaC_list'] shrunk_max_etaC_list = constraints['shrunk_max_etaC_list'] view.draw_pq_list( - a.p2deg(shrunk_min_etaC_list), - style=rcps.data_curve(color='b', linewidth=1.5), - label='Shrunk eta_min', + a.p2deg(shrunk_min_etaC_list), style=rcps.data_curve(color='b', linewidth=1.5), label='Shrunk eta_min' ) view.draw_pq_list( - a.p2deg(shrunk_max_etaC_list), - style=rcps.data_curve(color='r', linewidth=1.5), - label='Shrunk eta_max', + a.p2deg(shrunk_max_etaC_list), style=rcps.data_curve(color='r', linewidth=1.5), label='Shrunk eta_max' ) # Draw gaze angle envelope after clipping for gaze angle limits. @@ -593,14 +422,10 @@ def draw_single_heliostat_select_gaze( clipped_min_etaC_list = constraints['clipped_min_etaC_list'] clipped_max_etaC_list = constraints['clipped_max_etaC_list'] view.draw_pq_list( - a.p2deg(clipped_min_etaC_list), - style=rcps.data_curve(color='b', linewidth=1.5), - label='clipped eta_min', + a.p2deg(clipped_min_etaC_list), style=rcps.data_curve(color='b', linewidth=1.5), label='clipped eta_min' ) view.draw_pq_list( - a.p2deg(clipped_max_etaC_list), - style=rcps.data_curve(color='r', linewidth=1.5), - label='clipped eta_max', + a.p2deg(clipped_max_etaC_list), style=rcps.data_curve(color='r', linewidth=1.5), label='clipped eta_max' ) # Draw selected gaze angle and altitude. @@ -613,12 +438,8 @@ def draw_single_heliostat_select_gaze( ) # Start and end points for altitude lines. - draw_eta_min = min( - [pq[0] for pq in constraints['shifted_min_etaC_list']] - ) - np.deg2rad(5.0) - draw_eta_max = max( - [pq[0] for pq in constraints['shifted_max_etaC_list']] - ) + np.deg2rad(5.0) + draw_eta_min = min([pq[0] for pq in constraints['shifted_min_etaC_list']]) - np.deg2rad(5.0) + draw_eta_max = max([pq[0] for pq in constraints['shifted_max_etaC_list']]) + np.deg2rad(5.0) # Safe altitudes. if analysis_render_control.draw_single_heliostat_select_gaze_mxsa: @@ -632,9 +453,7 @@ def draw_single_heliostat_select_gaze( C_mnsa = section_context['path_family_C_mnsa'] view.draw_pq_list( a.p2deg([[draw_eta_min, C_mnsa], [draw_eta_max, C_mnsa]]), - style=rcps.RenderControlPointSeq( - linestyle='--', color='brown', marker=None - ), + style=rcps.RenderControlPointSeq(linestyle='--', color='brown', marker=None), label='Minimum Safe', ) @@ -643,9 +462,7 @@ def draw_single_heliostat_select_gaze( C_critical = constraints['C_critical'] view.draw_pq_list( a.p2deg([[draw_eta_min, C_critical], [draw_eta_max, C_critical]]), - style=rcps.RenderControlPointSeq( - linestyle='--', color='orange', linewidth=0.5, marker=None - ), + style=rcps.RenderControlPointSeq(linestyle='--', color='orange', linewidth=0.5, marker=None), label='C_critical', ) @@ -655,10 +472,7 @@ def draw_single_heliostat_select_gaze( clipped_max_etaC_list = constraints['clipped_max_etaC_list'] for min_etaC, max_etaC in zip(clipped_min_etaC_list, clipped_max_etaC_list): view.draw_pq_list( - a.p2deg([min_etaC, max_etaC]), - style=rcps.RenderControlPointSeq( - linestyle='--', color='c', marker=None - ), + a.p2deg([min_etaC, max_etaC]), style=rcps.RenderControlPointSeq(linestyle='--', color='c', marker=None) ) # Finish the figure. @@ -674,9 +488,7 @@ def draw_multi_heliostat_gaze_angle_analysis( # Setup figure. 
fig_record = fm.setup_figure_for_3d_data( figure_control, - rca.RenderControlAxis( - x_label='eta (deg)', y_label='C (m)', z_label='', grid=True - ), + rca.RenderControlAxis(x_label='eta (deg)', y_label='C (m)', z_label='', grid=True), vs.view_spec_xy(), title='Full-Pass Gaze Angle Analysis', ) @@ -714,16 +526,8 @@ def draw_multi_heliostat_gaze_angle_analysis( if analysis_render_control.draw_multi_heliostat_gaze_angle_envelope: pass_min_etaC_list = pass_constraints['pass_min_etaC_list'] pass_max_etaC_list = pass_constraints['pass_max_etaC_list'] - view.draw_pq_list( - a.p2deg(pass_min_etaC_list), - style=rcps.data_curve(color='b'), - label='Pass eta_min', - ) - view.draw_pq_list( - a.p2deg(pass_max_etaC_list), - style=rcps.data_curve(color='r'), - label='Pass eta_max', - ) + view.draw_pq_list(a.p2deg(pass_min_etaC_list), style=rcps.data_curve(color='b'), label='Pass eta_min') + view.draw_pq_list(a.p2deg(pass_max_etaC_list), style=rcps.data_curve(color='r'), label='Pass eta_max') # Start and end points for altitude lines. draw_eta_min = min([pq[0] for pq in pass_min_etaC_list]) - np.deg2rad(5.0) @@ -741,9 +545,7 @@ def draw_multi_heliostat_gaze_angle_analysis( C_mnsa = section_context['path_family_C_mnsa'] view.draw_pq_list( a.p2deg([[draw_eta_min, C_mnsa], [draw_eta_max, C_mnsa]]), - style=rcps.RenderControlPointSeq( - linestyle='--', color='brown', marker=None - ), + style=rcps.RenderControlPointSeq(linestyle='--', color='brown', marker=None), label='Minimum Safe', ) @@ -756,9 +558,7 @@ def draw_multi_heliostat_gaze_angle_analysis( C_critical = constraints['C_critical'] view.draw_pq_list( a.p2deg([[draw_eta_min, C_critical], [draw_eta_max, C_critical]]), - style=rcps.RenderControlPointSeq( - linestyle='--', color=c.color(color_idx), linewidth=0.5, marker=None - ), + style=rcps.RenderControlPointSeq(linestyle='--', color=c.color(color_idx), linewidth=0.5, marker=None), label='{0:s} C_critical'.format(assess_heliostat_name), ) color_idx += 1 @@ -766,20 +566,14 @@ def draw_multi_heliostat_gaze_angle_analysis( # Draw eta range for an example C value. if analysis_render_control.draw_multi_heliostat_gaze_angle_example: C_example = analysis_render_control.gaze_example_C(section_context) - C_example = mt.clamp( - C_example, - section_context['path_family_C_min'], - section_context['path_family_C_max'], - ) + C_example = mt.clamp(C_example, section_context['path_family_C_min'], section_context['path_family_C_max']) pass_min_etaC_list = pass_constraints['pass_min_etaC_list'] pass_max_etaC_list = pass_constraints['pass_max_etaC_list'] for min_etaC, max_etaC in zip(pass_min_etaC_list, pass_max_etaC_list): if min_etaC[1] > C_example: # Inexact match. view.draw_pq_list( a.p2deg([min_etaC, max_etaC]), - style=rcps.RenderControlPointSeq( - linestyle='--', color='c', marker=None - ), + style=rcps.RenderControlPointSeq(linestyle='--', color='c', marker=None), ) break @@ -789,10 +583,7 @@ def draw_multi_heliostat_gaze_angle_analysis( pass_max_etaC_list = pass_constraints['pass_max_etaC_list'] for min_etaC, max_etaC in zip(pass_min_etaC_list, pass_max_etaC_list): view.draw_pq_list( - a.p2deg([min_etaC, max_etaC]), - style=rcps.RenderControlPointSeq( - linestyle='--', color='c', marker=None - ), + a.p2deg([min_etaC, max_etaC]), style=rcps.RenderControlPointSeq(linestyle='--', color='c', marker=None) ) # Finish the figure. 
@@ -802,17 +593,10 @@ def draw_multi_heliostat_gaze_angle_analysis( return view -def draw_required_vertical_field_of_view( - figure_control, section_context, pass_constraints, analysis_render_control -): +def draw_required_vertical_field_of_view(figure_control, section_context, pass_constraints, analysis_render_control): fig_record = fm.setup_figure_for_3d_data( figure_control, - rca.RenderControlAxis( - x_label='Minimum Required Vertical FOV (deg)', - y_label='C (m)', - z_label='', - grid=True, - ), + rca.RenderControlAxis(x_label='Minimum Required Vertical FOV (deg)', y_label='C (m)', z_label='', grid=True), vs.view_spec_xy(), title='Vertical Field of View Analysis', ) @@ -842,9 +626,7 @@ def draw_required_vertical_field_of_view( C_mnsa = section_context['path_family_C_mnsa'] view.draw_pq_list( a.p2deg([[draw_fov_min, C_mnsa], [draw_fov_max, C_mnsa]]), - style=rcps.RenderControlPointSeq( - linestyle='--', color='brown', marker=None - ), + style=rcps.RenderControlPointSeq(linestyle='--', color='brown', marker=None), label='Minimum Safe', ) @@ -857,9 +639,7 @@ def draw_required_vertical_field_of_view( C_critical = constraints['C_critical'] view.draw_pq_list( a.p2deg([[draw_fov_min, C_critical], [draw_fov_max, C_critical]]), - style=rcps.RenderControlPointSeq( - linestyle='--', color=c.color(color_idx), linewidth=0.5, marker=None - ), + style=rcps.RenderControlPointSeq(linestyle='--', color=c.color(color_idx), linewidth=0.5, marker=None), label='{0:s} C_critical'.format(assess_heliostat_name), ) color_idx += 1 @@ -875,52 +655,34 @@ def draw_required_vertical_field_of_view( draw_C_max = C_mxsa + 2.0 # m if vertical_fov_min == vertical_fov_max: view.draw_pq_list( - a.p2deg( - [[vertical_fov_min, draw_C_min], [vertical_fov_min, draw_C_max]] - ), - style=rcps.RenderControlPointSeq( - linestyle='--', color='g', linewidth=1.5, marker=None - ), + a.p2deg([[vertical_fov_min, draw_C_min], [vertical_fov_min, draw_C_max]]), + style=rcps.RenderControlPointSeq(linestyle='--', color='g', linewidth=1.5, marker=None), label='{0:s} FOV'.format(camera.name), ) else: view.draw_pq_list( - a.p2deg( - [[vertical_fov_min, draw_C_min], [vertical_fov_min, draw_C_max]] - ), - style=rcps.RenderControlPointSeq( - linestyle='--', color='g', linewidth=1.5, marker=None - ), + a.p2deg([[vertical_fov_min, draw_C_min], [vertical_fov_min, draw_C_max]]), + style=rcps.RenderControlPointSeq(linestyle='--', color='g', linewidth=1.5, marker=None), label='{0:s} min FOV'.format(camera.name), ) view.draw_pq_list( - a.p2deg( - [[vertical_fov_max, draw_C_min], [vertical_fov_max, draw_C_max]] - ), - style=rcps.RenderControlPointSeq( - linestyle='--', color='g', linewidth=1.5, marker=None - ), + a.p2deg([[vertical_fov_max, draw_C_min], [vertical_fov_max, draw_C_max]]), + style=rcps.RenderControlPointSeq(linestyle='--', color='g', linewidth=1.5, marker=None), label='{0:s} max FOV'.format(camera.name), ) # Finish the figure. - view.show( - legend=analysis_render_control.draw_multi_heliostat_vertical_fov_required_legend - ) + view.show(legend=analysis_render_control.draw_multi_heliostat_vertical_fov_required_legend) # Return. return view -def draw_multi_heliostat_select_gaze( - figure_control, section_context, pass_constraints, analysis_render_control -): +def draw_multi_heliostat_select_gaze(figure_control, section_context, pass_constraints, analysis_render_control): # Setup figure. 
fig_record = fm.setup_figure_for_3d_data( figure_control, - rca.RenderControlAxis( - x_label='eta (deg)', y_label='C (m)', z_label='', grid=True - ), + rca.RenderControlAxis(x_label='eta (deg)', y_label='C (m)', z_label='', grid=True), vs.view_spec_xy(), title='Gaze Angle and Altitude Selection', ) @@ -946,14 +708,10 @@ def draw_multi_heliostat_select_gaze( envelope_min_etaC_list = pass_constraints['envelope_min_etaC_list'] envelope_max_etaC_list = pass_constraints['envelope_max_etaC_list'] view.draw_pq_list( - a.p2deg(envelope_min_etaC_list), - style=rcps.data_curve(color='b'), - label='Gaze Envelope eta_min', + a.p2deg(envelope_min_etaC_list), style=rcps.data_curve(color='b'), label='Gaze Envelope eta_min' ) view.draw_pq_list( - a.p2deg(envelope_max_etaC_list), - style=rcps.data_curve(color='r'), - label='Gaze Envelope eta_max', + a.p2deg(envelope_max_etaC_list), style=rcps.data_curve(color='r'), label='Gaze Envelope eta_max' ) # Draw gaze angle envelope after shrinking for uncertainty. @@ -961,14 +719,10 @@ def draw_multi_heliostat_select_gaze( shrunk_min_etaC_list = pass_constraints['shrunk_min_etaC_list'] shrunk_max_etaC_list = pass_constraints['shrunk_max_etaC_list'] view.draw_pq_list( - a.p2deg(shrunk_min_etaC_list), - style=rcps.data_curve(color='b', linewidth=1.5), - label='Shrunk eta_min', + a.p2deg(shrunk_min_etaC_list), style=rcps.data_curve(color='b', linewidth=1.5), label='Shrunk eta_min' ) view.draw_pq_list( - a.p2deg(shrunk_max_etaC_list), - style=rcps.data_curve(color='r', linewidth=1.5), - label='Shrunk eta_max', + a.p2deg(shrunk_max_etaC_list), style=rcps.data_curve(color='r', linewidth=1.5), label='Shrunk eta_max' ) # Draw gaze angle envelope after clipping for gaze angle limits. @@ -976,14 +730,10 @@ def draw_multi_heliostat_select_gaze( clipped_min_etaC_list = pass_constraints['clipped_min_etaC_list'] clipped_max_etaC_list = pass_constraints['clipped_max_etaC_list'] view.draw_pq_list( - a.p2deg(clipped_min_etaC_list), - style=rcps.data_curve(color='b', linewidth=1.5), - label='clipped eta_min', + a.p2deg(clipped_min_etaC_list), style=rcps.data_curve(color='b', linewidth=1.5), label='clipped eta_min' ) view.draw_pq_list( - a.p2deg(clipped_max_etaC_list), - style=rcps.data_curve(color='r', linewidth=1.5), - label='clipped eta_max', + a.p2deg(clipped_max_etaC_list), style=rcps.data_curve(color='r', linewidth=1.5), label='clipped eta_max' ) # Draw selected gaze angle and altitude. @@ -996,12 +746,8 @@ def draw_multi_heliostat_select_gaze( ) # Start and end points for altitude lines. - draw_eta_min = min( - [pq[0] for pq in pass_constraints['shifted_min_etaC_list']] - ) - np.deg2rad(5.0) - draw_eta_max = max( - [pq[0] for pq in pass_constraints['shifted_max_etaC_list']] - ) + np.deg2rad(5.0) + draw_eta_min = min([pq[0] for pq in pass_constraints['shifted_min_etaC_list']]) - np.deg2rad(5.0) + draw_eta_max = max([pq[0] for pq in pass_constraints['shifted_max_etaC_list']]) + np.deg2rad(5.0) # Safe altitudes. 
if analysis_render_control.draw_multi_heliostat_select_gaze_mxsa: @@ -1015,9 +761,7 @@ def draw_multi_heliostat_select_gaze( C_mnsa = section_context['path_family_C_mnsa'] view.draw_pq_list( a.p2deg([[draw_eta_min, C_mnsa], [draw_eta_max, C_mnsa]]), - style=rcps.RenderControlPointSeq( - linestyle='--', color='brown', marker=None - ), + style=rcps.RenderControlPointSeq(linestyle='--', color='brown', marker=None), label='Minimum Safe', ) @@ -1030,9 +774,7 @@ def draw_multi_heliostat_select_gaze( C_critical = constraints['C_critical'] view.draw_pq_list( a.p2deg([[draw_eta_min, C_critical], [draw_eta_max, C_critical]]), - style=rcps.RenderControlPointSeq( - linestyle='--', color=c.color(color_idx), linewidth=0.5, marker=None - ), + style=rcps.RenderControlPointSeq(linestyle='--', color=c.color(color_idx), linewidth=0.5, marker=None), ) # label='{0:s} C_critical'.format(assess_heliostat_name)) color_idx += 1 @@ -1043,10 +785,7 @@ def draw_multi_heliostat_select_gaze( clipped_max_etaC_list = pass_constraints['clipped_max_etaC_list'] for min_etaC, max_etaC in zip(clipped_min_etaC_list, clipped_max_etaC_list): view.draw_pq_list( - a.p2deg([min_etaC, max_etaC]), - style=rcps.RenderControlPointSeq( - linestyle='--', color='c', marker=None - ), + a.p2deg([min_etaC, max_etaC]), style=rcps.RenderControlPointSeq(linestyle='--', color='c', marker=None) ) # Finish the figure. @@ -1057,23 +796,14 @@ def draw_multi_heliostat_select_gaze( def draw_multi_heliostat_result( - figure_control, - section_context, - heliostat_name_list, - pass_constraints, - analysis_render_control, + figure_control, section_context, heliostat_name_list, pass_constraints, analysis_render_control ): # Setup view. fig_record = fm.setup_figure_for_3d_data( figure_control, rca.meters(), section_context['view_spec'], - title=( - heliostat_name_list[0] - + '-' - + heliostat_name_list[-1] - + ' Pass Section Result' - ), + title=(heliostat_name_list[0] + '-' + heliostat_name_list[-1] + ' Pass Section Result'), ) view = fig_record.view fig_record.comment.append("Scan pass section analysis result,") @@ -1091,18 +821,10 @@ def draw_multi_heliostat_result( # Draw safe altitude lines. if analysis_render_control.draw_multi_heliostat_result_mnsa_ray: mnsa_ray = section_context['mnsa_ray'] - view.draw_pq_list( - mnsa_ray, - style=rcps.RenderControlPointSeq( - linestyle='--', color='brown', marker=None - ), - ) + view.draw_pq_list(mnsa_ray, style=rcps.RenderControlPointSeq(linestyle='--', color='brown', marker=None)) if analysis_render_control.draw_multi_heliostat_result_mxsa_ray: mxsa_ray = section_context['mxsa_ray'] - view.draw_pq_list( - mxsa_ray, - style=rcps.RenderControlPointSeq(linestyle='--', color='r', marker=None), - ) + view.draw_pq_list(mxsa_ray, style=rcps.RenderControlPointSeq(linestyle='--', color='r', marker=None)) # Draw selected flight path line. if analysis_render_control.draw_multi_heliostat_result_selected_cacg_line: @@ -1116,29 +838,21 @@ def draw_multi_heliostat_result( segment_p0 = selected_cacg_segment[0][0] segment_p1 = selected_cacg_segment[1][0] # Construct interval spanning both. - length_margin = ( - analysis_render_control.draw_multi_heliostat_result_length_margin - ) + length_margin = analysis_render_control.draw_multi_heliostat_result_length_margin p0 = min(mnsa_p0, (segment_p0 - length_margin)) p1 = max(mnsa_p1, (segment_p1 + length_margin)) # Construct embedding line. 
selected_cacg_line_q0 = g2d.homogeneous_line_y_given_x(p0, selected_cacg_line) selected_cacg_line_q1 = g2d.homogeneous_line_y_given_x(p1, selected_cacg_line) - selected_cacg_line_ray = [ - [p0, selected_cacg_line_q0], - [p1, selected_cacg_line_q1], - ] + selected_cacg_line_ray = [[p0, selected_cacg_line_q0], [p1, selected_cacg_line_q1]] view.draw_pq_list( - selected_cacg_line_ray, - style=rcps.RenderControlPointSeq(linestyle='--', color='g', marker=None), + selected_cacg_line_ray, style=rcps.RenderControlPointSeq(linestyle='--', color='g', marker=None) ) # Draw selected flight path segment. if analysis_render_control.draw_multi_heliostat_result_selected_cacg_segment: selected_cacg_segment = pass_constraints['selected_cacg_segment'] - view.draw_pq_list( - selected_cacg_segment, style=rcps.outline(color='g', linewidth=4) - ) + view.draw_pq_list(selected_cacg_segment, style=rcps.outline(color='g', linewidth=4)) # Draw start and end loci. if analysis_render_control.draw_multi_heliostat_result_start_end_loci: @@ -1151,16 +865,8 @@ def draw_multi_heliostat_result( e_locus = constraints['e_locus'] # Valid pass end points. draw_sca_point(view, sca_pq, 'r') draw_eca_point(view, eca_pq, 'b') - draw_constraint_upper_bound( - section_context, - view, - s_locus, - color='r', - label='Valid pass start points', - ) - draw_constraint_lower_bound( - section_context, view, e_locus, color='b', label='Valid pass end points' - ) + draw_constraint_upper_bound(section_context, view, s_locus, color='r', label='Valid pass start points') + draw_constraint_lower_bound(section_context, view, e_locus, color='b', label='Valid pass end points') # Finish the figure. view.show(legend=analysis_render_control.draw_multi_heliostat_result_legend) @@ -1184,9 +890,7 @@ def draw_single_heliostat_etaC_dict(figure_control, pass_constraints): selected_cacg_C_list.append(selected_cacg_etaC[1]) # Plot selected gaze values. - fig_record_1 = fm.setup_figure( - figure_control, title='Gaze Angle Selected for Individual Heliostats' - ) + fig_record_1 = fm.setup_figure(figure_control, title='Gaze Angle Selected for Individual Heliostats') plt.plot(selected_cacg_eta_deg_list, '.-', color='b') plt.xlabel('Heliostat Index') plt.ylabel('eta (deg)') @@ -1196,9 +900,7 @@ def draw_single_heliostat_etaC_dict(figure_control, pass_constraints): plt.show() # Plot selected C values. - fig_record_2 = fm.setup_figure( - figure_control, title='Altitude Selected for Individual Heliostats' - ) + fig_record_2 = fm.setup_figure(figure_control, title='Altitude Selected for Individual Heliostats') plt.plot(selected_cacg_C_list, '.-', color='g') plt.xlabel('Heliostat Index') plt.ylabel('C (m)') @@ -1212,39 +914,21 @@ def draw_single_heliostat_etaC_dict(figure_control, pass_constraints): def draw_section_analysis( - figure_control, - section_context, - heliostat_name_list, - pass_constraints, - analysis_render_control, + figure_control, section_context, heliostat_name_list, pass_constraints, analysis_render_control ): # Notify progress. - print( - 'Drawing section ' - + heliostat_name_list[0] - + '-' - + heliostat_name_list[-1] - + ' analysis...' - ) + print('Drawing section ' + heliostat_name_list[0] + '-' + heliostat_name_list[-1] + ' analysis...') # Draw the section context. 
if analysis_render_control.draw_context: - draw_heliostat_section( - figure_control, - section_context, - heliostat_name_list, - analysis_render_control, - ) + draw_heliostat_section(figure_control, section_context, heliostat_name_list, analysis_render_control) # Draw the constraint analysis results. if analysis_render_control.draw_single_heliostat_analysis: per_heliostat_constraints = pass_constraints['per_heliostat_constraints'] for assess_heliostat_name in per_heliostat_constraints.keys(): - if ( - analysis_render_control.draw_single_heliostat_analysis_list == None - ) or ( - assess_heliostat_name - in analysis_render_control.draw_single_heliostat_analysis_list + if (analysis_render_control.draw_single_heliostat_analysis_list == None) or ( + assess_heliostat_name in analysis_render_control.draw_single_heliostat_analysis_list ): # Lookup constraints. constraints = per_heliostat_constraints[assess_heliostat_name] @@ -1261,20 +945,12 @@ def draw_section_analysis( # Draw gaze angle analysis. if analysis_render_control.draw_single_heliostat_gaze_angle: draw_single_heliostat_gaze_angle_analysis( - figure_control, - section_context, - assess_heliostat_name, - constraints, - analysis_render_control, + figure_control, section_context, assess_heliostat_name, constraints, analysis_render_control ) # Draw gaze angle selection. if analysis_render_control.draw_single_heliostat_select_gaze: draw_single_heliostat_select_gaze( - figure_control, - section_context, - assess_heliostat_name, - constraints, - analysis_render_control, + figure_control, section_context, assess_heliostat_name, constraints, analysis_render_control ) # Draw summary gaze angle analysis. @@ -1285,24 +961,16 @@ def draw_section_analysis( # Draw vertical field of view requirement. if analysis_render_control.draw_multi_heliostat_vertical_fov_required: - draw_required_vertical_field_of_view( - figure_control, section_context, pass_constraints, analysis_render_control - ) + draw_required_vertical_field_of_view(figure_control, section_context, pass_constraints, analysis_render_control) # Draw gaze angle and altitude selection. if analysis_render_control.draw_multi_heliostat_select_gaze: - draw_multi_heliostat_select_gaze( - figure_control, section_context, pass_constraints, analysis_render_control - ) + draw_multi_heliostat_select_gaze(figure_control, section_context, pass_constraints, analysis_render_control) # Draw the selected flight path in the heliostat context. if analysis_render_control.draw_multi_heliostat_result: draw_multi_heliostat_result( - figure_control, - section_context, - heliostat_name_list, - pass_constraints, - analysis_render_control, + figure_control, section_context, heliostat_name_list, pass_constraints, analysis_render_control ) # Draw the individual heliostat selected (eta,C) results. 
diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_construction.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_construction.py index f68ac5b2..a4f4c580 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_construction.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_construction.py @@ -16,9 +16,7 @@ def abs_w(name_pqw): return abs(name_pqw[1][2]) -def heliostat_name_pqw_list_within_margin( - solar_field, section_view_spec, ufacet_scan_parameters -): +def heliostat_name_pqw_list_within_margin(solar_field, section_view_spec, ufacet_scan_parameters): # Fetch required control parameters. margin_w = ufacet_scan_parameters['candidate_margin_w'] # Find heliostats within margin. @@ -51,11 +49,7 @@ def select_min_w_reject_nearby_p_aux( # This routine assumes that the input selected_heliostat_name_pqw_list has been sorted in order of increasing w. if len(remaining_heliostat_name_pqw_list) == 0: # There are no more heliostats to consider, so return. - return ( - selected_heliostat_name_pqw_list, - rejected_heliostat_name_pqw_list, - remaining_heliostat_name_pqw_list, - ) + return (selected_heliostat_name_pqw_list, rejected_heliostat_name_pqw_list, remaining_heliostat_name_pqw_list) else: # Select the heliostat closest to the section plane. selected_heliostat_name_pqw = remaining_heliostat_name_pqw_list[0] @@ -73,9 +67,7 @@ def select_min_w_reject_nearby_p_aux( ) -def select_min_w_reject_nearby_p( - candidate_heliostat_name_pqw_list, ufacet_scan_parameters -): +def select_min_w_reject_nearby_p(candidate_heliostat_name_pqw_list, ufacet_scan_parameters): # Fetch required control parameters. discard_threshold_p = ufacet_scan_parameters['discard_threshold_p'] # Prepare recursion variables. @@ -84,15 +76,13 @@ def select_min_w_reject_nearby_p( remaining_heliostat_name_pqw_list = candidate_heliostat_name_pqw_list # Recursive calculation to select the heliostats near the selection plane, # eliminate close-p clusters, and return both selected and rejected heliostats. - ( - selected_heliostat_name_pqw_list, - rejected_heliostat_name_pqw_list, - remaining_heliostat_name_pqw_list, - ) = select_min_w_reject_nearby_p_aux( - selected_heliostat_name_pqw_list, - rejected_heliostat_name_pqw_list, - remaining_heliostat_name_pqw_list, - discard_threshold_p, + (selected_heliostat_name_pqw_list, rejected_heliostat_name_pqw_list, remaining_heliostat_name_pqw_list) = ( + select_min_w_reject_nearby_p_aux( + selected_heliostat_name_pqw_list, + rejected_heliostat_name_pqw_list, + remaining_heliostat_name_pqw_list, + discard_threshold_p, + ) ) # Return. # Don't return the recursion variables. @@ -109,23 +99,15 @@ def construct_ufacet_section(solar_field, best_fit_segment_xy, ufacet_scan_param # Sort by w. sort_heliostat_name_pqw_list_by_w(candidate_heliostat_name_pqw_list) # Select heliostats close to section plane, and discard close neighbors. - (selected_heliostat_name_pqw_list, rejected_heliostat_name_pqw_list) = ( - select_min_w_reject_nearby_p( - candidate_heliostat_name_pqw_list, ufacet_scan_parameters - ) + (selected_heliostat_name_pqw_list, rejected_heliostat_name_pqw_list) = select_min_w_reject_nearby_p( + candidate_heliostat_name_pqw_list, ufacet_scan_parameters ) # Sort in order of ascending p. sort_heliostat_name_pqw_list_by_p(selected_heliostat_name_pqw_list) # Extract heliostat names.
- candidate_heliostat_name_list = [ - name_pqw[0] for name_pqw in candidate_heliostat_name_pqw_list - ] - selected_heliostat_name_list = [ - name_pqw[0] for name_pqw in selected_heliostat_name_pqw_list - ] - rejected_heliostat_name_list = [ - name_pqw[0] for name_pqw in rejected_heliostat_name_pqw_list - ] + candidate_heliostat_name_list = [name_pqw[0] for name_pqw in candidate_heliostat_name_pqw_list] + selected_heliostat_name_list = [name_pqw[0] for name_pqw in selected_heliostat_name_pqw_list] + rejected_heliostat_name_list = [name_pqw[0] for name_pqw in rejected_heliostat_name_pqw_list] # Store results. section = {} section['view_spec'] = section_view_spec @@ -136,17 +118,13 @@ def construct_ufacet_section(solar_field, best_fit_segment_xy, ufacet_scan_param return section -def construct_ufacet_sections( - solar_field, list_of_best_fit_segment_xys, ufacet_scan_parameters -): +def construct_ufacet_sections(solar_field, list_of_best_fit_segment_xys, ufacet_scan_parameters): # Notify progress. print('Constructing UFACET scan sections...') # Analyze each segment. section_list = [] for best_fit_segment_xy in list_of_best_fit_segment_xys: - section = construct_ufacet_section( - solar_field, best_fit_segment_xy, ufacet_scan_parameters - ) + section = construct_ufacet_section(solar_field, best_fit_segment_xy, ufacet_scan_parameters) section_list.append(section) return section_list diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_construction_render.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_construction_render.py index 70654318..b99653e7 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_construction_render.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_construction_render.py @@ -16,20 +16,13 @@ def draw_construct_ufacet_section( - figure_control, - solar_field, - section, - render_view_spec, - render_control_scan_section_setup, + figure_control, solar_field, section, render_view_spec, render_control_scan_section_setup ): # Draw setup of the section. if render_control_scan_section_setup.draw_section_setup: # Setup figure. fig_record = fm.setup_figure_for_3d_data( - figure_control, - rca.meters(), - render_view_spec, - title='UFACET Section Construction', + figure_control, rca.meters(), render_view_spec, title='UFACET Section Construction' ) view = fig_record.view # Comment. @@ -51,13 +44,11 @@ def draw_construct_ufacet_section( else: if render_control_scan_section_setup.highlight_selected_heliostats: solar_field_style.heliostat_styles.add_special_names( - section['selected_heliostat_name_list'], - rch.normal_outline(color='g'), + section['selected_heliostat_name_list'], rch.normal_outline(color='g') ) if render_control_scan_section_setup.highlight_rejected_heliostats: solar_field_style.heliostat_styles.add_special_names( - section['rejected_heliostat_name_list'], - rch.normal_outline(color='r'), + section['rejected_heliostat_name_list'], rch.normal_outline(color='r') ) # Draw the solar field. solar_field.draw(view, solar_field_style) @@ -66,17 +57,13 @@ def draw_construct_ufacet_section( # Fetch view spec projection information. segment_xy = section_view_spec['defining_segment_xy'] line_xy = section_view_spec['line_intersecting_xy_plane'] - origin_xyz = np.array( - section_view_spec['origin_xyz'] - ) # Make arrays so we can do simple vactor math. 
+ origin_xyz = np.array(section_view_spec['origin_xyz']) # Make arrays so we can do simple vector math. p_uxyz = np.array(section_view_spec['p_uxyz']) # q_uxyz = np.array(section_view_spec['q_uxyz']) # w_uxyz = np.array(section_view_spec['w_uxyz']) # # Defining segment. segment_xyz = [p + [0] for p in segment_xy] - view.draw_xyz_list( - segment_xyz, style=rcps.outline(color='brown', linewidth=2.5) - ) + view.draw_xyz_list(segment_xyz, style=rcps.outline(color='brown', linewidth=2.5)) # Section plane. box_xyz = solar_field.heliostat_bounding_box_xyz() box_min_xyz = box_xyz[0] @@ -95,24 +82,19 @@ def draw_construct_ufacet_section( line_segment_xy0 = line_segment_xy[0] line_segment_xy1 = line_segment_xy[1] view.draw_xyz_list( - [line_segment_xy0 + [0], line_segment_xy1 + [0]], - style=rcps.outline(color='c', linewidth=0.5), + [line_segment_xy0 + [0], line_segment_xy1 + [0]], style=rcps.outline(color='c', linewidth=0.5) ) view.draw_xyz_list( - [line_segment_xy0 + [z_min], line_segment_xy1 + [z_min]], - style=rcps.outline(color='c', linewidth=0.5), + [line_segment_xy0 + [z_min], line_segment_xy1 + [z_min]], style=rcps.outline(color='c', linewidth=0.5) ) view.draw_xyz_list( - [line_segment_xy0 + [z_max], line_segment_xy1 + [z_max]], - style=rcps.outline(color='c', linewidth=0.5), + [line_segment_xy0 + [z_max], line_segment_xy1 + [z_max]], style=rcps.outline(color='c', linewidth=0.5) ) view.draw_xyz_list( - [line_segment_xy0 + [z_min], line_segment_xy0 + [z_max]], - style=rcps.outline(color='c', linewidth=0.5), + [line_segment_xy0 + [z_min], line_segment_xy0 + [z_max]], style=rcps.outline(color='c', linewidth=0.5) ) view.draw_xyz_list( - [line_segment_xy1 + [z_min], line_segment_xy1 + [z_max]], - style=rcps.outline(color='c', linewidth=0.5), + [line_segment_xy1 + [z_min], line_segment_xy1 + [z_max]], style=rcps.outline(color='c', linewidth=0.5) ) # Origin view.draw_xyz(origin_xyz, style=rcps.marker(marker='o', color='r')) @@ -146,11 +128,7 @@ def draw_construct_ufacet_section( def draw_construct_ufacet_sections( - figure_control, - solar_field, - section_list, - input_view_spec, - render_control_scan_section_setup, + figure_control, solar_field, section_list, input_view_spec, render_control_scan_section_setup ): for section in section_list: if input_view_spec == None: @@ -158,9 +136,5 @@ def draw_construct_ufacet_sections( else: render_view_spec = input_view_spec draw_construct_ufacet_section( - figure_control, - solar_field, - section, - render_view_spec, - render_control_scan_section_setup, + figure_control, solar_field, section, render_view_spec, render_control_scan_section_setup ) diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_xy_analysis.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_xy_analysis.py index 661ba09b..31cead87 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_xy_analysis.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_xy_analysis.py @@ -16,21 +16,11 @@ def ideal_gaze_xy_list( ): # Integration limits. # Integrate forward from the key point. post_x_list, post_y_list = ideal_gaze_xy_list_aux( - aimpoint_xyz, - field_origin_lon_lat, - when_ymdhmsz, - key_xy, - integration_step, - bbox_xy, + aimpoint_xyz, field_origin_lon_lat, when_ymdhmsz, key_xy, integration_step, bbox_xy ) # Integrate backward from the key point.
pre_x_list, pre_y_list = ideal_gaze_xy_list_aux( - aimpoint_xyz, - field_origin_lon_lat, - when_ymdhmsz, - key_xy, - -integration_step, # Reverse direction - bbox_xy, + aimpoint_xyz, field_origin_lon_lat, when_ymdhmsz, key_xy, -integration_step, bbox_xy # Reverse direction ) pre_x_list.reverse() pre_y_list.reverse() @@ -59,9 +49,7 @@ def ideal_gaze_xy_list_aux( curve_x_list = [] curve_y_list = [] while ((x_min <= x_c) and (x_c <= x_max)) and ((y_min <= y_c) and (y_c <= y_max)): - nu = sun_track.tracking_nu( - [x_c, y_c, 0], aimpoint_xyz, field_origin_lon_lat, when_ymdhmsz - ) + nu = sun_track.tracking_nu([x_c, y_c, 0], aimpoint_xyz, field_origin_lon_lat, when_ymdhmsz) gamma = nu + np.pi x_c += integration_step * np.cos(gamma) y_c += integration_step * np.sin(gamma) @@ -123,12 +111,7 @@ def ufacet_xy_analysis(solar_field, aimpoint_xyz, when_ymdhmsz, curve_key_xy_lis for key_xy in curve_key_xy_list: # Construct ideal gaze curve. ideal_xy_list = ideal_gaze_xy_list( - aimpoint_xyz, - field_origin_lon_lat, - when_ymdhmsz, - key_xy, - integration_step, - bbox_xy, + aimpoint_xyz, field_origin_lon_lat, when_ymdhmsz, key_xy, integration_step, bbox_xy ) list_of_ideal_xy_lists.append(ideal_xy_list) # Construct segment approximating curve. diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_xy_analysis_render.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_xy_analysis_render.py index 123fd403..50021dff 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_xy_analysis_render.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_xy_analysis_render.py @@ -27,27 +27,16 @@ def draw_ufacet_xy_analysis( if render_control_scan_xy_analysis.draw_xy_segment_analysis: # Solar field. rcsf_vfield = rcsf.heliostat_vector_field_outlines(color='grey') - view_xy = sf.draw_solar_field( - figure_control, solar_field, rcsf_vfield, vs.view_spec_xy() - ) + view_xy = sf.draw_solar_field(figure_control, solar_field, rcsf_vfield, vs.view_spec_xy()) # Dense vector field. grid_xy = solar_field.heliostat_field_regular_grid_xy(30, 15) grid_xydxy = [ - [ - p, - sun_track.tracking_surface_normal_xy( - p + [0], aimpoint_xyz, field_origin_lon_lat, when_ymdhmsz - ), - ] + [p, sun_track.tracking_surface_normal_xy(p + [0], aimpoint_xyz, field_origin_lon_lat, when_ymdhmsz)] for p in grid_xy ] - view_xy.draw_pqdpq_list( - grid_xydxy, style=rcps.vector_field(color='k', vector_scale=5.0) - ) + view_xy.draw_pqdpq_list(grid_xydxy, style=rcps.vector_field(color='k', vector_scale=5.0)) # Key points. - view_xy.draw_pq_list( - curve_key_xy_list, style=rcps.marker(markersize=5, color='r') - ) + view_xy.draw_pq_list(curve_key_xy_list, style=rcps.marker(markersize=5, color='r')) # Ideal gaze curves. for ideal_xy_list in list_of_ideal_xy_lists: view_xy.draw_pq_list(ideal_xy_list, style=rcps.outline(color='r')) @@ -59,9 +48,7 @@ def draw_ufacet_xy_analysis( if render_control_scan_xy_analysis.draw_xy_segment_result: # Solar field. rcsf_vfield = rcsf.heliostat_vector_field_outlines(color='grey') - view_xy = sf.draw_solar_field( - figure_control, solar_field, rcsf_vfield, vs.view_spec_xy() - ) + view_xy = sf.draw_solar_field(figure_control, solar_field, rcsf_vfield, vs.view_spec_xy()) # Best fit segments. 
for segment_xy in list_of_best_fit_segment_xys: view_xy.draw_pq_list(segment_xy, style=rcps.outline(color='g', linewidth=2)) diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_vanity.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_vanity.py index f22edd3d..eff2502c 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_vanity.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_vanity.py @@ -9,12 +9,7 @@ import opencsp.common.lib.csp.SolarField as sf -def construct_vanity_scan( - solar_field, - vanity_scan_parameter_file, - vanity_heliostat_name, - vanity_heliostat_azimuth, -): +def construct_vanity_scan(solar_field, vanity_scan_parameter_file, vanity_heliostat_name, vanity_heliostat_azimuth): # Notify progress. print('Constructing vanity scan...') diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_vanity_parameters.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_vanity_parameters.py index 08fe5e72..b862c062 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_vanity_parameters.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_vanity_parameters.py @@ -28,11 +28,7 @@ def check_eta(eta): + str(np.rad2deg(eta)) + 'encountered.' ) - print( - ' We reject positive gaze angles exceeding ' - + str(np.rad2deg(eta_max)) - + ' degrees.' - ) + print(' We reject positive gaze angles exceeding ' + str(np.rad2deg(eta_max)) + ' degrees.') assert False eta_min = np.rad2deg(-90.0) if eta < eta_min: @@ -41,17 +37,11 @@ def check_eta(eta): + str(np.rad2deg(eta)) + 'encountered.' ) - print( - ' We reject gaze angles less than ' - + str(np.rad2deg(eta_min)) - + ' degrees.' - ) + print(' We reject gaze angles less than ' + str(np.rad2deg(eta_min)) + ' degrees.') assert False -def construct_vanity_scan_parameters( - vanity_scan_parameter_file, vanity_heliostat_name, vanity_heliostat_azimuth -): +def construct_vanity_scan_parameters(vanity_scan_parameter_file, vanity_heliostat_name, vanity_heliostat_azimuth): # General scan parameters. scan_parameters = psp.construct_scan_parameters(vanity_scan_parameter_file) @@ -61,17 +51,13 @@ def construct_vanity_scan_parameters( scan_parameters['fly_forward_backward'] = True # False if vanity_scan_parameter_file == 'NSTTF': - print( - 'ERROR: In construct_vanity_scan_parameters(), NSTTF case not implemented yet.' - ) + print('ERROR: In construct_vanity_scan_parameters(), NSTTF case not implemented yet.') assert False # Vanity scan parameters. eta = np.deg2rad(-35.0) # Arbitrary test value. scan_parameters['n_horizontal'] = 10 # Number of horizontal passes. scan_parameters['n_vertical'] = 6 # Number of vertical passes. - scan_parameters['eta'] = ( - eta # rad, Gaze angle, measured relative to horizontal (positive ==> up). - ) + scan_parameters['eta'] = eta # rad, Gaze angle, measured relative to horizontal (positive ==> up). scan_parameters['relative_z'] = 20 # m. scan_parameters['speed'] = 10 # m/sec. # Check result and return. diff --git a/contrib/app/ufacet-s/helio_scan/070_ExtractedFrames.py b/contrib/app/ufacet-s/helio_scan/070_ExtractedFrames.py index 7f23537a..c6ad6f9d 100644 --- a/contrib/app/ufacet-s/helio_scan/070_ExtractedFrames.py +++ b/contrib/app/ufacet-s/helio_scan/070_ExtractedFrames.py @@ -29,26 +29,16 @@ def __init__( ): # Flags to control rendering on this run. # Check input. 
if (input_video_dir_body_ext == None) or (len(input_video_dir_body_ext) == 0): - raise ValueError( - 'In ExtractedFrames.__init__(), null input_video_dir_body_ext encountered.' - ) + raise ValueError('In ExtractedFrames.__init__(), null input_video_dir_body_ext encountered.') if (output_data_dir == None) or (len(output_data_dir) == 0): - raise ValueError( - 'In ExtractedFrames.__init__(), null output_data_dir encountered.' - ) + raise ValueError('In ExtractedFrames.__init__(), null output_data_dir encountered.') if (output_render_dir == None) or (len(output_render_dir) == 0): - raise ValueError( - 'In ExtractedFrames.__init__(), null output_render_dir encountered.' - ) + raise ValueError('In ExtractedFrames.__init__(), null output_render_dir encountered.') if (output_frame_dir == None) or (len(output_frame_dir) == 0): - raise ValueError( - 'In ExtractedFrames.__init__(), null output_frame_dir encountered.' - ) + raise ValueError('In ExtractedFrames.__init__(), null output_frame_dir encountered.') # Parse input video path components. - input_video_dir, input_video_body, input_video_ext = ft.path_components( - input_video_dir_body_ext - ) + input_video_dir, input_video_body, input_video_ext = ft.path_components(input_video_dir_body_ext) # Store input. self.input_video_dir_body_ext = input_video_dir_body_ext @@ -82,14 +72,10 @@ def extract_frames_and_write_data(self): # Check if frames are already extracted. if ft.directory_is_empty(self.output_frame_dir): - print( - 'In ExtractedFrames.extract_frames_and_write_data(), extracting frames...' - ) + print('In ExtractedFrames.extract_frames_and_write_data(), extracting frames...') # Extract the frames. n_frames = vm.extract_frames( - self.input_video_dir_body_ext, - self.output_frame_dir, - self.output_frame_id_format, + self.input_video_dir_body_ext, self.output_frame_dir, self.output_frame_id_format ) # Write the summary information. @@ -97,9 +83,7 @@ def extract_frames_and_write_data(self): ft.create_directories_if_necessary(self.output_data_dir) summary_dict = {} summary_dict['n_frames_maybe_duplicates'] = n_frames - print( - 'In ExtractedFrames.extract_frames_and_write_data(), writing frame summary statistics...' 
- ) + print('In ExtractedFrames.extract_frames_and_write_data(), writing frame summary statistics...') ft.write_dict_file( 'frame summary statistics (possibly includes duplicates)', self.output_data_dir, @@ -109,10 +93,7 @@ def extract_frames_and_write_data(self): # LOAD RESULT def read_data(self): - print( - 'In ExtractedFrames.read_data(), reading frame statistics: ', - self.dict_dir_body_ext, - ) + print('In ExtractedFrames.read_data(), reading frame statistics: ', self.dict_dir_body_ext) self.frame_statistics_dict = ft.read_dict(self.dict_dir_body_ext) # RENDER RESULT @@ -134,12 +115,10 @@ def draw_example_frames(self): if __name__ == "__main__": input_video_dir_body_ext = ( - experiment_dir() - + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' + experiment_dir() + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' ) output_data_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/070_ExtractedFrames/mavic_zoom/data/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/070_ExtractedFrames/mavic_zoom/data/' ) output_render_dir = ( experiment_dir() diff --git a/contrib/app/ufacet-s/helio_scan/080_FramesNoDuplicates.py b/contrib/app/ufacet-s/helio_scan/080_FramesNoDuplicates.py index 13f04da5..3ed7f9e9 100644 --- a/contrib/app/ufacet-s/helio_scan/080_FramesNoDuplicates.py +++ b/contrib/app/ufacet-s/helio_scan/080_FramesNoDuplicates.py @@ -33,30 +33,18 @@ def __init__( ): # Flags to control rendering on this run. # Check input. if (input_video_dir_body_ext == None) or (len(input_video_dir_body_ext) == 0): - raise ValueError( - 'In FramesNoDuplicates.__init__(), null input_video_dir_body_ext encountered.' - ) + raise ValueError('In FramesNoDuplicates.__init__(), null input_video_dir_body_ext encountered.') if (input_frame_dir == None) or (len(input_frame_dir) == 0): - raise ValueError( - 'In FramesNoDuplicates.__init__(), null input_frame_dir encountered.' - ) + raise ValueError('In FramesNoDuplicates.__init__(), null input_frame_dir encountered.') if (output_data_dir == None) or (len(output_data_dir) == 0): - raise ValueError( - 'In FramesNoDuplicates.__init__(), null output_data_dir encountered.' - ) + raise ValueError('In FramesNoDuplicates.__init__(), null output_data_dir encountered.') if (output_render_dir == None) or (len(output_render_dir) == 0): - raise ValueError( - 'In FramesNoDuplicates.__init__(), null output_render_dir encountered.' - ) + raise ValueError('In FramesNoDuplicates.__init__(), null output_render_dir encountered.') if (output_frame_dir == None) or (len(output_frame_dir) == 0): - raise ValueError( - 'In FramesNoDuplicates.__init__(), null output_frame_dir encountered.' - ) + raise ValueError('In FramesNoDuplicates.__init__(), null output_frame_dir encountered.') # Parse input video path components. - input_video_dir, input_video_body, input_video_ext = ft.path_components( - input_video_dir_body_ext - ) + input_video_dir, input_video_body, input_video_ext = ft.path_components(input_video_dir_body_ext) # Store input. self.input_video_dir_body_ext = input_video_dir_body_ext @@ -79,16 +67,12 @@ def __init__( # File listing non-duplicate file names. 
self.non_duplicate_body = self.input_video_body + '_non_duplicate_frame_files' self.non_duplicate_body_ext = self.non_duplicate_body + '.txt' - self.non_duplicate_dir_body_ext = os.path.join( - self.output_data_dir, self.non_duplicate_body_ext - ) + self.non_duplicate_dir_body_ext = os.path.join(self.output_data_dir, self.non_duplicate_body_ext) # File listing duplicate file names. self.duplicate_body = self.input_video_body + '_duplicate_frame_files' self.duplicate_body_ext = self.duplicate_body + '.txt' - self.duplicate_dir_body_ext = os.path.join( - self.output_data_dir, self.duplicate_body_ext - ) + self.duplicate_dir_body_ext = os.path.join(self.output_data_dir, self.duplicate_body_ext) # Extract frames, if not already. self.filter_frames_and_write_data() @@ -106,18 +90,11 @@ def filter_frames_and_write_data(self): # Check if frames are already copied. if ft.directory_is_empty(self.output_frame_dir): - print( - 'In FramesNoDuplicates.filter_frames_and_write_data(), filtering frames...' - ) + print('In FramesNoDuplicates.filter_frames_and_write_data(), filtering frames...') # Identify duplicate frames. - (non_duplicate_frame_files, duplicate_frame_files) = ( - vm.identify_duplicate_frames( - self.input_frame_dir, - self.output_frame_dir, - self.tolerance_image_size, - self.tolerance_image_pixel, - ) + (non_duplicate_frame_files, duplicate_frame_files) = vm.identify_duplicate_frames( + self.input_frame_dir, self.output_frame_dir, self.tolerance_image_size, self.tolerance_image_pixel ) # Copy non-duplicate frames to frame output directory. @@ -128,9 +105,7 @@ def filter_frames_and_write_data(self): + ' to ' + output_frame_dir ) - ft.copy_file( - os.path.join(input_frame_dir, frame_file), output_frame_dir - ) + ft.copy_file(os.path.join(input_frame_dir, frame_file), output_frame_dir) # Write the summary information. # Create the output data directory if necessary. @@ -138,34 +113,18 @@ def filter_frames_and_write_data(self): summary_dict = {} summary_dict['n_frames_non_duplicates'] = len(non_duplicate_frame_files) summary_dict['n_frames_duplicates'] = len(duplicate_frame_files) - print( - 'In FramesNoDuplicates.filter_frames_and_write_data(), writing frame summary statistics...' 
- ) + print('In FramesNoDuplicates.filter_frames_and_write_data(), writing frame summary statistics...') ft.write_dict_file( - 'frame summary statistics (duplicates removed)', - self.output_data_dir, - self.dict_body, - summary_dict, + 'frame summary statistics (duplicates removed)', self.output_data_dir, self.dict_body, summary_dict ) ft.write_text_file( - 'non-duplicate frames', - self.output_data_dir, - self.non_duplicate_body, - non_duplicate_frame_files, - ) - ft.write_text_file( - 'duplicate frames', - self.output_data_dir, - self.duplicate_body, - duplicate_frame_files, + 'non-duplicate frames', self.output_data_dir, self.non_duplicate_body, non_duplicate_frame_files ) + ft.write_text_file('duplicate frames', self.output_data_dir, self.duplicate_body, duplicate_frame_files) # LOAD RESULT def read_data(self): - print( - 'In FramesNoDuplicates.read_data(), reading frame statistics: ', - self.dict_dir_body_ext, - ) + print('In FramesNoDuplicates.read_data(), reading frame statistics: ', self.dict_dir_body_ext) self.frame_statistics_dict = ft.read_dict(self.dict_dir_body_ext) # RENDER RESULT @@ -187,8 +146,7 @@ def draw_example_frames(self): if __name__ == "__main__": input_video_dir_body_ext = ( - experiment_dir() - + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' + experiment_dir() + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' ) input_frame_dir = ( experiment_dir() diff --git a/contrib/app/ufacet-s/helio_scan/130_KeyFramesGivenManual.py b/contrib/app/ufacet-s/helio_scan/130_KeyFramesGivenManual.py index 163f9a08..61c1484a 100644 --- a/contrib/app/ufacet-s/helio_scan/130_KeyFramesGivenManual.py +++ b/contrib/app/ufacet-s/helio_scan/130_KeyFramesGivenManual.py @@ -54,28 +54,18 @@ def __init__( ): # Flags to control rendering on this run. # Check input. if (input_video_dir_body_ext == None) or (len(input_video_dir_body_ext) == 0): - raise ValueError( - 'In KeyFramesGivenManual.__init__(), null input_video_dir_body_ext encountered.' - ) - if (input_original_keyinfo_dir_body_ext == None) or ( - len(input_original_keyinfo_dir_body_ext) == 0 - ): + raise ValueError('In KeyFramesGivenManual.__init__(), null input_video_dir_body_ext encountered.') + if (input_original_keyinfo_dir_body_ext == None) or (len(input_original_keyinfo_dir_body_ext) == 0): raise ValueError( 'In KeyFramesGivenManual.__init__(), null input_original_keyinfo_dir_body_ext encountered.' ) if (output_data_dir == None) or (len(output_data_dir) == 0): - raise ValueError( - 'In KeyFramesGivenManual.__init__(), null output_data_dir encountered.' - ) + raise ValueError('In KeyFramesGivenManual.__init__(), null output_data_dir encountered.') if (output_render_dir == None) or (len(output_render_dir) == 0): - raise ValueError( - 'In KeyFramesGivenManual.__init__(), null output_render_dir encountered.' - ) + raise ValueError('In KeyFramesGivenManual.__init__(), null output_render_dir encountered.') # Parse input video path components. - input_video_dir, input_video_body, input_video_ext = ft.path_components( - input_video_dir_body_ext - ) + input_video_dir, input_video_body, input_video_ext = ft.path_components(input_video_dir_body_ext) # Store input. # Execution control. @@ -101,16 +91,12 @@ def __init__( # Lookup frames per second. video = cv.VideoCapture(input_video_dir_body_ext) - self.fps = ( - int(video.get(cv.CAP_PROP_FPS)) + 1 - ) # +1 hard value # RCB doesn't understand this. 
+ self.fps = int(video.get(cv.CAP_PROP_FPS)) + 1 # +1 hard value # RCB doesn't understand this. # Key frame file name. self.key_frames_body = self.input_video_body + '_key_frames_fnxl' self.key_frames_body_ext = self.key_frames_body + '.csv' - self.key_frames_dir_body_ext = os.path.join( - self.output_data_dir, self.key_frames_body_ext - ) + self.key_frames_dir_body_ext = os.path.join(self.output_data_dir, self.key_frames_body_ext) # Summary statistics file name. self.dict_body = self.input_video_body + '_key_frame_statistics' @@ -177,16 +163,10 @@ def convert_and_save_original_keyinfo_file(self): heliostat_names = keyinfo[3] frame_id = instance_frame_correspondence_dict[instance_key] # Construct key frame entry. - (key_frame_id, list_of_name_polygons) = ( - self.construct_key_frame_entry( - instance_key, - keyinfo_dict, - instance_frame_correspondence_dict, - ) - ) - key_frames_fnxl.add_list_of_name_xy_lists( - key_frame_id, list_of_name_polygons + (key_frame_id, list_of_name_polygons) = self.construct_key_frame_entry( + instance_key, keyinfo_dict, instance_frame_correspondence_dict ) + key_frames_fnxl.add_list_of_name_xy_lists(key_frame_id, list_of_name_polygons) # for key_frame_id_key in key_frame_dict.keys(): # print('In KeyFramesGivenManual.convert_and_save_original_keyinfo_file(), ' + str(key_frame_id_key) + ': ' + str(key_frame_dict[key_frame_id_key])) else: @@ -201,9 +181,7 @@ def convert_and_save_original_keyinfo_file(self): key_frames_fnxl = fnxl.FrameNameXyList() key_frames_fnxl.load(self.input_edited_key_frames_dir_body_ext) # Confirm what was read. - print( - 'KeyFramesGivenManual.convert_and_save_original_keyinfo_file(), key frame specfication read:' - ) + print('KeyFramesGivenManual.convert_and_save_original_keyinfo_file(), key frame specfication read:') key_frames_fnxl.print(max_keys=7, max_value_length=200, indent=4) # Write key frame file. @@ -220,31 +198,18 @@ def convert_and_save_original_keyinfo_file(self): print( 'In KeyFramesGivenManual.convert_and_save_original_keyinfo_file(), writing key frame summary statistics...' ) - ft.write_dict_file( - 'key frame summary statistics', - self.output_data_dir, - self.dict_body, - summary_dict, - ) + ft.write_dict_file('key frame summary statistics', self.output_data_dir, self.dict_body, summary_dict) # Heliostats per key frame. heliostats_per_key_frame_dict = key_frames_fnxl.heliostats_per_frame() print( 'In KeyFramesGivenManual.convert_and_save_original_keyinfo_file(), writing heliostats per key frame:', os.path.join(self.output_data_dir, self.hpkf_body_ext), ) - ft.write_dict_file( - None, - self.output_data_dir, - self.hpkf_body, - heliostats_per_key_frame_dict, - ) + ft.write_dict_file(None, self.output_data_dir, self.hpkf_body, heliostats_per_key_frame_dict) def read_keyinfo(self): # Load original keyinfo file. - print( - 'In KeyFramesGivenManual.read_keyinfo(), reading file:', - self.input_original_keyinfo_dir_body_ext, - ) + print('In KeyFramesGivenManual.read_keyinfo(), reading file:', self.input_original_keyinfo_dir_body_ext) keyinfo_stream = open(self.input_original_keyinfo_dir_body_ext, "r") keyinfo_lines_with_newline = keyinfo_stream.readlines() keyinfo_lines = [line.rstrip('\n') for line in keyinfo_lines_with_newline] @@ -289,9 +254,7 @@ def instance_to_frameid(self, instance): # Return. 
return starting_id, ending_id - def construct_key_frame_entry( - self, instance_key, keyinfo_dict, instance_frame_correspondence_dict - ): + def construct_key_frame_entry(self, instance_key, keyinfo_dict, instance_frame_correspondence_dict): # Fetch information. key_frame_id = instance_frame_correspondence_dict[instance_key] keyinfo = keyinfo_dict[ @@ -301,15 +264,11 @@ def construct_key_frame_entry( box_coords = keyinfo[2] heliostat_names = keyinfo[3] # Construct entry. - entry = self.construct_key_frame_entry_aux( - number_of_heliostats, heliostat_names, box_coords - ) + entry = self.construct_key_frame_entry_aux(number_of_heliostats, heliostat_names, box_coords) # Return. return key_frame_id, entry - def construct_key_frame_entry_aux( - self, number_of_heliostats, heliostat_names, box_coords - ): + def construct_key_frame_entry_aux(self, number_of_heliostats, heliostat_names, box_coords): """ A key frame entry is a list: @@ -345,12 +304,7 @@ def construct_key_frame_entry_aux( bboxes = [] for idx in range(0, number_of_heliostats): bbox_idx = idx * 4 - bbox = [ - box_coords[bbox_idx], - box_coords[bbox_idx + 1], - box_coords[bbox_idx + 2], - box_coords[bbox_idx + 3], - ] + bbox = [box_coords[bbox_idx], box_coords[bbox_idx + 1], box_coords[bbox_idx + 2], box_coords[bbox_idx + 3]] bboxes.append(bbox) # Assemble the entry. list_of_name_polygons = [] @@ -386,16 +340,10 @@ def polygon_given_bbox(self, bbox): y_2 = bbox[3] # Check validity. if x_1 == x_2: - print( - 'ERROR: In KeyFramesGivenManual.polygon_given_bbox(), corner x values are equal. x_1 == x_2 ==', - x_1, - ) + print('ERROR: In KeyFramesGivenManual.polygon_given_bbox(), corner x values are equal. x_1 == x_2 ==', x_1) assert False if y_1 == y_2: - print( - 'ERROR: In KeyFramesGivenManual.polygon_given_bbox(), corner y values are equal. y_1 == y_2 ==', - y_1, - ) + print('ERROR: In KeyFramesGivenManual.polygon_given_bbox(), corner y values are equal. y_1 == y_2 ==', y_1) assert False # Identify boundaries. x_min = min(x_1, x_2) @@ -413,10 +361,7 @@ def polygon_given_bbox(self, bbox): # LOAD RESULT def read_key_frames(self): - print( - 'In KeyFramesGivenManual.read_key_frames(), reading key frames: ', - self.key_frames_dir_body_ext, - ) + print('In KeyFramesGivenManual.read_key_frames(), reading key frames: ', self.key_frames_dir_body_ext) self.key_frames_fnxl = fnxl.FrameNameXyList() self.key_frames_fnxl.load(self.key_frames_dir_body_ext) # Confirm what was read. @@ -424,15 +369,9 @@ def read_key_frames(self): self.key_frames_fnxl.print(max_keys=7, max_value_length=200, indent=4) def read_data(self): - print( - 'In KeyFramesGivenManual.read_data(), reading frame statistics: ', - self.dict_dir_body_ext, - ) + print('In KeyFramesGivenManual.read_data(), reading frame statistics: ', self.dict_dir_body_ext) self.frame_statistics_dict = ft.read_dict(self.dict_dir_body_ext) - print( - 'In KeyFramesGivenManual.read_data(), reading heliostats per key frame: ', - self.hpkf_dir_body_ext, - ) + print('In KeyFramesGivenManual.read_data(), reading heliostats per key frame: ', self.hpkf_dir_body_ext) self.hpkf_dict = ft.read_dict(self.hpkf_dir_body_ext) # Confirm what was read. print('In KeyFramesGivenManual.read_data(), heliostats per key frame read:') @@ -442,9 +381,7 @@ def read_data(self): def render(self): if self.render_control.draw_key_frames: - print( - 'In KeyFramesGivenManual.render(), rendering frames with key corners...' - ) + print('In KeyFramesGivenManual.render(), rendering frames with key corners...') # Descriptive strings. 
title_name = 'Key Frame' context_str = 'KeyFramesGivenManual.render()' @@ -452,9 +389,7 @@ def render(self): fig_suffix = '_key_frame_fig' delete_suffix = '.JPG' + fig_suffix + '.png' # Prepare directory. - upc.prepare_render_directory( - self.output_render_dir, delete_suffix, self.render_control - ) + upc.prepare_render_directory(self.output_render_dir, delete_suffix, self.render_control) # Setup annotation styles. style_dict = {} style_dict['point_seq'] = rcps.outline( @@ -498,8 +433,7 @@ def render(self): ) # Input/output sources. input_video_dir_body_ext = ( - experiment_dir() - + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' + experiment_dir() + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' ) input_original_keyinfo_dir_body_ext = ( experiment_dir() @@ -515,16 +449,12 @@ def render(self): experiment_dir() + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/080c_FramesNoDuplicates/mavic_zoom/frames/' ) - input_frame_id_format = ( - '06d' # Note different from format used in ffmpeg call, which is '.%06d' - ) + input_frame_id_format = '06d' # Note different from format used in ffmpeg call, which is '.%06d' output_data_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/130_KeyFrames/mavic_zoom/data/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/130_KeyFrames/mavic_zoom/data/' ) output_render_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/130_KeyFrames/mavic_zoom/render/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/130_KeyFrames/mavic_zoom/render/' ) # Render control. render_control = rckfgm.default() diff --git a/contrib/app/ufacet-s/helio_scan/140_KeyCorners.py b/contrib/app/ufacet-s/helio_scan/140_KeyCorners.py index bde2a9da..4850077f 100644 --- a/contrib/app/ufacet-s/helio_scan/140_KeyCorners.py +++ b/contrib/app/ufacet-s/helio_scan/140_KeyCorners.py @@ -52,22 +52,14 @@ def __init__( ): # Flags to control rendering on this run. # Check input. if (input_video_dir_body_ext == None) or (len(input_video_dir_body_ext) == 0): - raise ValueError( - 'In KeyCorners.__init__(), null input_video_dir_body_ext encountered.' - ) + raise ValueError('In KeyCorners.__init__(), null input_video_dir_body_ext encountered.') if (output_data_dir == None) or (len(output_data_dir) == 0): - raise ValueError( - 'In KeyCorners.__init__(), null output_data_dir encountered.' - ) + raise ValueError('In KeyCorners.__init__(), null output_data_dir encountered.') if (output_render_dir == None) or (len(output_render_dir) == 0): - raise ValueError( - 'In KeyCorners.__init__(), null output_render_dir encountered.' - ) + raise ValueError('In KeyCorners.__init__(), null output_render_dir encountered.') # Parse input video path components. - input_video_dir, input_video_body, input_video_ext = ft.path_components( - input_video_dir_body_ext - ) + input_video_dir, input_video_body, input_video_ext = ft.path_components(input_video_dir_body_ext) # Store input. # Execution control. @@ -90,9 +82,7 @@ def __init__( self.render_control = render_control # Found key corners file name. 
- self.all_key_frames_corners_body = ( - self.input_video_body + '_all_frames_corners_fnxl' - ) + self.all_key_frames_corners_body = self.input_video_body + '_all_frames_corners_fnxl' self.all_key_frames_corners_body_ext = self.all_key_frames_corners_body + '.csv' self.all_key_frames_corners_dir_body_ext = os.path.join( self.output_data_dir, self.all_key_frames_corners_body_ext @@ -107,9 +97,7 @@ def __init__( # File listing key frames with mismatched heliostats. self.mismatched_ids_body = self.input_video_body + '_mismatched_key_frame_ids' self.mismatched_ids_body_ext = self.mismatched_ids_body + '.txt' - self.mismatched_ids_dir_body_ext = os.path.join( - self.output_data_dir, self.mismatched_ids_body_ext - ) + self.mismatched_ids_dir_body_ext = os.path.join(self.output_data_dir, self.mismatched_ids_body_ext) # Heliostats per key frame file name. self.hpkf_body = self.input_video_body + '_heliostats_per_key_frame' @@ -122,10 +110,7 @@ def __init__( self.ppkf_dir_body_ext = os.path.join(self.output_data_dir, self.ppkf_body_ext) # Load key frames file. - print( - 'In KeyCorners.__init__(), reading key frame specs: ', - self.input_key_frames_dir_body_ext, - ) + print('In KeyCorners.__init__(), reading key frame specs: ', self.input_key_frames_dir_body_ext) self.key_frames_fnxl = fnxl.FrameNameXyList() self.key_frames_fnxl.load(self.input_key_frames_dir_body_ext) # Confirm what was read. @@ -135,9 +120,7 @@ def __init__( # Fetch a list of all frame ids in the video (not just key frames). # The corresponding frame_ids are not necessarily in sequential order, because # we previously removed spurious duplicate frames. - self.all_frame_file_list = ft.files_in_directory( - self.input_frame_dir, sort=True - ) + self.all_frame_file_list = ft.files_in_directory(self.input_frame_dir, sort=True) # # Confirm what was read. # print('In KeyCorners.__init__(), self.all_frame_file_list:') # for frame_file in self.all_frame_file_list: @@ -164,9 +147,7 @@ def find_and_save_key_corners(self): or (not ft.directory_exists(self.output_data_dir)) or ft.directory_is_empty(self.output_data_dir) ): - print( - 'In KeyCorners.find_and_save_key_corners(), searching for key corners...' - ) + print('In KeyCorners.find_and_save_key_corners(), searching for key corners...') # Determine which key frames to process. if self.specific_frame_ids == None: @@ -178,31 +159,20 @@ def find_and_save_key_corners(self): # Process each key frame_id. if self.single_processor: - print( - 'In KeyCorners.search_key_frames(), starting key frame corner search (single processor)...' - ) + print('In KeyCorners.search_key_frames(), starting key frame corner search (single processor)...') list_of_result_dicts = [] for key_frame_id in key_frame_ids_to_process: list_of_result_dicts.append(self.search_key_frame(key_frame_id)) else: - print( - 'In KeyCorners.search_key_frames(), starting key frame corner search (multi-processor)...' 
- ) - logger = logt.multiprocessing_logger( - self.log_dir_body_ext, level=logging.INFO - ) - logger.info( - '================================= Execution =================================' - ) + print('In KeyCorners.search_key_frames(), starting key frame corner search (multi-processor)...') + logger = logt.multiprocessing_logger(self.log_dir_body_ext, level=logging.INFO) + logger.info('================================= Execution =================================') with Pool(36) as pool: - list_of_result_dicts = pool.map( - self.search_key_frame, key_frame_ids_to_process - ) + list_of_result_dicts = pool.map(self.search_key_frame, key_frame_ids_to_process) # Remove "None" entries. list_of_fnxl_or_None_results = [ - result_dict['pair_projected_fnxl_or_None'] - for result_dict in list_of_result_dicts + result_dict['pair_projected_fnxl_or_None'] for result_dict in list_of_result_dicts ] key_frame_fnxls = [x for x in list_of_fnxl_or_None_results if x is not None] print( @@ -212,12 +182,9 @@ def find_and_save_key_corners(self): # Extract mismatched heliostat frame_ids. mismatched_key_frame_id_or_None_list = [ - result_dict['mismatched_key_frame_id_or_None'] - for result_dict in list_of_result_dicts - ] - mismatched_key_frame_ids = [ - x for x in mismatched_key_frame_id_or_None_list if x is not None + result_dict['mismatched_key_frame_id_or_None'] for result_dict in list_of_result_dicts ] + mismatched_key_frame_ids = [x for x in mismatched_key_frame_id_or_None_list if x is not None] print( 'In KeyCorners.search_key_frames(), mismatched corner frames extracted. len(mismatched_key_frame_ids) =', len(mismatched_key_frame_ids), @@ -231,10 +198,7 @@ def find_and_save_key_corners(self): all_key_frames_corners_fnxl = fnxl.construct_merged_copy(key_frame_fnxls) # Summarize search results. - print( - 'In KeyCorners.find_and_save_key_corners(), len(key_frame_fnxls) =', - len(key_frame_fnxls), - ) + print('In KeyCorners.find_and_save_key_corners(), len(key_frame_fnxls) =', len(key_frame_fnxls)) for key_frame_fnxl in key_frame_fnxls: print('In KeyCorners.find_and_save_key_corners(), key_frame_fnxl:') if key_frame_fnxl is None: @@ -245,9 +209,7 @@ def find_and_save_key_corners(self): '\nIn KeyCorners.find_and_save_key_corners(), all_key_frames_corners_fnxl.number_of_frames() =', all_key_frames_corners_fnxl.number_of_frames(), ) - print( - 'In KeyCorners.find_and_save_key_corners(), all_key_frames_corners_fnxl:' - ) + print('In KeyCorners.find_and_save_key_corners(), all_key_frames_corners_fnxl:') all_key_frames_corners_fnxl.print(max_value_length=200, indent=8) # Write found corners files. @@ -265,11 +227,7 @@ def find_and_save_key_corners(self): def search_key_frame(self, key_frame_id): # Notify start. - print( - 'In KeyCorners.search_key_frame(), fetching key frames for key_frame_id=' - + str(key_frame_id) - + '...' - ) + print('In KeyCorners.search_key_frame(), fetching key frames for key_frame_id=' + str(key_frame_id) + '...') # Find the filenames of the key frame and the frame that follows it. key_frame_body_ext_1 = upf.frame_file_body_ext_given_frame_id( @@ -299,46 +257,28 @@ def search_key_frame(self, key_frame_id): # Get the ids of the key frame pair. key_frame_id_1 = key_frame_id key_frame_id_2 = upf.frame_id_given_frame_file_body_ext(key_frame_body_ext_2) - print( - 'In KeyCorners.search_key_frames(), key_frame_id_1 =', key_frame_id_1 - ) # ?? SCAFFOLDING RCB -- TEMPORARY - print( - 'In KeyCorners.search_key_frames(), key_frame_id_2 =', key_frame_id_2 - ) # ?? 
SCAFFOLDING RCB -- TEMPORARY + print('In KeyCorners.search_key_frames(), key_frame_id_1 =', key_frame_id_1) # ?? SCAFFOLDING RCB -- TEMPORARY + print('In KeyCorners.search_key_frames(), key_frame_id_2 =', key_frame_id_2) # ?? SCAFFOLDING RCB -- TEMPORARY # Get the key frame name_polygon list. - list_of_name_polygons = self.key_frames_fnxl.list_of_name_xy_lists( - key_frame_id - ) # Applies to both frames. + list_of_name_polygons = self.key_frames_fnxl.list_of_name_xy_lists(key_frame_id) # Applies to both frames. # Assemble construction output directory paths. - key_frame_id_str_1 = upf.frame_id_str_given_frame_file_body_ext( - key_frame_body_ext_1 - ) - output_construction_dir_1 = os.path.join( - self.output_construction_dir, key_frame_id_str_1, 'frame1' - ) - output_construction_dir_2 = os.path.join( - self.output_construction_dir, key_frame_id_str_1, 'frame2' - ) + key_frame_id_str_1 = upf.frame_id_str_given_frame_file_body_ext(key_frame_body_ext_1) + output_construction_dir_1 = os.path.join(self.output_construction_dir, key_frame_id_str_1, 'frame1') + output_construction_dir_2 = os.path.join(self.output_construction_dir, key_frame_id_str_1, 'frame2') # Initialize logger. if not self.single_processor: # Don't make this a data member of self -- it will cause error: "can't pickle _thread.RLock objects" - local_logger = logt.multiprocessing_logger( - self.log_dir_body_ext, level=logging.INFO - ) + local_logger = logt.multiprocessing_logger(self.log_dir_body_ext, level=logging.INFO) else: local_logger = None # Search for key_frame 1 corners. try: search_result_1 = self.search_key_frame_aux( - local_logger, - key_frame_id_1, - list_of_name_polygons, - key_frame_body_ext_1, - output_construction_dir_1, + local_logger, key_frame_id_1, list_of_name_polygons, key_frame_body_ext_1, output_construction_dir_1 ) except: error_type, error_instance, traceback = sys.exc_info() @@ -353,11 +293,7 @@ def search_key_frame(self, key_frame_id): # Search for key_frame 2 corners. try: search_result_2 = self.search_key_frame_aux( - local_logger, - key_frame_id_2, - list_of_name_polygons, - key_frame_body_ext_2, - output_construction_dir_2, + local_logger, key_frame_id_2, list_of_name_polygons, key_frame_body_ext_2, output_construction_dir_2 ) except: error_type, error_instance, traceback = sys.exc_info() @@ -378,12 +314,8 @@ def search_key_frame(self, key_frame_id): # Construct a new fnxl that combines both key frame search results. # Take care to ensure that both have the same set of heliostat names. # This can return None if there are no xy_lists with a common name. - (pair_projected_fnxl_or_None, mismatched) = ( - self.construct_merged_fnxl_synchronizing_heliostat_names( - local_logger, - search_result_1.projected_fnxl(), - search_result_2.projected_fnxl(), - ) + (pair_projected_fnxl_or_None, mismatched) = self.construct_merged_fnxl_synchronizing_heliostat_names( + local_logger, search_result_1.projected_fnxl(), search_result_2.projected_fnxl() ) # Determine whether any found heliostats were lost from one frame to the next. 
@@ -399,33 +331,22 @@ def search_key_frame(self, key_frame_id): return result_dict def search_key_frame_aux( - self, - local_logger, - key_frame_id, - list_of_name_polygons, - key_frame_body_ext, - output_construction_dir, + self, local_logger, key_frame_id, list_of_name_polygons, key_frame_body_ext, output_construction_dir ): - key_frame_id_str = upf.frame_id_str_given_frame_file_body_ext( - key_frame_body_ext - ) + key_frame_id_str = upf.frame_id_str_given_frame_file_body_ext(key_frame_body_ext) # Read the key frame image. key_frame_dir_body_ext = os.path.join(self.input_frame_dir, key_frame_body_ext) key_frame_img = cv.imread(key_frame_dir_body_ext) if key_frame_img is None: logt.log_and_raise_value_error( - self.logger, - 'In KeyCorners.search_key_frame_aux(), error reading image file:', - key_frame_body_ext, + self.logger, 'In KeyCorners.search_key_frame_aux(), error reading image file:', key_frame_body_ext ) # Initialize output directory. ft.create_directories_if_necessary(output_construction_dir) # Execute search. logt.info( local_logger, - 'In KeyCorners.search_key_frame_aux(), searching for corners in key_frame_id=' - + str(key_frame_id) - + '...', + 'In KeyCorners.search_key_frame_aux(), searching for corners in key_frame_id=' + str(key_frame_id) + '...', ) search_result = kfcs.KeyFrameCornerSearch( key_frame_id=key_frame_id, @@ -438,16 +359,11 @@ def search_key_frame_aux( render_control=self.render_control, ) logt.info( - local_logger, - 'In KeyCorners.search_key_frame_aux(), corners done key_frame_id=' - + str(key_frame_id) - + '.', + local_logger, 'In KeyCorners.search_key_frame_aux(), corners done key_frame_id=' + str(key_frame_id) + '.' ) return search_result - def construct_merged_fnxl_synchronizing_heliostat_names( - self, local_logger, fnxl_1, fnxl_2 - ): + def construct_merged_fnxl_synchronizing_heliostat_names(self, local_logger, fnxl_1, fnxl_2): """ This rouitne merges the key frame 1 and key frame 2 corner search results into a combined FrameNameXyList object containing the corners found for both frames. However, it does not simply transcribe the corner @@ -501,17 +417,11 @@ def construct_merged_fnxl_synchronizing_heliostat_names( # Createa a new FrameNameXyList and add the synchronized name_xy_lists to it. pair_projected_fnxl = fnxl.FrameNameXyList() - pair_projected_fnxl.add_list_of_name_xy_lists( - frame_id_1, common_list_of_name_xy_lists_1 - ) - pair_projected_fnxl.add_list_of_name_xy_lists( - frame_id_2, common_list_of_name_xy_lists_2 - ) + pair_projected_fnxl.add_list_of_name_xy_lists(frame_id_1, common_list_of_name_xy_lists_1) + pair_projected_fnxl.add_list_of_name_xy_lists(frame_id_2, common_list_of_name_xy_lists_2) # Determine whether heliostats are mismatched from key frame 1 to key frame 2. - if (len(common_names) == len(name_set_1)) and ( - len(common_names) == len(name_set_2) - ): + if (len(common_names) == len(name_set_1)) and (len(common_names) == len(name_set_2)): mismatched = False else: mismatched = True @@ -534,24 +444,16 @@ def save_key_corners(self, all_key_frames_corners_fnxl, key_frame_fnxls): if key_frame_fnxl is not None: frame_id_list = key_frame_fnxl.sorted_frame_id_list() if len(frame_id_list) == 0: - raise ValueError( - 'In KeyCorners.save_key_corners(), empty key_frame_fnxl encountered.' - ) + raise ValueError('In KeyCorners.save_key_corners(), empty key_frame_fnxl encountered.') if len(frame_id_list) != 2: raise ValueError( 'In KeyCorners.save_key_corners(), encountered key_frame_fnxl with frames != 2. 
len(frame_id_list) =', len(frame_id_list), ) frame_id = frame_id_list[0] # Sorted, so this is the key frame. - frame_id_str = upf.frame_id_str_given_frame_id( - frame_id, self.input_frame_id_format - ) - key_frame_corners_body_ext = ( - self.input_video_body + '_' + frame_id_str + '_corners_fnxl.csv' - ) - key_frame_corners_dir_body_ext = os.path.join( - self.key_frame_corners_dir, key_frame_corners_body_ext - ) + frame_id_str = upf.frame_id_str_given_frame_id(frame_id, self.input_frame_id_format) + key_frame_corners_body_ext = self.input_video_body + '_' + frame_id_str + '_corners_fnxl.csv' + key_frame_corners_dir_body_ext = os.path.join(self.key_frame_corners_dir, key_frame_corners_body_ext) print( 'In KeyCorners.save_key_corners(), writing found frame_id key corners: ', key_frame_corners_dir_body_ext, @@ -561,43 +463,27 @@ def save_key_corners(self, all_key_frames_corners_fnxl, key_frame_fnxls): def save_data(self, all_key_frames_corners_fnxl, mismatched_key_frame_ids): # Statistics. summary_dict = {} - summary_dict['n_key_frames_with_corners'] = ( - all_key_frames_corners_fnxl.number_of_frames() - ) + summary_dict['n_key_frames_with_corners'] = all_key_frames_corners_fnxl.number_of_frames() print('In KeyCorners.save_data(), writing key frame summary statistics...') - ft.write_dict_file( - 'key frame corners summary statistics', - self.output_data_dir, - self.dict_body, - summary_dict, - ) + ft.write_dict_file('key frame corners summary statistics', self.output_data_dir, self.dict_body, summary_dict) # Key frames with mismatched heliostats. ft.write_text_file( - 'mismatched key frame ids', - self.output_data_dir, - self.mismatched_ids_body, - mismatched_key_frame_ids, + 'mismatched key frame ids', self.output_data_dir, self.mismatched_ids_body, mismatched_key_frame_ids ) # Heliostats per key frame. - heliostats_per_key_frame_dict = ( - all_key_frames_corners_fnxl.heliostats_per_frame() - ) + heliostats_per_key_frame_dict = all_key_frames_corners_fnxl.heliostats_per_frame() print( 'In KeyCorners.save_data(), writing heliostats per key frame:', os.path.join(self.output_data_dir, self.hpkf_body_ext), ) - ft.write_dict_file( - None, self.output_data_dir, self.hpkf_body, heliostats_per_key_frame_dict - ) + ft.write_dict_file(None, self.output_data_dir, self.hpkf_body, heliostats_per_key_frame_dict) # Points per key frame. points_per_key_frame_dict = all_key_frames_corners_fnxl.points_per_frame() print( 'In KeyCorners.save_data(), writing points per key frame: ', os.path.join(self.output_data_dir, self.ppkf_body_ext), ) - ft.write_dict_file( - None, self.output_data_dir, self.ppkf_body, points_per_key_frame_dict - ) + ft.write_dict_file(None, self.output_data_dir, self.ppkf_body, points_per_key_frame_dict) # LOAD RESULT @@ -610,25 +496,14 @@ def read_key_corners(self): self.all_key_frames_corners_fnxl.load(self.all_key_frames_corners_dir_body_ext) # Confirm what was read. 
print('In KeyCorners.read_key_corners(), all-frame key corners read:') - self.all_key_frames_corners_fnxl.print( - max_keys=7, max_value_length=200, indent=4 - ) - print( - 'In KeyCorners.read_key_corners(), reading found key corners directory: ', - self.key_frame_corners_dir, - ) - key_frame_corners_body_ext_list = ft.files_in_directory( - self.key_frame_corners_dir - ) + self.all_key_frames_corners_fnxl.print(max_keys=7, max_value_length=200, indent=4) + print('In KeyCorners.read_key_corners(), reading found key corners directory: ', self.key_frame_corners_dir) + key_frame_corners_body_ext_list = ft.files_in_directory(self.key_frame_corners_dir) self.list_of_key_frame_corners_dir_body_ext = [] self.list_of_key_frame_corners_fnxls = [] for key_frame_corners_body_ext in key_frame_corners_body_ext_list: - key_frame_corners_dir_body_ext = os.path.join( - self.key_frame_corners_dir, key_frame_corners_body_ext - ) - self.list_of_key_frame_corners_dir_body_ext.append( - key_frame_corners_dir_body_ext - ) + key_frame_corners_dir_body_ext = os.path.join(self.key_frame_corners_dir, key_frame_corners_body_ext) + self.list_of_key_frame_corners_dir_body_ext.append(key_frame_corners_dir_body_ext) print( 'In KeyCorners.read_key_corners(), reading found key corners file: ', key_frame_corners_dir_body_ext, @@ -639,10 +514,7 @@ def read_key_corners(self): def read_data(self): # Statistics. - print( - 'In KeyCorners.read_data(), reading frame statistics: ', - self.dict_dir_body_ext, - ) + print('In KeyCorners.read_data(), reading frame statistics: ', self.dict_dir_body_ext) self.frame_statistics_dict = ft.read_dict(self.dict_dir_body_ext) # Confirm what was read. print('In KeyCorners.read_data(), frame statistics read:') @@ -655,19 +527,13 @@ def read_data(self): for key_frame_id in self.mismatched_key_frame_ids: print(' ', key_frame_id) # Heliostats per key frame. - print( - 'In KeyCorners.read_data(), reading heliostats per key frame: ', - self.hpkf_dir_body_ext, - ) + print('In KeyCorners.read_data(), reading heliostats per key frame: ', self.hpkf_dir_body_ext) self.hpkf_dict = ft.read_dict(self.hpkf_dir_body_ext) # Confirm what was read. print('In KeyCorners.read_data(), heliostats per key frame read:') dt.print_dict(self.hpkf_dict, max_keys=7, max_value_length=200, indent=4) # Points per key frame. - print( - 'In KeyCorners.read_data(), reading points per key frame: ', - self.ppkf_dir_body_ext, - ) + print('In KeyCorners.read_data(), reading points per key frame: ', self.ppkf_dir_body_ext) self.ppkf_dict = ft.read_dict(self.ppkf_dir_body_ext) # Confirm what was read. print('In KeyCorners.read_data(), points per key frame read:') @@ -676,9 +542,7 @@ def read_data(self): # RENDER RESULT def render(self): - if ( - self.render_control.draw_key_corners and self.generated_key_corners - ): # Don't render unless we generated. + if self.render_control.draw_key_corners and self.generated_key_corners: # Don't render unless we generated. print('In KeyCorners.render(), rendering frames with key corners...') # Descriptive strings. title_name = 'Key Frame Corners' @@ -687,9 +551,7 @@ def render(self): fig_suffix = '_key_corners_fig' delete_suffix = '.JPG' + fig_suffix + '.png' # Prepare directory. - upc.prepare_render_directory( - self.output_render_dir, delete_suffix, self.render_control - ) + upc.prepare_render_directory(self.output_render_dir, delete_suffix, self.render_control) # Setup annotation styles. 
style_dict = {} style_dict['point_seq'] = rcps.marker( @@ -734,8 +596,7 @@ def render(self): ) # Input/output sources. input_video_dir_body_ext = ( - experiment_dir() - + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' + experiment_dir() + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' ) input_key_frames_dir_body_ext = ( experiment_dir() @@ -745,20 +606,15 @@ def render(self): experiment_dir() + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/080c_FramesNoDuplicates/mavic_zoom/frames/' ) - input_frame_id_format = ( - '06d' # Note different from format used in ffmpeg call, which is '.%06d' - ) + input_frame_id_format = '06d' # Note different from format used in ffmpeg call, which is '.%06d' output_data_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/140_KeyCorners/mavic_zoom/data/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/140_KeyCorners/mavic_zoom/data/' ) output_render_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/140_KeyCorners/mavic_zoom/render/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/140_KeyCorners/mavic_zoom/render/' ) output_construction_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/140c_KeyCorners/mavic_zoom/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/140c_KeyCorners/mavic_zoom/' ) # Render control. render_control = rckc.default() diff --git a/contrib/app/ufacet-s/helio_scan/150_KeyTracks.py b/contrib/app/ufacet-s/helio_scan/150_KeyTracks.py index 85a90b63..417c5a45 100644 --- a/contrib/app/ufacet-s/helio_scan/150_KeyTracks.py +++ b/contrib/app/ufacet-s/helio_scan/150_KeyTracks.py @@ -51,22 +51,14 @@ def __init__( ): # Flags to control rendering on this run. # Check input. if (input_video_dir_body_ext == None) or (len(input_video_dir_body_ext) == 0): - raise ValueError( - 'In KeyTracks.__init__(), null input_video_dir_body_ext encountered.' - ) + raise ValueError('In KeyTracks.__init__(), null input_video_dir_body_ext encountered.') if (output_data_dir == None) or (len(output_data_dir) == 0): - raise ValueError( - 'In KeyTracks.__init__(), null output_data_dir encountered.' - ) + raise ValueError('In KeyTracks.__init__(), null output_data_dir encountered.') if (output_render_dir == None) or (len(output_render_dir) == 0): - raise ValueError( - 'In KeyTracks.__init__(), null output_render_dir encountered.' - ) + raise ValueError('In KeyTracks.__init__(), null output_render_dir encountered.') # Parse input video path components. - input_video_dir, input_video_body, input_video_ext = ft.path_components( - input_video_dir_body_ext - ) + input_video_dir, input_video_body, input_video_ext = ft.path_components(input_video_dir_body_ext) # Store input. # Execution control. @@ -90,12 +82,8 @@ def __init__( self.render_control = render_control # Found key tracks file names. - self.key_frame_projected_tracks_dir = os.path.join( - self.output_data_dir, 'key_frame_projected_tracks' - ) - self.key_frame_confirmed_tracks_dir = os.path.join( - self.output_data_dir, 'key_frame_confirmed_tracks' - ) + self.key_frame_projected_tracks_dir = os.path.join(self.output_data_dir, 'key_frame_projected_tracks') + self.key_frame_confirmed_tracks_dir = os.path.join(self.output_data_dir, 'key_frame_confirmed_tracks') # Summary statistics file name. 
self.dict_body = self.input_video_body + '_key_frames_with_corners_statistics' @@ -105,16 +93,12 @@ def __init__( # File listing key frames with mismatched heliostats. self.mismatched_ids_body = self.input_video_body + '_mismatched_key_frame_ids' self.mismatched_ids_body_ext = self.mismatched_ids_body + '.txt' - self.mismatched_ids_dir_body_ext = os.path.join( - self.output_data_dir, self.mismatched_ids_body_ext - ) + self.mismatched_ids_dir_body_ext = os.path.join(self.output_data_dir, self.mismatched_ids_body_ext) # Tracked frames per key frame file name. self.tfpkf_body = self.input_video_body + '_tracked_frames_per_key_frame' self.tfpkf_body_ext = self.tfpkf_body + '.csv' - self.tfpkf_dir_body_ext = os.path.join( - self.output_data_dir, self.tfpkf_body_ext - ) + self.tfpkf_dir_body_ext = os.path.join(self.output_data_dir, self.tfpkf_body_ext) # Load key corners files. # Projected. @@ -122,39 +106,28 @@ def __init__( 'In KeyTracks.__init__(), reading found key projected corners directory: ', self.input_key_projected_corners_dir, ) - key_projected_corners_body_ext_list = ft.files_in_directory( - self.input_key_projected_corners_dir - ) + key_projected_corners_body_ext_list = ft.files_in_directory(self.input_key_projected_corners_dir) self.key_projected_corners_dict = {} for key_projected_corners_body_ext in key_projected_corners_body_ext_list: - key_frame_id_str = upf.frame_id_str_given_key_corners_body_ext( - key_projected_corners_body_ext - ) + key_frame_id_str = upf.frame_id_str_given_key_corners_body_ext(key_projected_corners_body_ext) key_frame_id = upf.frame_id_given_frame_id_str(key_frame_id_str) - if (self.specific_frame_ids is None) or ( - key_frame_id in self.specific_frame_ids - ): + if (self.specific_frame_ids is None) or (key_frame_id in self.specific_frame_ids): key_corners_dir_body_ext = os.path.join( self.input_key_projected_corners_dir, key_projected_corners_body_ext ) print( - 'In KeyTracks.__init__(), reading found projected key corners file: ', - key_corners_dir_body_ext, + 'In KeyTracks.__init__(), reading found projected key corners file: ', key_corners_dir_body_ext ) key_frame_projected_corners_fnxl = fnxl.FrameNameXyList() key_frame_projected_corners_fnxl.load(key_corners_dir_body_ext) # Store results. 
key_frame_dict = {} self.key_projected_corners_dict[key_frame_id] = key_frame_dict - self.key_projected_corners_dict[key_frame_id][ - 'key_frame_id_str' - ] = key_frame_id_str + self.key_projected_corners_dict[key_frame_id]['key_frame_id_str'] = key_frame_id_str self.key_projected_corners_dict[key_frame_id][ 'key_projected_corners_body_ext' ] = key_projected_corners_body_ext - self.key_projected_corners_dict[key_frame_id][ - 'key_corners_dir_body_ext' - ] = key_corners_dir_body_ext + self.key_projected_corners_dict[key_frame_id]['key_corners_dir_body_ext'] = key_corners_dir_body_ext self.key_projected_corners_dict[key_frame_id][ 'key_frame_projected_corners_fnxl' ] = key_frame_projected_corners_fnxl @@ -166,39 +139,28 @@ def __init__( 'In KeyTracks.__init__(), reading found key confirmed corners directory: ', self.input_key_confirmed_corners_dir, ) - key_confirmed_corners_body_ext_list = ft.files_in_directory( - self.input_key_confirmed_corners_dir - ) + key_confirmed_corners_body_ext_list = ft.files_in_directory(self.input_key_confirmed_corners_dir) self.key_confirmed_corners_dict = {} for key_confirmed_corners_body_ext in key_confirmed_corners_body_ext_list: - key_frame_id_str = upf.frame_id_str_given_key_corners_body_ext( - key_confirmed_corners_body_ext - ) + key_frame_id_str = upf.frame_id_str_given_key_corners_body_ext(key_confirmed_corners_body_ext) key_frame_id = upf.frame_id_given_frame_id_str(key_frame_id_str) - if (self.specific_frame_ids is None) or ( - key_frame_id in self.specific_frame_ids - ): + if (self.specific_frame_ids is None) or (key_frame_id in self.specific_frame_ids): key_corners_dir_body_ext = os.path.join( self.input_key_confirmed_corners_dir, key_confirmed_corners_body_ext ) print( - 'In KeyTracks.__init__(), reading found confirmed key corners file: ', - key_corners_dir_body_ext, + 'In KeyTracks.__init__(), reading found confirmed key corners file: ', key_corners_dir_body_ext ) key_frame_confirmed_corners_fnxl = fnxl.FrameNameXyList() key_frame_confirmed_corners_fnxl.load(key_corners_dir_body_ext) # Store results. key_frame_dict = {} self.key_confirmed_corners_dict[key_frame_id] = key_frame_dict - self.key_confirmed_corners_dict[key_frame_id][ - 'key_frame_id_str' - ] = key_frame_id_str + self.key_confirmed_corners_dict[key_frame_id]['key_frame_id_str'] = key_frame_id_str self.key_confirmed_corners_dict[key_frame_id][ 'key_confirmed_corners_body_ext' ] = key_confirmed_corners_body_ext - self.key_confirmed_corners_dict[key_frame_id][ - 'key_corners_dir_body_ext' - ] = key_corners_dir_body_ext + self.key_confirmed_corners_dict[key_frame_id]['key_corners_dir_body_ext'] = key_corners_dir_body_ext self.key_confirmed_corners_dict[key_frame_id][ 'key_frame_confirmed_corners_fnxl' ] = key_frame_confirmed_corners_fnxl @@ -209,15 +171,11 @@ def __init__( # Fetch a list of all frame ids in the video (not just key frames). # The corresponding frame_ids are not necessarily in sequential order, because # we previously removed spurious duplicate frames. - self.all_frame_body_ext_list = ft.files_in_directory( - self.input_frame_dir, sort=True - ) + self.all_frame_body_ext_list = ft.files_in_directory(self.input_frame_dir, sort=True) # Confirm what was read. 
max_print_files = 12 print('In KeyTracks.__init__(), self.all_frame_body_ext_list:') - for frame_file in self.all_frame_body_ext_list[ - 0 : min(max_print_files, len(self.all_frame_body_ext_list)) - ]: + for frame_file in self.all_frame_body_ext_list[0 : min(max_print_files, len(self.all_frame_body_ext_list))]: print('In KeyTracks.__init__() ', frame_file) print('...') @@ -242,9 +200,7 @@ def find_and_save_key_tracks(self): or (not ft.directory_exists(self.output_data_dir)) or ft.directory_is_empty(self.output_data_dir) ): - print( - 'In KeyTracks.find_and_save_key_tracks(), constructing key frame tracks...' - ) + print('In KeyTracks.find_and_save_key_tracks(), constructing key frame tracks...') # Determine which key frames to process. key_frame_ids_to_process = dt.sorted_keys( @@ -253,26 +209,16 @@ def find_and_save_key_tracks(self): # Process each key frame_id. if self.single_processor: - print( - 'In KeyTracks.search_key_tracks(), starting key frame corner tracking (single processor)...' - ) + print('In KeyTracks.search_key_tracks(), starting key frame corner tracking (single processor)...') list_of_result_dicts = [] for key_frame_id in key_frame_ids_to_process: list_of_result_dicts.append(self.search_key_track(key_frame_id)) else: - print( - 'In KeyTracks.search_key_tracks(), starting key frame corner tracking (multi-processor)...' - ) - logger = logt.multiprocessing_logger( - self.log_dir_body_ext, level=logging.INFO - ) - logger.info( - '================================= Execution =================================' - ) + print('In KeyTracks.search_key_tracks(), starting key frame corner tracking (multi-processor)...') + logger = logt.multiprocessing_logger(self.log_dir_body_ext, level=logging.INFO) + logger.info('================================= Execution =================================') with Pool(25) as pool: - list_of_result_dicts = pool.map( - self.search_key_track, key_frame_ids_to_process - ) + list_of_result_dicts = pool.map(self.search_key_track, key_frame_ids_to_process) print( 'In KeyTracks.search_key_tracks(), key frame corner tracking done. len(list_of_result_dicts) =', @@ -280,29 +226,19 @@ def find_and_save_key_tracks(self): ) # Summarize search results. - print( - 'In KeyTracks.find_and_save_key_tracks(), key_frame_projected_track_fnxls:' - ) + print('In KeyTracks.find_and_save_key_tracks(), key_frame_projected_track_fnxls:') for result_dict in list_of_result_dicts: key_frame_id = result_dict['key_frame_id'] - key_frame_projected_track_fnxl = result_dict[ - 'key_frame_projected_track_fnxl' - ] + key_frame_projected_track_fnxl = result_dict['key_frame_projected_track_fnxl'] print( ' ' + str(key_frame_id) + ':' ) # Using "str(key_frame_id)" is okay, because we don't want leading zeros. key_frame_projected_track_fnxl.print(max_value_length=200, indent=8) - print( - 'In KeyTracks.find_and_save_key_tracks(), key_frame_confirmed_track_fnxls:' - ) + print('In KeyTracks.find_and_save_key_tracks(), key_frame_confirmed_track_fnxls:') for result_dict in list_of_result_dicts: key_frame_id = result_dict['key_frame_id'] - key_frame_confirmed_track_fnxl = result_dict[ - 'key_frame_confirmed_track_fnxl' - ] - key_frame_confirmed_track_fnxl = result_dict[ - 'key_frame_confirmed_track_fnxl' - ] + key_frame_confirmed_track_fnxl = result_dict['key_frame_confirmed_track_fnxl'] + key_frame_confirmed_track_fnxl = result_dict['key_frame_confirmed_track_fnxl'] print( ' ' + str(key_frame_id) + ':' ) # Using "str(key_frame_id)" is okay, because we don't want leading zeros. 
@@ -323,34 +259,26 @@ def find_and_save_key_tracks(self): def search_key_track(self, key_frame_id): # Notify start. - print( - 'In KeyTracks.search_key_track(), fetching key frames for key_frame_id=' - + str(key_frame_id) - + '...' - ) + print('In KeyTracks.search_key_track(), fetching key frames for key_frame_id=' + str(key_frame_id) + '...') # Initialize logger. if not self.single_processor: # Don't make this a data member of self -- it will cause error: "can't pickle _thread.RLock objects" - local_logger = logt.multiprocessing_logger( - self.log_dir_body_ext, level=logging.INFO - ) + local_logger = logt.multiprocessing_logger(self.log_dir_body_ext, level=logging.INFO) else: local_logger = None try: # Input key frame corners. - key_frame_projected_corners_fnxl = self.key_projected_corners_dict[ - key_frame_id - ]['key_frame_projected_corners_fnxl'] - key_frame_confirmed_corners_fnxl = self.key_confirmed_corners_dict[ - key_frame_id - ]['key_frame_confirmed_corners_fnxl'] + key_frame_projected_corners_fnxl = self.key_projected_corners_dict[key_frame_id][ + 'key_frame_projected_corners_fnxl' + ] + key_frame_confirmed_corners_fnxl = self.key_confirmed_corners_dict[key_frame_id][ + 'key_frame_confirmed_corners_fnxl' + ] # Solar field parameters. - specifications = ( - Dspec.nsttf_specifications() - ) # ?? SCAFFOLDING RCB -- MAKE THIS GENERAL + specifications = Dspec.nsttf_specifications() # ?? SCAFFOLDING RCB -- MAKE THIS GENERAL # Execution control. iterations = 3 # ?? SCAFFOLDING RCB -- RE-EVALUATE THIS @@ -401,27 +329,20 @@ def search_key_track(self, key_frame_id): logt.info( local_logger, - 'In KeyTracks.search_key_track_aux(), corners done key_frame_id=' - + str(key_frame_id) - + '.', + 'In KeyTracks.search_key_track_aux(), corners done key_frame_id=' + str(key_frame_id) + '.', ) except: error_type, error_instance, traceback = sys.exc_info() logt.log_and_raise_value_error( local_logger, - 'In KeyTracks.search_key_track(), Key Frame 1 Processing Exception: ' - + str(error_instance.args[0]), + 'In KeyTracks.search_key_track(), Key Frame 1 Processing Exception: ' + str(error_instance.args[0]), ) # Assemble result dictionary. result_dict = {} result_dict['key_frame_id'] = key_frame_id - result_dict['key_frame_projected_track_fnxl'] = ( - search_result.key_frame_projected_track_fnxl - ) - result_dict['key_frame_confirmed_track_fnxl'] = ( - search_result.key_frame_confirmed_track_fnxl - ) + result_dict['key_frame_projected_track_fnxl'] = search_result.key_frame_projected_track_fnxl + result_dict['key_frame_confirmed_track_fnxl'] = search_result.key_frame_confirmed_track_fnxl return result_dict # WRITE RESULT @@ -430,18 +351,11 @@ def save_key_tracks(self, list_of_result_dicts): ft.create_directories_if_necessary(self.output_data_dir) # The FrameNameXyList track results for each key frame. for result_dict in list_of_result_dicts: - key_frame_id_str = upf.frame_id_str_given_frame_id( - result_dict['key_frame_id'], self.input_frame_id_format - ) + key_frame_id_str = upf.frame_id_str_given_frame_id(result_dict['key_frame_id'], self.input_frame_id_format) # Projected. 
- key_frame_projected_track_fnxl = result_dict[ - 'key_frame_projected_track_fnxl' - ] + key_frame_projected_track_fnxl = result_dict['key_frame_projected_track_fnxl'] key_frame_projected_track_body_ext = ( - self.input_video_body - + '_' - + key_frame_id_str - + '_projected_tracks_fnxl.csv' + self.input_video_body + '_' + key_frame_id_str + '_projected_tracks_fnxl.csv' ) key_frame_projected_track_dir_body_ext = os.path.join( self.key_frame_projected_tracks_dir, key_frame_projected_track_body_ext @@ -452,14 +366,9 @@ def save_key_tracks(self, list_of_result_dicts): ) key_frame_projected_track_fnxl.save(key_frame_projected_track_dir_body_ext) # Confirmed. - key_frame_confirmed_track_fnxl = result_dict[ - 'key_frame_confirmed_track_fnxl' - ] + key_frame_confirmed_track_fnxl = result_dict['key_frame_confirmed_track_fnxl'] key_frame_confirmed_track_body_ext = ( - self.input_video_body - + '_' - + key_frame_id_str - + '_confirmed_tracks_fnxl.csv' + self.input_video_body + '_' + key_frame_id_str + '_confirmed_tracks_fnxl.csv' ) key_frame_confirmed_track_dir_body_ext = os.path.join( self.key_frame_confirmed_tracks_dir, key_frame_confirmed_track_body_ext @@ -475,32 +384,18 @@ def save_data(self, list_of_result_dicts): summary_dict = {} summary_dict['n_key_frame_tracks'] = len(list_of_result_dicts) print('In KeyTracks.save_data(), writing key frame summary statistics...') - ft.write_dict_file( - 'key frame tracks summary statistics', - self.output_data_dir, - self.dict_body, - summary_dict, - ) + ft.write_dict_file('key frame tracks summary statistics', self.output_data_dir, self.dict_body, summary_dict) # Tracked frames per key frame. tracked_frames_per_key_frame_dict = {} for result_dict in list_of_result_dicts: key_frame_id = result_dict['key_frame_id'] - key_frame_projected_track_fnxl = result_dict[ - 'key_frame_projected_track_fnxl' - ] - tracked_frames_per_key_frame_dict[key_frame_id] = ( - key_frame_projected_track_fnxl.number_of_frames() - ) + key_frame_projected_track_fnxl = result_dict['key_frame_projected_track_fnxl'] + tracked_frames_per_key_frame_dict[key_frame_id] = key_frame_projected_track_fnxl.number_of_frames() print( 'In KeyTracks.save_data(), writing tracked_frames per key frame:', os.path.join(self.output_data_dir, self.tfpkf_body_ext), ) - ft.write_dict_file( - None, - self.output_data_dir, - self.tfpkf_body, - tracked_frames_per_key_frame_dict, - ) + ft.write_dict_file(None, self.output_data_dir, self.tfpkf_body, tracked_frames_per_key_frame_dict) # LOAD RESULT @@ -510,73 +405,51 @@ def read_key_tracks(self): 'In KeyTracks.read_key_tracks(), reading found key frame projected tracks directory: ', self.key_frame_projected_tracks_dir, ) - key_frame_projected_track_body_ext_list = ft.files_in_directory( - self.key_frame_projected_tracks_dir - ) + key_frame_projected_track_body_ext_list = ft.files_in_directory(self.key_frame_projected_tracks_dir) self.list_of_key_frame_projected_track_dir_body_ext = [] self.list_of_key_frame_projected_track_fnxls = [] - for ( - key_frame_projected_track_body_ext - ) in key_frame_projected_track_body_ext_list: + for key_frame_projected_track_body_ext in key_frame_projected_track_body_ext_list: key_frame_projected_track_dir_body_ext = os.path.join( self.key_frame_projected_tracks_dir, key_frame_projected_track_body_ext ) - self.list_of_key_frame_projected_track_dir_body_ext.append( - key_frame_projected_track_dir_body_ext - ) + self.list_of_key_frame_projected_track_dir_body_ext.append(key_frame_projected_track_dir_body_ext) print( 'In 
KeyTracks.read_key_tracks(), reading found key frame projected track: ', key_frame_projected_track_dir_body_ext, ) key_frame_projected_track_fnxl = fnxl.FrameNameXyList() key_frame_projected_track_fnxl.load(key_frame_projected_track_dir_body_ext) - self.list_of_key_frame_projected_track_fnxls.append( - key_frame_projected_track_fnxl - ) + self.list_of_key_frame_projected_track_fnxls.append(key_frame_projected_track_fnxl) # Confirmed. print( 'In KeyTracks.read_key_tracks(), reading found key frame confirmed tracks directory: ', self.key_frame_confirmed_tracks_dir, ) - key_frame_confirmed_track_body_ext_list = ft.files_in_directory( - self.key_frame_confirmed_tracks_dir - ) + key_frame_confirmed_track_body_ext_list = ft.files_in_directory(self.key_frame_confirmed_tracks_dir) self.list_of_key_frame_confirmed_track_dir_body_ext = [] self.list_of_key_frame_confirmed_track_fnxls = [] - for ( - key_frame_confirmed_track_body_ext - ) in key_frame_confirmed_track_body_ext_list: + for key_frame_confirmed_track_body_ext in key_frame_confirmed_track_body_ext_list: key_frame_confirmed_track_dir_body_ext = os.path.join( self.key_frame_confirmed_tracks_dir, key_frame_confirmed_track_body_ext ) - self.list_of_key_frame_confirmed_track_dir_body_ext.append( - key_frame_confirmed_track_dir_body_ext - ) + self.list_of_key_frame_confirmed_track_dir_body_ext.append(key_frame_confirmed_track_dir_body_ext) print( 'In KeyTracks.read_key_tracks(), reading found key frame confirmed track: ', key_frame_confirmed_track_dir_body_ext, ) key_frame_confirmed_track_fnxl = fnxl.FrameNameXyList() key_frame_confirmed_track_fnxl.load(key_frame_confirmed_track_dir_body_ext) - self.list_of_key_frame_confirmed_track_fnxls.append( - key_frame_confirmed_track_fnxl - ) + self.list_of_key_frame_confirmed_track_fnxls.append(key_frame_confirmed_track_fnxl) def read_data(self): # Statistics. - print( - 'In KeyTracks.read_data(), reading frame statistics: ', - self.dict_dir_body_ext, - ) + print('In KeyTracks.read_data(), reading frame statistics: ', self.dict_dir_body_ext) self.frame_statistics_dict = ft.read_dict(self.dict_dir_body_ext) # Confirm what was read. print('In KeyTracks.read_data(), frame statistics read:') dt.print_dict(self.frame_statistics_dict, indent=4) # Tracked frames per key frame. - print( - 'In KeyTracks.read_data(), reading tracked frames per key frame: ', - self.tfpkf_dir_body_ext, - ) + print('In KeyTracks.read_data(), reading tracked frames per key frame: ', self.tfpkf_dir_body_ext) self.tfpkf_dict = ft.read_dict(self.tfpkf_dir_body_ext) # Confirm what was read. print('In KeyTracks.read_data(), tracked frames per key frame read:') @@ -585,13 +458,9 @@ def read_data(self): # RENDER RESULT def render(self): - if ( - self.render_control.draw_key_tracks and self.generated_key_tracks - ): # Don't render unless we generated. + if self.render_control.draw_key_tracks and self.generated_key_tracks: # Don't render unless we generated. print('In KeyTracks.render(), rendering key frame tracks...') - print( - 'WARNING: In KeyTracks.render(), track rendering not implemented yet.' - ) + print('WARNING: In KeyTracks.render(), track rendering not implemented yet.') if __name__ == "__main__": @@ -613,8 +482,7 @@ def render(self): ) # Input/output sources. 
input_video_dir_body_ext = ( - experiment_dir() - + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' + experiment_dir() + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' ) input_key_projected_corners_dir = ( experiment_dir() @@ -628,23 +496,18 @@ def render(self): experiment_dir() + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/080c_FramesNoDuplicates/mavic_zoom/frames/' ) - input_frame_id_format = ( - '06d' # Note different from format used in ffmpeg call, which is '.%06d' - ) + input_frame_id_format = '06d' # Note different from format used in ffmpeg call, which is '.%06d' # output_data_dir = experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/Small_150_KeyTracks/mavic_zoom/data/' # output_render_dir = experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/Small_150_KeyTracks/mavic_zoom/render/' # output_construction_dir = experiment_dir() + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/Small_150c_KeyTracks/mavic_zoom/' output_data_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/150_KeyTracks/mavic_zoom/data/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/150_KeyTracks/mavic_zoom/data/' ) output_render_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/150_KeyTracks/mavic_zoom/render/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/150_KeyTracks/mavic_zoom/render/' ) output_construction_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/150c_KeyTracks/mavic_zoom/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/150c_KeyTracks/mavic_zoom/' ) # Render control. render_control = rckt.default() diff --git a/contrib/app/ufacet-s/helio_scan/160_VideoTracks.py b/contrib/app/ufacet-s/helio_scan/160_VideoTracks.py index 66d4bc6a..2cea3053 100644 --- a/contrib/app/ufacet-s/helio_scan/160_VideoTracks.py +++ b/contrib/app/ufacet-s/helio_scan/160_VideoTracks.py @@ -49,22 +49,14 @@ def __init__( ): # Flags to control rendering on this run, for the confirmed video. # Check input. if (input_video_dir_body_ext == None) or (len(input_video_dir_body_ext) == 0): - raise ValueError( - 'In VideoTracks.__init__(), null input_video_dir_body_ext encountered.' - ) + raise ValueError('In VideoTracks.__init__(), null input_video_dir_body_ext encountered.') if (output_data_dir == None) or (len(output_data_dir) == 0): - raise ValueError( - 'In VideoTracks.__init__(), null output_data_dir encountered.' - ) + raise ValueError('In VideoTracks.__init__(), null output_data_dir encountered.') if (output_render_dir == None) or (len(output_render_dir) == 0): - raise ValueError( - 'In VideoTracks.__init__(), null output_render_dir encountered.' - ) + raise ValueError('In VideoTracks.__init__(), null output_render_dir encountered.') # Parse input video path components. - input_video_dir, input_video_body, input_video_ext = ft.path_components( - input_video_dir_body_ext - ) + input_video_dir, input_video_body, input_video_ext = ft.path_components(input_video_dir_body_ext) # Store input. # Execution control. @@ -91,95 +83,59 @@ def __init__( # Found video tracks file names. # Projected. 
- self.video_projected_tracks_body = ( - self.input_video_body + '_video_projected_tracks_fnxl' - ) + self.video_projected_tracks_body = self.input_video_body + '_video_projected_tracks_fnxl' self.video_projected_tracks_body_ext = self.video_projected_tracks_body + '.csv' self.video_projected_tracks_dir_body_ext = os.path.join( self.output_data_dir, self.video_projected_tracks_body_ext ) # Confirmed. - self.video_confirmed_tracks_body = ( - self.input_video_body + '_video_confirmed_tracks_fnxl' - ) + self.video_confirmed_tracks_body = self.input_video_body + '_video_confirmed_tracks_fnxl' self.video_confirmed_tracks_body_ext = self.video_confirmed_tracks_body + '.csv' self.video_confirmed_tracks_dir_body_ext = os.path.join( self.output_data_dir, self.video_confirmed_tracks_body_ext ) # Output construction frame directories. - self.output_construction_projected_dir = os.path.join( - self.output_construction_dir, 'projected' - ) - self.output_construction_confirmed_dir = os.path.join( - self.output_construction_dir, 'confirmed' - ) + self.output_construction_projected_dir = os.path.join(self.output_construction_dir, 'projected') + self.output_construction_confirmed_dir = os.path.join(self.output_construction_dir, 'confirmed') # Output video file name. # Projected. - self.output_video_projected_body = ( - self.input_video_body + '_video_projected_tracks' - ) - self.output_video_projected_dir_body = os.path.join( - self.output_render_dir, self.output_video_projected_body - ) + self.output_video_projected_body = self.input_video_body + '_video_projected_tracks' + self.output_video_projected_dir_body = os.path.join(self.output_render_dir, self.output_video_projected_body) # Confirmed. - self.output_video_confirmed_body = ( - self.input_video_body + '_video_confirmed_tracks' - ) - self.output_video_confirmed_dir_body = os.path.join( - self.output_render_dir, self.output_video_confirmed_body - ) + self.output_video_confirmed_body = self.input_video_body + '_video_confirmed_tracks' + self.output_video_confirmed_dir_body = os.path.join(self.output_render_dir, self.output_video_confirmed_body) # Summary statistics file name. # Projected. - self.dict_projected_body = ( - self.input_video_body + '_video_projected_tracks_statistics' - ) + self.dict_projected_body = self.input_video_body + '_video_projected_tracks_statistics' self.dict_projected_body_ext = self.dict_projected_body + '.csv' - self.dict_projected_dir_body_ext = os.path.join( - self.output_data_dir, self.dict_projected_body_ext - ) + self.dict_projected_dir_body_ext = os.path.join(self.output_data_dir, self.dict_projected_body_ext) # Confirmed. - self.dict_confirmed_body = ( - self.input_video_body + '_video_confirmed_tracks_statistics' - ) + self.dict_confirmed_body = self.input_video_body + '_video_confirmed_tracks_statistics' self.dict_confirmed_body_ext = self.dict_confirmed_body + '.csv' - self.dict_confirmed_dir_body_ext = os.path.join( - self.output_data_dir, self.dict_confirmed_body_ext - ) + self.dict_confirmed_dir_body_ext = os.path.join(self.output_data_dir, self.dict_confirmed_body_ext) # Heliostats per video frame file name. # Projected. 
- self.phpvf_body = ( - self.input_video_body + '_projected_heliostats_per_video_frame' - ) + self.phpvf_body = self.input_video_body + '_projected_heliostats_per_video_frame' self.phpvf_body_ext = self.phpvf_body + '.csv' - self.phpvf_dir_body_ext = os.path.join( - self.output_data_dir, self.phpvf_body_ext - ) + self.phpvf_dir_body_ext = os.path.join(self.output_data_dir, self.phpvf_body_ext) # Confirmed. - self.chpvf_body = ( - self.input_video_body + '_confirmed_heliostats_per_video_frame' - ) + self.chpvf_body = self.input_video_body + '_confirmed_heliostats_per_video_frame' self.chpvf_body_ext = self.chpvf_body + '.csv' - self.chpvf_dir_body_ext = os.path.join( - self.output_data_dir, self.chpvf_body_ext - ) + self.chpvf_dir_body_ext = os.path.join(self.output_data_dir, self.chpvf_body_ext) # Points per video frame file name. # Projected. self.pppvf_body = self.input_video_body + '_projected_points_per_video_frame' self.pppvf_body_ext = self.pppvf_body + '.csv' - self.pppvf_dir_body_ext = os.path.join( - self.output_data_dir, self.pppvf_body_ext - ) + self.pppvf_dir_body_ext = os.path.join(self.output_data_dir, self.pppvf_body_ext) # Confirmed. self.cppvf_body = self.input_video_body + '_confirmed_points_per_video_frame' self.cppvf_body_ext = self.cppvf_body + '.csv' - self.cppvf_dir_body_ext = os.path.join( - self.output_data_dir, self.cppvf_body_ext - ) + self.cppvf_dir_body_ext = os.path.join(self.output_data_dir, self.cppvf_body_ext) # Load key frame track files. # Projected. @@ -187,18 +143,12 @@ def __init__( 'In VideoTracks.__init__(), reading found key frame projected tracks directory: ', self.input_key_projected_tracks_dir, ) - key_projected_tracks_body_ext_list = ft.files_in_directory( - self.input_key_projected_tracks_dir - ) + key_projected_tracks_body_ext_list = ft.files_in_directory(self.input_key_projected_tracks_dir) self.key_projected_tracks_dict = {} for key_projected_tracks_body_ext in key_projected_tracks_body_ext_list: - key_frame_id_str = upf.frame_id_str_given_key_projected_tracks_body_ext( - key_projected_tracks_body_ext - ) + key_frame_id_str = upf.frame_id_str_given_key_projected_tracks_body_ext(key_projected_tracks_body_ext) key_frame_id = upf.frame_id_given_frame_id_str(key_frame_id_str) - if (self.specific_frame_ids is None) or ( - key_frame_id in self.specific_frame_ids - ): + if (self.specific_frame_ids is None) or (key_frame_id in self.specific_frame_ids): key_projected_tracks_dir_body_ext = os.path.join( self.input_key_projected_tracks_dir, key_projected_tracks_body_ext ) @@ -214,49 +164,37 @@ def __init__( # Store results. 
key_frame_dict = {} self.key_projected_tracks_dict[key_frame_id] = key_frame_dict - self.key_projected_tracks_dict[key_frame_id][ - 'key_frame_id_str' - ] = key_frame_id_str + self.key_projected_tracks_dict[key_frame_id]['key_frame_id_str'] = key_frame_id_str self.key_projected_tracks_dict[key_frame_id][ 'key_projected_track_body_ext' ] = key_projected_tracks_body_ext self.key_projected_tracks_dict[key_frame_id][ 'key_projected_track_dir_body_ext' ] = key_projected_tracks_dir_body_ext - self.key_projected_tracks_dict[key_frame_id][ - 'key_projected_track_fnxl' - ] = key_projected_track_fnxl - self.key_projected_tracks_dict[key_frame_id][ - 'key_projected_track_n_frames' - ] = len(key_projected_track_fnxl_sorted_frame_id_list) - self.key_projected_tracks_dict[key_frame_id][ - 'key_projected_track_min_frame_id' - ] = key_projected_track_fnxl_sorted_frame_id_list[0] - self.key_projected_tracks_dict[key_frame_id][ - 'key_projected_track_max_frame_id' - ] = key_projected_track_fnxl_sorted_frame_id_list[-1] + self.key_projected_tracks_dict[key_frame_id]['key_projected_track_fnxl'] = key_projected_track_fnxl + self.key_projected_tracks_dict[key_frame_id]['key_projected_track_n_frames'] = len( + key_projected_track_fnxl_sorted_frame_id_list + ) + self.key_projected_tracks_dict[key_frame_id]['key_projected_track_min_frame_id'] = ( + key_projected_track_fnxl_sorted_frame_id_list[0] + ) + self.key_projected_tracks_dict[key_frame_id]['key_projected_track_max_frame_id'] = ( + key_projected_track_fnxl_sorted_frame_id_list[-1] + ) # Confirm what was read. print('In KeyTracks.__init__(), found key frame projected tracks dictionary:') - dt.print_dict_of_dicts( - self.key_projected_tracks_dict, max_value_2_length=200, indent_1=4 - ) + dt.print_dict_of_dicts(self.key_projected_tracks_dict, max_value_2_length=200, indent_1=4) # Confirmed. print( 'In VideoTracks.__init__(), reading found key frame confirmed tracks directory: ', self.input_key_confirmed_tracks_dir, ) - key_confirmed_tracks_body_ext_list = ft.files_in_directory( - self.input_key_confirmed_tracks_dir - ) + key_confirmed_tracks_body_ext_list = ft.files_in_directory(self.input_key_confirmed_tracks_dir) self.key_confirmed_tracks_dict = {} for key_confirmed_tracks_body_ext in key_confirmed_tracks_body_ext_list: - key_frame_id_str = upf.frame_id_str_given_key_confirmed_tracks_body_ext( - key_confirmed_tracks_body_ext - ) + key_frame_id_str = upf.frame_id_str_given_key_confirmed_tracks_body_ext(key_confirmed_tracks_body_ext) key_frame_id = upf.frame_id_given_frame_id_str(key_frame_id_str) - if (self.specific_frame_ids is None) or ( - key_frame_id in self.specific_frame_ids - ): + if (self.specific_frame_ids is None) or (key_frame_id in self.specific_frame_ids): key_confirmed_tracks_dir_body_ext = os.path.join( self.input_key_confirmed_tracks_dir, key_confirmed_tracks_body_ext ) @@ -272,45 +210,35 @@ def __init__( # Store results. 
key_frame_dict = {} self.key_confirmed_tracks_dict[key_frame_id] = key_frame_dict - self.key_confirmed_tracks_dict[key_frame_id][ - 'key_frame_id_str' - ] = key_frame_id_str + self.key_confirmed_tracks_dict[key_frame_id]['key_frame_id_str'] = key_frame_id_str self.key_confirmed_tracks_dict[key_frame_id][ 'key_confirmed_track_body_ext' ] = key_confirmed_tracks_body_ext self.key_confirmed_tracks_dict[key_frame_id][ 'key_confirmed_track_dir_body_ext' ] = key_confirmed_tracks_dir_body_ext - self.key_confirmed_tracks_dict[key_frame_id][ - 'key_confirmed_track_fnxl' - ] = key_confirmed_track_fnxl - self.key_confirmed_tracks_dict[key_frame_id][ - 'key_confirmed_track_n_frames' - ] = len(key_confirmed_track_fnxl_sorted_frame_id_list) - self.key_confirmed_tracks_dict[key_frame_id][ - 'key_confirmed_track_min_frame_id' - ] = key_confirmed_track_fnxl_sorted_frame_id_list[0] - self.key_confirmed_tracks_dict[key_frame_id][ - 'key_confirmed_track_max_frame_id' - ] = key_confirmed_track_fnxl_sorted_frame_id_list[-1] + self.key_confirmed_tracks_dict[key_frame_id]['key_confirmed_track_fnxl'] = key_confirmed_track_fnxl + self.key_confirmed_tracks_dict[key_frame_id]['key_confirmed_track_n_frames'] = len( + key_confirmed_track_fnxl_sorted_frame_id_list + ) + self.key_confirmed_tracks_dict[key_frame_id]['key_confirmed_track_min_frame_id'] = ( + key_confirmed_track_fnxl_sorted_frame_id_list[0] + ) + self.key_confirmed_tracks_dict[key_frame_id]['key_confirmed_track_max_frame_id'] = ( + key_confirmed_track_fnxl_sorted_frame_id_list[-1] + ) # Confirm what was read. print('In KeyTracks.__init__(), found key frame confirmed tracks dictionary:') - dt.print_dict_of_dicts( - self.key_confirmed_tracks_dict, max_value_2_length=200, indent_1=4 - ) + dt.print_dict_of_dicts(self.key_confirmed_tracks_dict, max_value_2_length=200, indent_1=4) # Fetch a list of all frame ids in the video (not just key frames). # The corresponding frame_ids are not necessarily in sequential order, because # we previously removed spurious duplicate frames. - self.all_frame_file_list = ft.files_in_directory( - self.input_frame_dir, sort=True - ) + self.all_frame_file_list = ft.files_in_directory(self.input_frame_dir, sort=True) # Confirm what was read. max_print_files = 12 print('In VideoTracks.__init__(), self.all_frame_file_list:') - for frame_file in self.all_frame_file_list[ - 0 : min(max_print_files, len(self.all_frame_file_list)) - ]: + for frame_file in self.all_frame_file_list[0 : min(max_print_files, len(self.all_frame_file_list))]: print('In VideoTracks.__init__() ', frame_file) print('...') @@ -339,19 +267,13 @@ def construct_and_save_video_tracks(self): self.generated_video_projected_tracks = False self.generated_video_confirmed_tracks = False # Projected. - self.construct_and_save_video_tracks_aux( - self.key_projected_tracks_dict, 'projected' - ) + self.construct_and_save_video_tracks_aux(self.key_projected_tracks_dict, 'projected') self.generated_video_projected_tracks = True # Confirmed. 
- self.construct_and_save_video_tracks_aux( - self.key_confirmed_tracks_dict, 'confirmed' - ) + self.construct_and_save_video_tracks_aux(self.key_confirmed_tracks_dict, 'confirmed') self.generated_video_confirmed_tracks = True - def construct_and_save_video_tracks_aux( - self, key_tracks_dict, projected_or_confirmed_str - ): + def construct_and_save_video_tracks_aux(self, key_tracks_dict, projected_or_confirmed_str): print( 'In VideoTracks.construct_and_save_video_tracks_aux(), constructing ' + projected_or_confirmed_str @@ -370,20 +292,14 @@ def construct_and_save_video_tracks_aux( # Add key frame tracks. # (The selection of which key frames to consider has already been taken into account, by only including key frames of interest in the key_tracks_dict.) # Determine which key frames to process. - key_frame_ids_to_process = dt.sorted_keys( - key_tracks_dict - ) # Already pruned to key frame ids of interest. + key_frame_ids_to_process = dt.sorted_keys(key_tracks_dict) # Already pruned to key frame ids of interest. for key_frame_id in key_frame_ids_to_process: # Fetch the FrameNameXyList object for holding the tracks for this key frame. key_frame_dict = key_tracks_dict[key_frame_id] - key_track_fnxl = key_frame_dict[ - 'key_' + projected_or_confirmed_str + '_track_fnxl' - ] + key_track_fnxl = key_frame_dict['key_' + projected_or_confirmed_str + '_track_fnxl'] for ( frame_id - ) in ( - key_track_fnxl.sorted_frame_id_list() - ): # Use sorted list so status output is easier to understand. + ) in key_track_fnxl.sorted_frame_id_list(): # Use sorted list so status output is easier to understand. video_tracks_fnxl.merge_list_of_name_xy_lists( frame_id, key_track_fnxl.list_of_name_xy_lists(frame_id), @@ -423,10 +339,7 @@ def save_video_tracks(self, video_tracks_fnxl, projected_or_confirmed_str): ) print('ERROR: ' + msg) raise ValueError(msg) - print( - 'In VideoTracks.save_video_tracks(), writing video track file: ', - video_tracks_dir_body_ext, - ) + print('In VideoTracks.save_video_tracks(), writing video track file: ', video_tracks_dir_body_ext) ft.create_directories_if_necessary(self.output_data_dir) video_tracks_fnxl.save(video_tracks_dir_body_ext) @@ -456,9 +369,7 @@ def save_data(self, video_tracks_fnxl, projected_or_confirmed_str): summary_dict = {} summary_dict['n_video_track_frames'] = video_tracks_fnxl.number_of_frames() print( - 'In VideoTracks.save_data(), writing video frame ' - + projected_or_confirmed_str - + ' summary statistics...' + 'In VideoTracks.save_data(), writing video frame ' + projected_or_confirmed_str + ' summary statistics...' ) ft.write_dict_file( 'video ' + projected_or_confirmed_str + ' tracks summary statistics', @@ -469,23 +380,17 @@ def save_data(self, video_tracks_fnxl, projected_or_confirmed_str): # Heliostats per video frame. heliostats_per_video_frame_dict = video_tracks_fnxl.heliostats_per_frame() print( - 'In VideoTracks.save_data(), writing ' - + projected_or_confirmed_str - + ' heliostats per video frame:', + 'In VideoTracks.save_data(), writing ' + projected_or_confirmed_str + ' heliostats per video frame:', os.path.join(self.output_data_dir, hpvf_body_ext), ) - ft.write_dict_file( - None, self.output_data_dir, hpvf_body, heliostats_per_video_frame_dict - ) + ft.write_dict_file(None, self.output_data_dir, hpvf_body, heliostats_per_video_frame_dict) # Points per video frame. 
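The merge loop in construct_and_save_video_tracks_aux above folds each key frame's per-frame annotations into a single video-wide FrameNameXyList keyed by frame id. A simplified stand-in using plain dictionaries (the nested {key_frame_id: {frame_id: [(name, xy_list), ...]}} shape is an assumption for illustration, not the real FrameNameXyList interface):

    from collections import defaultdict

    def merge_key_tracks(key_tracks: dict) -> dict:
        # key_tracks: {key_frame_id: {frame_id: [(name, xy_list), ...]}}
        # Result: one {frame_id: [(name, xy_list), ...]} mapping covering the whole video.
        video_tracks = defaultdict(list)
        for key_frame_id in sorted(key_tracks):  # key frames of interest, in order
            per_frame = key_tracks[key_frame_id]
            for frame_id in sorted(per_frame):  # sorted so progress output is easy to follow
                video_tracks[frame_id].extend(per_frame[frame_id])
        return dict(video_tracks)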
points_per_key_frame_dict = video_tracks_fnxl.points_per_frame() print( 'In VideoTracks.save_data(), writing points per video frame: ', os.path.join(self.output_data_dir, ppvf_body_ext), ) - ft.write_dict_file( - None, self.output_data_dir, ppvf_body, points_per_key_frame_dict - ) + ft.write_dict_file(None, self.output_data_dir, ppvf_body, points_per_key_frame_dict) # LOAD RESULT @@ -499,9 +404,7 @@ def read_video_tracks(self): self.video_projected_tracks_fnxl.load(self.video_projected_tracks_dir_body_ext) # Confirm what was read. print('In VideoTracks.read_video_tracks(), video projected tracks read:') - self.video_projected_tracks_fnxl.print( - max_keys=12, max_value_length=200, indent=4 - ) + self.video_projected_tracks_fnxl.print(max_keys=12, max_value_length=200, indent=4) # Confirmed. print( 'In VideoTracks.read_video_tracks(), reading video confirmed tracks file: ', @@ -511,67 +414,43 @@ def read_video_tracks(self): self.video_confirmed_tracks_fnxl.load(self.video_confirmed_tracks_dir_body_ext) # Confirm what was read. print('In VideoTracks.read_video_tracks(), video confirmed tracks read:') - self.video_confirmed_tracks_fnxl.print( - max_keys=12, max_value_length=200, indent=4 - ) + self.video_confirmed_tracks_fnxl.print(max_keys=12, max_value_length=200, indent=4) def read_data(self): # Projected. # Statistics. - print( - 'In VideoTracks.read_data(), reading projected frame statistics: ', - self.dict_projected_dir_body_ext, - ) - self.projected_frame_statistics_dict = ft.read_dict( - self.dict_projected_dir_body_ext - ) + print('In VideoTracks.read_data(), reading projected frame statistics: ', self.dict_projected_dir_body_ext) + self.projected_frame_statistics_dict = ft.read_dict(self.dict_projected_dir_body_ext) # Confirm what was read. print('In VideoTracks.read_data(), projected frame statistics read:') dt.print_dict(self.projected_frame_statistics_dict, indent=4) # Heliostats per video frame. - print( - 'In VideoTracks.read_data(), reading projected heliostats per video frame: ', - self.phpvf_dir_body_ext, - ) + print('In VideoTracks.read_data(), reading projected heliostats per video frame: ', self.phpvf_dir_body_ext) self.phpvf_dict = ft.read_dict(self.phpvf_dir_body_ext) # Confirm what was read. print('In VideoTracks.read_data(), projected heliostats per video frame read:') dt.print_dict(self.phpvf_dict, max_keys=12, max_value_length=200, indent=4) # Points per video frame. - print( - 'In VideoTracks.read_data(), reading projected points per video frame: ', - self.pppvf_dir_body_ext, - ) + print('In VideoTracks.read_data(), reading projected points per video frame: ', self.pppvf_dir_body_ext) self.pppvf_dict = ft.read_dict(self.pppvf_dir_body_ext) # Confirm what was read. print('In VideoTracks.read_data(), projected points per video frame read:') dt.print_dict(self.pppvf_dict, max_keys=7, max_value_length=200, indent=4) # Confirmed. # Statistics. - print( - 'In VideoTracks.read_data(), reading confirmed frame statistics: ', - self.dict_confirmed_dir_body_ext, - ) - self.confirmed_frame_statistics_dict = ft.read_dict( - self.dict_confirmed_dir_body_ext - ) + print('In VideoTracks.read_data(), reading confirmed frame statistics: ', self.dict_confirmed_dir_body_ext) + self.confirmed_frame_statistics_dict = ft.read_dict(self.dict_confirmed_dir_body_ext) # Confirm what was read. print('In VideoTracks.read_data(), confirmed frame statistics read:') dt.print_dict(self.confirmed_frame_statistics_dict, indent=4) # Heliostats per video frame. 
- print( - 'In VideoTracks.read_data(), reading confirmed heliostats per video frame: ', - self.phpvf_dir_body_ext, - ) + print('In VideoTracks.read_data(), reading confirmed heliostats per video frame: ', self.phpvf_dir_body_ext) self.phpvf_dict = ft.read_dict(self.phpvf_dir_body_ext) # Confirm what was read. print('In VideoTracks.read_data(), confirmed heliostats per video frame read:') dt.print_dict(self.phpvf_dict, max_keys=12, max_value_length=200, indent=4) # Points per video frame. - print( - 'In VideoTracks.read_data(), reading confirmed points per video frame: ', - self.pppvf_dir_body_ext, - ) + print('In VideoTracks.read_data(), reading confirmed points per video frame: ', self.pppvf_dir_body_ext) self.pppvf_dict = ft.read_dict(self.pppvf_dir_body_ext) # Confirm what was read. print('In VideoTracks.read_data(), confirmed points per video frame read:') @@ -581,17 +460,14 @@ def read_data(self): def render(self): print( - 'In VideoTracks.render(), self.output_construction_projected_dir=', - self.output_construction_projected_dir, + 'In VideoTracks.render(), self.output_construction_projected_dir=', self.output_construction_projected_dir ) # ?? SCFFOLDING RCB -- TEMPORARY print( - 'In VideoTracks.render(), self.output_construction_confirmed_dir=', - self.output_construction_confirmed_dir, + 'In VideoTracks.render(), self.output_construction_confirmed_dir=', self.output_construction_confirmed_dir ) # ?? SCFFOLDING RCB -- TEMPORARY # Projected. if ( - self.render_control_projected.draw_video_tracks - and self.generated_video_projected_tracks + self.render_control_projected.draw_video_tracks and self.generated_video_projected_tracks ): # Don't render unless we generated. self.render_aux( self.video_projected_tracks_fnxl, @@ -602,8 +478,7 @@ def render(self): ) # Confirmed. if ( - self.render_control_confirmed.draw_video_tracks - and self.generated_video_confirmed_tracks + self.render_control_confirmed.draw_video_tracks and self.generated_video_confirmed_tracks ): # Don't render unless we generated. self.render_aux( self.video_confirmed_tracks_fnxl, @@ -621,14 +496,9 @@ def render_aux( render_control, projected_or_confirmed_str, ): + print('In VideoTracks.render_aux(), rendering video ' + projected_or_confirmed_str + ' tracks...') print( - 'In VideoTracks.render_aux(), rendering video ' - + projected_or_confirmed_str - + ' tracks...' - ) - print( - 'In VideoTracks.render_aux(), output_construction_dir=', - output_construction_dir, + 'In VideoTracks.render_aux(), output_construction_dir=', output_construction_dir ) # ?? SCFFOLDING RCB -- TEMPORARY # Descriptive strings. title_name = projected_or_confirmed_str.capitalize() + ' Corners' @@ -637,9 +507,7 @@ def render_aux( fig_suffix = '_video_' + projected_or_confirmed_str + '_tracks_fig' delete_suffix = '.JPG' + fig_suffix + '.png' # Prepare directory for frames. - upc.prepare_render_directory( - output_construction_dir, delete_suffix, render_control - ) + upc.prepare_render_directory(output_construction_dir, delete_suffix, render_control) # Setup annotation styles. style_dict = {} style_dict['point_seq'] = rcps.marker( @@ -672,48 +540,26 @@ def render_aux( crop=render_control.video_tracks_crop, ) - print( - 'In VideoTracks.render_aux() for ' - + projected_or_confirmed_str - + ' tracks, draw_frames() has returned.' - ) + print('In VideoTracks.render_aux() for ' + projected_or_confirmed_str + ' tracks, draw_frames() has returned.') # Prepare directory for video. 
ft.create_directories_if_necessary(self.output_render_dir) # Construct the video. - print( - 'In VideoTracks.render_aux(), constructing video of ' - + projected_or_confirmed_str - + ' tracks...' - ) + print('In VideoTracks.render_aux(), constructing video of ' + projected_or_confirmed_str + ' tracks...') vm.construct_video(output_construction_dir, output_video_dir_body) - print( - 'In VideoTracks.render_aux(), ' - + projected_or_confirmed_str - + ' tracks video construction finished.' - ) + print('In VideoTracks.render_aux(), ' + projected_or_confirmed_str + ' tracks video construction finished.') print() # Check to see if all the frames have been constructed. - all_written_frame_file_list = ft.files_in_directory( - self.output_construction_dir, sort=True - ) + all_written_frame_file_list = ft.files_in_directory(self.output_construction_dir, sort=True) # Confirm what was read. max_print_files = 12 print('In VideoTracks.render_aux(), self.all_written_frame_file_list:') - for frame_file in all_written_frame_file_list[ - 0 : min(max_print_files, len(all_written_frame_file_list)) - ]: + for frame_file in all_written_frame_file_list[0 : min(max_print_files, len(all_written_frame_file_list))]: print('In VideoTracks.render_aux() ', frame_file) print('...') - print( - 'In VideoTracks.render_aux(), len(self.all_frame_file_list) =', - len(self.all_frame_file_list), - ) - print( - 'In VideoTracks.render(), len(all_written_frame_file_list) =', - len(all_written_frame_file_list), - ) + print('In VideoTracks.render_aux(), len(self.all_frame_file_list) =', len(self.all_frame_file_list)) + print('In VideoTracks.render(), len(all_written_frame_file_list) =', len(all_written_frame_file_list)) print() @@ -740,8 +586,7 @@ def render_aux( ) # Input/output sources. input_video_dir_body_ext = ( - experiment_dir() - + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' + experiment_dir() + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' ) input_key_projected_tracks_dir = ( experiment_dir() @@ -755,20 +600,15 @@ def render_aux( experiment_dir() + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/080c_FramesNoDuplicates/mavic_zoom/frames/' ) - input_frame_id_format = ( - '06d' # Note different from format used in ffmpeg call, which is '.%06d' - ) + input_frame_id_format = '06d' # Note different from format used in ffmpeg call, which is '.%06d' output_data_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/160_VideoTracks/mavic_zoom/data/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/160_VideoTracks/mavic_zoom/data/' ) output_render_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/160_VideoTracks/mavic_zoom/render/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/160_VideoTracks/mavic_zoom/render/' ) output_construction_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/160c_VideoTracks/mavic_zoom/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/160c_VideoTracks/mavic_zoom/' ) # Render control. 
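render_aux above writes annotated frames into output_construction_dir and then calls vm.construct_video before comparing frame counts. The internals of that helper are not shown in this patch; a generic sketch of the same frames-to-video step using ffmpeg, with assumed frame naming and codec options, could look like:

    import subprocess

    def frames_to_video(frame_dir: str, output_path: str, fps: int = 30) -> None:
        # Glob input keeps ffmpeg independent of the exact frame ids, which are not
        # sequential here because duplicate frames were removed earlier in the pipeline.
        cmd = [
            "ffmpeg", "-y",
            "-framerate", str(fps),
            "-pattern_type", "glob", "-i", f"{frame_dir}/*.png",
            "-c:v", "libx264", "-pix_fmt", "yuv420p",
            output_path,
        ]
        subprocess.run(cmd, check=True)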
# render_control_projected = rcvt.default(color='m') diff --git a/contrib/app/ufacet-s/helio_scan/170_HeliostatTracks.py b/contrib/app/ufacet-s/helio_scan/170_HeliostatTracks.py index 870ab0f9..61c4f28d 100644 --- a/contrib/app/ufacet-s/helio_scan/170_HeliostatTracks.py +++ b/contrib/app/ufacet-s/helio_scan/170_HeliostatTracks.py @@ -48,22 +48,14 @@ def __init__( ): # Flags to control rendering on this run, for the confirmed data. # Check input. if (input_video_dir_body_ext == None) or (len(input_video_dir_body_ext) == 0): - raise ValueError( - 'In HeliostatTracks.__init__(), null input_video_dir_body_ext encountered.' - ) + raise ValueError('In HeliostatTracks.__init__(), null input_video_dir_body_ext encountered.') if (output_data_dir == None) or (len(output_data_dir) == 0): - raise ValueError( - 'In HeliostatTracks.__init__(), null output_data_dir encountered.' - ) + raise ValueError('In HeliostatTracks.__init__(), null output_data_dir encountered.') if (output_render_dir == None) or (len(output_render_dir) == 0): - raise ValueError( - 'In HeliostatTracks.__init__(), null output_render_dir encountered.' - ) + raise ValueError('In HeliostatTracks.__init__(), null output_render_dir encountered.') # Parse input video path components. - input_video_dir, input_video_body, input_video_ext = ft.path_components( - input_video_dir_body_ext - ) + input_video_dir, input_video_body, input_video_ext = ft.path_components(input_video_dir_body_ext) # Store input. # Execution control. @@ -76,12 +68,8 @@ def __init__( self.input_video_dir = input_video_dir self.input_video_body = input_video_body self.input_video_ext = input_video_ext - self.input_video_projected_tracks_dir_body_ext = ( - input_video_projected_tracks_dir_body_ext - ) - self.input_video_confirmed_tracks_dir_body_ext = ( - input_video_confirmed_tracks_dir_body_ext - ) + self.input_video_projected_tracks_dir_body_ext = input_video_projected_tracks_dir_body_ext + self.input_video_confirmed_tracks_dir_body_ext = input_video_confirmed_tracks_dir_body_ext self.input_frame_dir = input_frame_dir self.input_frame_id_format = input_frame_id_format self.output_data_dir = output_data_dir @@ -93,22 +81,14 @@ def __init__( # Found heliostat tracks file name. # Projected. - self.heliostat_projected_tracks_body = ( - self.input_video_body + '_heliostat_projected_tracks_nfxl' - ) - self.heliostat_projected_tracks_body_ext = ( - self.heliostat_projected_tracks_body + '.csv' - ) + self.heliostat_projected_tracks_body = self.input_video_body + '_heliostat_projected_tracks_nfxl' + self.heliostat_projected_tracks_body_ext = self.heliostat_projected_tracks_body + '.csv' self.heliostat_projected_tracks_dir_body_ext = os.path.join( self.output_data_dir, self.heliostat_projected_tracks_body_ext ) # Confirmed. - self.heliostat_confirmed_tracks_body = ( - self.input_video_body + '_heliostat_confirmed_tracks_nfxl' - ) - self.heliostat_confirmed_tracks_body_ext = ( - self.heliostat_confirmed_tracks_body + '.csv' - ) + self.heliostat_confirmed_tracks_body = self.input_video_body + '_heliostat_confirmed_tracks_nfxl' + self.heliostat_confirmed_tracks_body_ext = self.heliostat_confirmed_tracks_body + '.csv' self.heliostat_confirmed_tracks_dir_body_ext = os.path.join( self.output_data_dir, self.heliostat_confirmed_tracks_body_ext ) @@ -128,48 +108,28 @@ def __init__( ) # Output construction frame directories. 
- self.output_construction_projected_dir = os.path.join( - self.output_construction_dir, 'projected' - ) - self.output_construction_confirmed_dir = os.path.join( - self.output_construction_dir, 'confirmed' - ) + self.output_construction_projected_dir = os.path.join(self.output_construction_dir, 'projected') + self.output_construction_confirmed_dir = os.path.join(self.output_construction_dir, 'confirmed') # Summary statistics file name. # Projected. - self.dict_projected_body = ( - self.input_video_body + '_heliostat_projected_tracks_statistics' - ) + self.dict_projected_body = self.input_video_body + '_heliostat_projected_tracks_statistics' self.dict_projected_body_ext = self.dict_projected_body + '.csv' - self.dict_projected_dir_body_ext = os.path.join( - self.output_data_dir, self.dict_projected_body_ext - ) + self.dict_projected_dir_body_ext = os.path.join(self.output_data_dir, self.dict_projected_body_ext) # Conifrmed. - self.dict_confirmed_body = ( - self.input_video_body + '_heliostat_confirmed_tracks_statistics' - ) + self.dict_confirmed_body = self.input_video_body + '_heliostat_confirmed_tracks_statistics' self.dict_confirmed_body_ext = self.dict_confirmed_body + '.csv' - self.dict_confirmed_dir_body_ext = os.path.join( - self.output_data_dir, self.dict_confirmed_body_ext - ) + self.dict_confirmed_dir_body_ext = os.path.join(self.output_data_dir, self.dict_confirmed_body_ext) # Video frames per heliostat dictionary name. # Projected. - self.vfpph_body = ( - self.input_video_body + '_video_projected_frames_per_heliostat' - ) + self.vfpph_body = self.input_video_body + '_video_projected_frames_per_heliostat' self.vfpph_body_ext = self.vfpph_body + '.csv' - self.vfpph_dir_body_ext = os.path.join( - self.output_data_dir, self.vfpph_body_ext - ) + self.vfpph_dir_body_ext = os.path.join(self.output_data_dir, self.vfpph_body_ext) # Confirmed. - self.vfpch_body = ( - self.input_video_body + '_video_confirmed_frames_per_heliostat' - ) + self.vfpch_body = self.input_video_body + '_video_confirmed_frames_per_heliostat' self.vfpch_body_ext = self.vfpch_body + '.csv' - self.vfpch_dir_body_ext = os.path.join( - self.output_data_dir, self.vfpch_body_ext - ) + self.vfpch_dir_body_ext = os.path.join(self.output_data_dir, self.vfpch_body_ext) # Load video tracks files. # Projected. @@ -178,41 +138,29 @@ def __init__( self.input_video_projected_tracks_dir_body_ext, ) self.video_projected_tracks_fnxl = fnxl.FrameNameXyList() - self.video_projected_tracks_fnxl.load( - self.input_video_projected_tracks_dir_body_ext - ) + self.video_projected_tracks_fnxl.load(self.input_video_projected_tracks_dir_body_ext) # Confirm what was read. print('In HeliostatTracks.__init__(), projected video tracks read:') - self.video_projected_tracks_fnxl.print( - max_keys=12, max_value_length=200, indent=4 - ) + self.video_projected_tracks_fnxl.print(max_keys=12, max_value_length=200, indent=4) # Confirmed. print( 'In HeliostatTracks.__init__(), reading confirmed video tracks file: ', self.input_video_confirmed_tracks_dir_body_ext, ) self.video_confirmed_tracks_fnxl = fnxl.FrameNameXyList() - self.video_confirmed_tracks_fnxl.load( - self.input_video_confirmed_tracks_dir_body_ext - ) + self.video_confirmed_tracks_fnxl.load(self.input_video_confirmed_tracks_dir_body_ext) # Confirm what was read. 
print('In HeliostatTracks.__init__(), confirmed video tracks read:') - self.video_confirmed_tracks_fnxl.print( - max_keys=12, max_value_length=200, indent=4 - ) + self.video_confirmed_tracks_fnxl.print(max_keys=12, max_value_length=200, indent=4) # Fetch a list of all frame ids in the video (not just key frames). # The corresponding frame_ids are not necessarily in sequential order, because # we previously removed spurious duplicate frames. - self.all_frame_file_list = ft.files_in_directory( - self.input_frame_dir, sort=True - ) + self.all_frame_file_list = ft.files_in_directory(self.input_frame_dir, sort=True) # Confirm what was read. max_print_files = 12 print('In HeliostatTracks.__init__(), self.all_frame_file_list:') - for frame_file in self.all_frame_file_list[ - 0 : min(max_print_files, len(self.all_frame_file_list)) - ]: + for frame_file in self.all_frame_file_list[0 : min(max_print_files, len(self.all_frame_file_list))]: print('In HeliostatTracks.__init__() ', frame_file) print('...') @@ -242,19 +190,13 @@ def construct_and_save_heliostat_tracks(self): self.generated_heliostat_projected_tracks = False self.generated_heliostat_confirmed_tracks = False # Projected. - self.construct_and_save_heliostat_tracks_aux( - self.video_projected_tracks_fnxl, 'projected' - ) + self.construct_and_save_heliostat_tracks_aux(self.video_projected_tracks_fnxl, 'projected') self.generated_heliostat_projected_tracks = True # Confirmed. - self.construct_and_save_heliostat_tracks_aux( - self.video_confirmed_tracks_fnxl, 'confirmed' - ) + self.construct_and_save_heliostat_tracks_aux(self.video_confirmed_tracks_fnxl, 'confirmed') self.generated_heliostat_confirmed_tracks = True - def construct_and_save_heliostat_tracks_aux( - self, video_tracks_fnxl, projected_or_confirmed_str - ): + def construct_and_save_heliostat_tracks_aux(self, video_tracks_fnxl, projected_or_confirmed_str): print( 'In HeliostatTracks.construct_and_save_heliostat_tracks_aux(), constructing ' + projected_or_confirmed_str @@ -268,9 +210,7 @@ def construct_and_save_heliostat_tracks_aux( heliostat_tracks_nfxl.add_FrameNameXyList(video_tracks_fnxl) # Summarize construction result. - print( - 'In HeliostatTracks.construct_and_save_heliostat_tracks_aux(), constructed heliostat_tracks_nfxl:' - ) + print('In HeliostatTracks.construct_and_save_heliostat_tracks_aux(), constructed heliostat_tracks_nfxl:') heliostat_tracks_nfxl.print(indent=4) # Write heliostat tracks file. @@ -300,8 +240,7 @@ def save_heliostat_tracks(self, heliostat_tracks_nfxl, projected_or_confirmed_st print('ERROR: ' + msg) raise ValueError(msg) print( - 'In HeliostatTracks.save_heliostat_tracks(), writing heliostat track file: ', - heliostat_tracks_dir_body_ext, + 'In HeliostatTracks.save_heliostat_tracks(), writing heliostat track file: ', heliostat_tracks_dir_body_ext ) ft.create_directories_if_necessary(self.output_data_dir) heliostat_tracks_nfxl.save(heliostat_tracks_dir_body_ext) @@ -327,13 +266,9 @@ def save_data(self, heliostat_tracks_nfxl, projected_or_confirmed_str): raise ValueError(msg) # Statistics. summary_dict = {} - summary_dict['n_heliostat_track_frames'] = ( - heliostat_tracks_nfxl.number_of_frames() - ) + summary_dict['n_heliostat_track_frames'] = heliostat_tracks_nfxl.number_of_frames() print( - 'In HeliostatTracks.save_data(), writing key frame ' - + projected_or_confirmed_str - + ' summary statistics...' + 'In HeliostatTracks.save_data(), writing key frame ' + projected_or_confirmed_str + ' summary statistics...' 
) ft.write_dict_file( 'heliostat ' + projected_or_confirmed_str + ' tracks summary statistics', @@ -344,14 +279,10 @@ def save_data(self, heliostat_tracks_nfxl, projected_or_confirmed_str): # Video frames per heliostat. video_frames_per_heliostat_dict = heliostat_tracks_nfxl.frames_per_heliostat() print( - 'In HeliostatTracks.save_data(), writing video frames per ' - + projected_or_confirmed_str - + ' heliostat:', + 'In HeliostatTracks.save_data(), writing video frames per ' + projected_or_confirmed_str + ' heliostat:', os.path.join(self.output_data_dir, vfph_body_ext), ) - ft.write_dict_file( - None, self.output_data_dir, vfph_body, video_frames_per_heliostat_dict - ) + ft.write_dict_file(None, self.output_data_dir, vfph_body, video_frames_per_heliostat_dict) # LOAD RESULT @@ -362,79 +293,47 @@ def read_heliostat_tracks(self): self.heliostat_projected_tracks_dir_body_ext, ) self.heliostat_projected_tracks_nfxl = nfxl.NameFrameXyList() - self.heliostat_projected_tracks_nfxl.load( - self.heliostat_projected_tracks_dir_body_ext - ) + self.heliostat_projected_tracks_nfxl.load(self.heliostat_projected_tracks_dir_body_ext) # Confirm what was read. - print( - 'In HeliostatTracks.read_heliostat_tracks(), heliostat projected tracks read:' - ) - self.heliostat_projected_tracks_nfxl.print( - max_keys=12, max_value_length=200, indent=4 - ) + print('In HeliostatTracks.read_heliostat_tracks(), heliostat projected tracks read:') + self.heliostat_projected_tracks_nfxl.print(max_keys=12, max_value_length=200, indent=4) # Confirmed. print( 'In HeliostatTracks.read_heliostat_tracks(), reading heliostat confirmed tracks file: ', self.heliostat_confirmed_tracks_dir_body_ext, ) self.heliostat_confirmed_tracks_nfxl = nfxl.NameFrameXyList() - self.heliostat_confirmed_tracks_nfxl.load( - self.heliostat_confirmed_tracks_dir_body_ext - ) + self.heliostat_confirmed_tracks_nfxl.load(self.heliostat_confirmed_tracks_dir_body_ext) # Confirm what was read. - print( - 'In HeliostatTracks.read_heliostat_tracks(), heliostat confirmed tracks read:' - ) - self.heliostat_confirmed_tracks_nfxl.print( - max_keys=12, max_value_length=200, indent=4 - ) + print('In HeliostatTracks.read_heliostat_tracks(), heliostat confirmed tracks read:') + self.heliostat_confirmed_tracks_nfxl.print(max_keys=12, max_value_length=200, indent=4) def read_data(self): # Projected. # Statistics. - print( - 'In HeliostatTracks.read_data(), reading projected frame statistics: ', - self.dict_projected_dir_body_ext, - ) - self.projected_frame_statistics_dict = ft.read_dict( - self.dict_projected_dir_body_ext - ) + print('In HeliostatTracks.read_data(), reading projected frame statistics: ', self.dict_projected_dir_body_ext) + self.projected_frame_statistics_dict = ft.read_dict(self.dict_projected_dir_body_ext) # Confirm what was read. print('In HeliostatTracks.read_data(), projected frame statistics read:') dt.print_dict(self.projected_frame_statistics_dict, indent=4) # Heliostats per video frame. - print( - 'In HeliostatTracks.read_data(), reading projected heliostats per video frame: ', - self.vfpph_dir_body_ext, - ) + print('In HeliostatTracks.read_data(), reading projected heliostats per video frame: ', self.vfpph_dir_body_ext) self.vfpph_dict = ft.read_dict(self.vfpph_dir_body_ext) # Confirm what was read. 
- print( - 'In HeliostatTracks.read_data(), projected heliostats per video frame read:' - ) + print('In HeliostatTracks.read_data(), projected heliostats per video frame read:') dt.print_dict(self.vfpph_dict, max_keys=12, max_value_length=200, indent=4) # Confirmed. # Statistics. - print( - 'In HeliostatTracks.read_data(), reading confirmed frame statistics: ', - self.dict_confirmed_dir_body_ext, - ) - self.confirmed_frame_statistics_dict = ft.read_dict( - self.dict_confirmed_dir_body_ext - ) + print('In HeliostatTracks.read_data(), reading confirmed frame statistics: ', self.dict_confirmed_dir_body_ext) + self.confirmed_frame_statistics_dict = ft.read_dict(self.dict_confirmed_dir_body_ext) # Confirm what was read. print('In HeliostatTracks.read_data(), confirmed frame statistics read:') dt.print_dict(self.confirmed_frame_statistics_dict, indent=4) # Heliostats per video frame. - print( - 'In HeliostatTracks.read_data(), reading confirmed heliostats per video frame: ', - self.vfpph_dir_body_ext, - ) + print('In HeliostatTracks.read_data(), reading confirmed heliostats per video frame: ', self.vfpph_dir_body_ext) self.vfpph_dict = ft.read_dict(self.vfpph_dir_body_ext) # Confirm what was read. - print( - 'In HeliostatTracks.read_data(), confirmed heliostats per video frame read:' - ) + print('In HeliostatTracks.read_data(), confirmed heliostats per video frame read:') dt.print_dict(self.vfpph_dict, max_keys=12, max_value_length=200, indent=4) # RENDER RESULT @@ -450,8 +349,7 @@ def render(self): ) # ?? SCAFFOLDING RCB -- TEMPORARY # Projected. if ( - self.render_control_projected.draw_heliostat_tracks - and self.generated_heliostat_projected_tracks + self.render_control_projected.draw_heliostat_tracks and self.generated_heliostat_projected_tracks ): # Don't render unless we generated. self.render_aux( self.heliostat_projected_tracks_nfxl, @@ -461,8 +359,7 @@ def render(self): ) # Confirmed. if ( - self.render_control_confirmed.draw_heliostat_tracks - and self.generated_heliostat_confirmed_tracks + self.render_control_confirmed.draw_heliostat_tracks and self.generated_heliostat_confirmed_tracks ): # Don't render unless we generated. self.render_aux( self.heliostat_confirmed_tracks_nfxl, @@ -471,21 +368,10 @@ def render(self): 'confirmed', ) - def render_aux( - self, - heliostat_tracks_nfxl, - output_construction_dir, - render_control, - projected_or_confirmed_str, - ): + def render_aux(self, heliostat_tracks_nfxl, output_construction_dir, render_control, projected_or_confirmed_str): + print('In HeliostatTracks.render_aux(), rendering heliostat ' + projected_or_confirmed_str + ' tracks...') print( - 'In HeliostatTracks.render_aux(), rendering heliostat ' - + projected_or_confirmed_str - + ' tracks...' - ) - print( - 'In HeliostatTracks.render_aux(), output_construction_dir=', - output_construction_dir, + 'In HeliostatTracks.render_aux(), output_construction_dir=', output_construction_dir ) # ?? SCAFFOLDING RCB -- TEMPORARY print('WARNING: In HeliostatTracks.render_aux(), not implemented yet.') @@ -503,8 +389,7 @@ def render_aux( ) # Input/output sources. 
input_video_dir_body_ext = ( - experiment_dir() - + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' + experiment_dir() + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' ) input_video_projected_tracks_dir_body_ext = ( experiment_dir() @@ -518,9 +403,7 @@ def render_aux( experiment_dir() + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/080c_FramesNoDuplicates/mavic_zoom/frames/' ) - input_frame_id_format = ( - '06d' # Note different from format used in ffmpeg call, which is '.%06d' - ) + input_frame_id_format = '06d' # Note different from format used in ffmpeg call, which is '.%06d' output_data_dir = ( experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/Small_170_HeliostatTracks/mavic_zoom/data/' diff --git a/contrib/app/ufacet-s/helio_scan/180_Heliostats3d.py b/contrib/app/ufacet-s/helio_scan/180_Heliostats3d.py index 902fe117..6a25f453 100644 --- a/contrib/app/ufacet-s/helio_scan/180_Heliostats3d.py +++ b/contrib/app/ufacet-s/helio_scan/180_Heliostats3d.py @@ -66,22 +66,14 @@ def __init__( ): # Flags to control rendering on this run, for the confirmed, undistorted data. # Check input. if (input_video_dir_body_ext == None) or (len(input_video_dir_body_ext) == 0): - raise ValueError( - 'In Heliostats3d.__init__(), null input_video_dir_body_ext encountered.' - ) + raise ValueError('In Heliostats3d.__init__(), null input_video_dir_body_ext encountered.') if (output_data_dir == None) or (len(output_data_dir) == 0): - raise ValueError( - 'In Heliostats3d.__init__(), null output_data_dir encountered.' - ) + raise ValueError('In Heliostats3d.__init__(), null output_data_dir encountered.') if (output_render_dir == None) or (len(output_render_dir) == 0): - raise ValueError( - 'In Heliostats3d.__init__(), null output_render_dir encountered.' - ) + raise ValueError('In Heliostats3d.__init__(), null output_render_dir encountered.') # Parse input video path components. - input_video_dir, input_video_body, input_video_ext = ft.path_components( - input_video_dir_body_ext - ) + input_video_dir, input_video_body, input_video_ext = ft.path_components(input_video_dir_body_ext) # Store input. # Execution control. @@ -89,21 +81,13 @@ def __init__( self.specific_frame_ids = specific_frame_ids self.single_processor = single_processor self.log_dir_body_ext = log_dir_body_ext - self.camera_matrix = ( - utils.CameraMatrix - ) # ?? SCAFFOLDING RCB -- RE-EVALUATE THIS - self.distortion_coefficients = ( - utils.DistCoefs - ) # ?? SCAFFOLDING RCB -- RE-EVALUATE THIS - self.zero_distortion_coefficients = ( - utils.ZeroDistCoefs - ) # ?? SCAFFOLDING RCB -- RE-EVALUATE THIS + self.camera_matrix = utils.CameraMatrix # ?? SCAFFOLDING RCB -- RE-EVALUATE THIS + self.distortion_coefficients = utils.DistCoefs # ?? SCAFFOLDING RCB -- RE-EVALUATE THIS + self.zero_distortion_coefficients = utils.ZeroDistCoefs # ?? SCAFFOLDING RCB -- RE-EVALUATE THIS # self.reconstruct_executable_path = '/Code/ufacet_code/Reconstruct/bin/reconstruct_main.out' # ?? SCAFFOLDING RCB -- RE-EVALUATE THIS. PASS IN? # Input/output sources. 
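The camera_matrix, distortion_coefficients, and zero_distortion_coefficients pulled from utils above are the standard pinhole-plus-distortion inputs that cv.undistortPoints consumes later in this file. A reference sketch of their shapes with placeholder values (these numbers are illustrative, not the actual Mavic calibration):

    import numpy as np
    import cv2 as cv

    # 3x3 intrinsic matrix: focal lengths on the diagonal, principal point in the last column.
    K = np.array([[2868.0, 0.0, 1920.0],
                  [0.0, 2868.0, 1080.0],
                  [0.0, 0.0, 1.0]])
    # Distortion coefficients in OpenCV order (k1, k2, p1, p2, k3); all zeros means "no distortion".
    dist = np.array([-0.025, 0.010, 0.0, 0.0, 0.0])
    zero_dist = np.zeros(5)

    pts = np.array([[100.0, 200.0], [1500.0, 900.0]]).reshape(-1, 1, 2)
    # P=K keeps the output in pixel coordinates instead of normalized image coordinates.
    undistorted = cv.undistortPoints(pts, K, dist, P=K).reshape(-1, 2)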
self.specifications = specifications - self.theoretical_flat_heliostat_dir_body_ext = ( - theoretical_flat_heliostat_dir_body_ext - ) + self.theoretical_flat_heliostat_dir_body_ext = theoretical_flat_heliostat_dir_body_ext self.theoretical_flat_heliostat_dict = uh3a.read_txt_file_to_heliostat( self.theoretical_flat_heliostat_dir_body_ext, self.specifications ) @@ -116,12 +100,8 @@ def __init__( self.input_video_ext = input_video_ext self.input_frame_dir = input_frame_dir self.input_frame_id_format = input_frame_id_format - self.input_heliostat_projected_tracks_dir_body_ext = ( - input_heliostat_projected_tracks_dir_body_ext - ) - self.input_heliostat_confirmed_tracks_dir_body_ext = ( - input_heliostat_confirmed_tracks_dir_body_ext - ) + self.input_heliostat_projected_tracks_dir_body_ext = input_heliostat_projected_tracks_dir_body_ext + self.input_heliostat_confirmed_tracks_dir_body_ext = input_heliostat_confirmed_tracks_dir_body_ext self.output_data_dir = output_data_dir self.output_render_dir = output_render_dir self.output_construction_dir = output_construction_dir @@ -140,31 +120,21 @@ def __init__( # self.output_heliostat_confirmed_undistorted_corner_2d_trajectories_dir = os.path.join(self.output_data_dir, 'confirmed_undistorted_corner_2d_trajectories') # Output ideal heliostat models and their analysis plots. - self.output_ideal_model_dir = os.path.join( - self.output_data_dir, 'IdealSiteWide' - ) + self.output_ideal_model_dir = os.path.join(self.output_data_dir, 'IdealSiteWide') # Flat. - self.theoretical_flat_heliostat_dir_body_ext = ( - self.save_and_analyze_flat_heliostat( - self.specifications.heliostat_design_name - ) + self.theoretical_flat_heliostat_dir_body_ext = self.save_and_analyze_flat_heliostat( + self.specifications.heliostat_design_name ) # Nearest. (self.nearest_smooth_dir_body_ext, self.nearest_design_dir_body_ext) = ( - self.construct_save_and_analyze_key_heliostats( - self.specifications.heliostat_design_name, 'Nearest' - ) + self.construct_save_and_analyze_key_heliostats(self.specifications.heliostat_design_name, 'Nearest') ) # Farthest. (self.farthest_smooth_dir_body_ext, self.farthest_design_dir_body_ext) = ( - self.construct_save_and_analyze_key_heliostats( - self.specifications.heliostat_design_name, 'Farthest' - ) + self.construct_save_and_analyze_key_heliostats(self.specifications.heliostat_design_name, 'Farthest') ) # Demonstration heliostat. - self.demonstration_dir_body_ext = ( - self.construct_save_and_analyze_demonstration_heliostat_corner_xyz_list() - ) + self.demonstration_dir_body_ext = self.construct_save_and_analyze_demonstration_heliostat_corner_xyz_list() # Output heliostat 3-d corner directories. # Projected. @@ -184,26 +154,18 @@ def __init__( # Output heliostat 3-d cnstruction directories. # Projected. - self.output_heliostat_projected_distorted_construct_corners_3d_dir = ( - os.path.join( - self.output_construction_dir, 'projected_distorted_corners_3dc' - ) + self.output_heliostat_projected_distorted_construct_corners_3d_dir = os.path.join( + self.output_construction_dir, 'projected_distorted_corners_3dc' ) - self.output_heliostat_projected_undistorted_construct_corners_3d_dir = ( - os.path.join( - self.output_construction_dir, 'projected_undistorted_corners_3dc' - ) + self.output_heliostat_projected_undistorted_construct_corners_3d_dir = os.path.join( + self.output_construction_dir, 'projected_undistorted_corners_3dc' ) # Confirmed. 
- self.output_heliostat_confirmed_distorted_construct_corners_3d_dir = ( - os.path.join( - self.output_construction_dir, 'confirmed_distorted_corners_3dc' - ) + self.output_heliostat_confirmed_distorted_construct_corners_3d_dir = os.path.join( + self.output_construction_dir, 'confirmed_distorted_corners_3dc' ) - self.output_heliostat_confirmed_undistorted_construct_corners_3d_dir = ( - os.path.join( - self.output_construction_dir, 'confirmed_undistorted_corners_3dc' - ) + self.output_heliostat_confirmed_undistorted_construct_corners_3d_dir = os.path.join( + self.output_construction_dir, 'confirmed_undistorted_corners_3dc' ) # Load video tracks files. @@ -220,14 +182,10 @@ def __init__( self.input_heliostat_confirmed_tracks_dir_body_ext, ) self.heliostat_confirmed_tracks_nfxl = nfxl.NameFrameXyList() - self.heliostat_confirmed_tracks_nfxl.load( - self.input_heliostat_confirmed_tracks_dir_body_ext - ) + self.heliostat_confirmed_tracks_nfxl.load(self.input_heliostat_confirmed_tracks_dir_body_ext) # Confirm what was read. print('In Heliostats3d.__init__(), heliostat confirmed tracks read:') - self.heliostat_confirmed_tracks_nfxl.print( - max_keys=12, max_value_length=200, indent=4 - ) + self.heliostat_confirmed_tracks_nfxl.print(max_keys=12, max_value_length=200, indent=4) # For each NameFrameXyList object, produce a 3-d heliostat shape estimate. self.construct_and_save_heliostat_corners_3d() @@ -242,9 +200,7 @@ def save_and_analyze_flat_heliostat(self, heliostat_design_name): # Save. flat_name = heliostat_design_name + 'Flat' flat_output_dir = os.path.join(self.output_ideal_model_dir, flat_name) - flat_dir_body_ext = uh3a.save_heliostat_3d( - flat_name, self.theoretical_flat_heliostat_xyz_list, flat_output_dir - ) + flat_dir_body_ext = uh3a.save_heliostat_3d(flat_name, self.theoretical_flat_heliostat_xyz_list, flat_output_dir) # Analyze. # Viewpoint for projection analysis plots for ideal heliostats. # @@ -278,20 +234,14 @@ def save_and_analyze_flat_heliostat(self, heliostat_design_name): # Return. return flat_dir_body_ext - def construct_save_and_analyze_key_heliostats( - self, heliostat_design_name, key_name - ): + def construct_save_and_analyze_key_heliostats(self, heliostat_design_name, key_name): # Smooth. # Facets are at z positions corresponding to an uninterrupted smooth paraboloid. # Construct. if key_name == 'Nearest': - focal_length = self.specifications.design_focal_length( - self.specifications.nearest_heliostat_xyz() - ) + focal_length = self.specifications.design_focal_length(self.specifications.nearest_heliostat_xyz()) elif key_name == 'Farthest': - focal_length = self.specifications.design_focal_length( - self.specifications.farthest_heliostat_xyz() - ) + focal_length = self.specifications.design_focal_length(self.specifications.farthest_heliostat_xyz()) else: print( 'ERROR: In Heliostats3d.construct_save_and_analyze_key_heliostats(), unexpected key_name="' @@ -299,17 +249,11 @@ def construct_save_and_analyze_key_heliostats( + '" encountered (1).' ) assert False - smooth_xyz_list = ( - self.specifications.smooth_heliostat_corner_xyz_list_given_focal_length( - focal_length - ) - ) + smooth_xyz_list = self.specifications.smooth_heliostat_corner_xyz_list_given_focal_length(focal_length) # Save. 
smooth_name = heliostat_design_name + key_name + 'Smooth' smooth_output_dir = os.path.join(self.output_ideal_model_dir, smooth_name) - smooth_dir_body_ext = uh3a.save_heliostat_3d( - smooth_name, smooth_xyz_list, smooth_output_dir - ) + smooth_dir_body_ext = uh3a.save_heliostat_3d(smooth_name, smooth_xyz_list, smooth_output_dir) # Analyze. camera_rvec = np.array([-2.68359887, -0.2037837, 0.215282]).reshape( 3, 1 @@ -346,9 +290,7 @@ def construct_save_and_analyze_key_heliostats( # Save. design_name = heliostat_design_name + key_name + 'Design' design_output_dir = os.path.join(self.output_ideal_model_dir, design_name) - design_dir_body_ext = uh3a.save_heliostat_3d( - design_name, design_xyz_list, design_output_dir - ) + design_dir_body_ext = uh3a.save_heliostat_3d(design_name, design_xyz_list, design_output_dir) # Analyze. camera_rvec = np.array([-2.68359887, -0.2037837, 0.215282]).reshape( 3, 1 @@ -383,10 +325,8 @@ def construct_save_and_analyze_demonstration_heliostat_corner_xyz_list(self): demonstration_heliostat_spec[4]['center_x'] += 0.25 # m demonstration_heliostat_spec[4]['center_y'] += 0.25 # m demonstration_heliostat_spec[22]['center_z'] += -0.0508 # m - demonstration_heliostat_corner_xyz_list = ( - self.specifications.heliostat_corner_xyz_list_given_heliostat_spec( - demonstration_heliostat_spec - ) + demonstration_heliostat_corner_xyz_list = self.specifications.heliostat_corner_xyz_list_given_heliostat_spec( + demonstration_heliostat_spec ) # Save. output_hel_name = 'Demonstration' @@ -481,9 +421,7 @@ def construct_and_save_heliostat_corners_3d_aux( infer_dict['projected_or_confirmed_str'] = projected_or_confirmed_str infer_dict['distorted_or_undistorted_str'] = distorted_or_undistorted_str # infer_dict['output_corner_2d_trajectories_dir'] = output_corner_2d_trajectories_dir - infer_dict['output_construct_corners_3d_dir'] = ( - output_construct_corners_3d_dir - ) + infer_dict['output_construct_corners_3d_dir'] = output_construct_corners_3d_dir infer_dict['render_control'] = render_control list_of_infer_dicts.append(infer_dict) @@ -494,33 +432,21 @@ def construct_and_save_heliostat_corners_3d_aux( ) list_of_result_hi3ds = [] for infer_dict in list_of_infer_dicts: - list_of_result_hi3ds.append( - self.execute_heliostat_3d_inference(infer_dict) - ) + list_of_result_hi3ds.append(self.execute_heliostat_3d_inference(infer_dict)) else: print( 'In Heliostats3d.construct_and_save_heliostat_corners_3d_aux(), starting heliostat 3-d inference (multi-processor)...' ) - logger = logt.multiprocessing_logger( - self.log_dir_body_ext, level=logging.INFO - ) - logger.info( - '================================= Execution =================================' - ) + logger = logt.multiprocessing_logger(self.log_dir_body_ext, level=logging.INFO) + logger.info('================================= Execution =================================') with Pool(25) as pool: - list_of_result_hi3ds = pool.map( - self.execute_heliostat_3d_inference, list_of_infer_dicts - ) + list_of_result_hi3ds = pool.map(self.execute_heliostat_3d_inference, list_of_infer_dicts) - print( - 'In Heliostats3d.construct_and_save_heliostat_corners_3d_aux(), heliostat 3-d inference done.' - ) + print('In Heliostats3d.construct_and_save_heliostat_corners_3d_aux(), heliostat 3-d inference done.') # Save the result. for hi3d in list_of_result_hi3ds: - if ( - hi3d is not None - ): # We might have skipped some heliostats, for example for debuggging. + if hi3d is not None: # We might have skipped some heliostats, for example for debuggging. 
# Output file name. output_corners_3d_dir_body_ext = uh3a.corners_3d_dir_body_ext( self.input_video_body, @@ -568,9 +494,7 @@ def execute_heliostat_3d_inference(self, infer_dict): # dt.print_dict(corner_trajectory_dict, indent=4) # ?? SCAFFOLDING RCB -- TEMPORARY # Fetch the list of [frame xy_list] pairs for this heliostat. - list_of_frame_id_xy_lists = heliostat_tracks_nfxl.list_of_frame_xy_lists( - hel_name - ) + list_of_frame_id_xy_lists = heliostat_tracks_nfxl.list_of_frame_xy_lists(hel_name) # Remove lens distortion, if desired. if distorted_or_undistorted_str == 'distorted': list_of_frame_id_xy_lists_to_process = list_of_frame_id_xy_lists @@ -695,11 +619,7 @@ def undistort_xy_list(self, input_xy_list): camera_matrix_2 = self.camera_matrix.copy() xy_array = np.array(input_xy_list).reshape(-1, 2) undistorted_xy_array = cv.undistortPoints( - xy_array, - self.camera_matrix, - self.distortion_coefficients, - None, - camera_matrix_2, + xy_array, self.camera_matrix, self.distortion_coefficients, None, camera_matrix_2 ) undistorted_xy_list = undistorted_xy_array.reshape(-1, 2).tolist() # Points with coordinates [1,-1] are flags for "None" and are expected by OpenCV. Reset these to their original values. @@ -912,16 +832,13 @@ def undistort_xy_list(self, input_xy_list): + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/010_HeliostatModel/data/NSTTF_Facet_Corners.csv' ) input_video_dir_body_ext = ( - experiment_dir() - + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' + experiment_dir() + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' ) input_frame_dir = ( experiment_dir() + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/080c_FramesNoDuplicates/mavic_zoom/frames/' ) - input_frame_id_format = ( - '06d' # Note different from format used in ffmpeg call, which is '.%06d' - ) + input_frame_id_format = '06d' # Note different from format used in ffmpeg call, which is '.%06d' input_video_projected_tracks_dir_body_ext = ( experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/170_HeliostatTracks/mavic_zoom/data/DJI_427t_428_429_heliostat_projected_tracks_nfxl.csv' @@ -931,38 +848,25 @@ def undistort_xy_list(self, input_xy_list): + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/170_HeliostatTracks/mavic_zoom/data/DJI_427t_428_429_heliostat_confirmed_tracks_nfxl.csv' ) output_data_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/180_Heliostats3d/mavic_zoom/data/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/180_Heliostats3d/mavic_zoom/data/' ) output_render_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/180_Heliostats3d/mavic_zoom/render/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/180_Heliostats3d/mavic_zoom/render/' ) output_construction_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/180c_Heliostats3d/mavic_zoom/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Construction/20201203/1544_NS_U/180c_Heliostats3d/mavic_zoom/' ) # Render control. - render_control_projected_distorted = rchr.default( - color='m' - ) # ?? SCAFFOLDING RCB -- TEMPORARY - render_control_projected_undistorted = rchr.default( - color='r' - ) # ?? SCAFFOLDING RCB -- TEMPORARY - render_control_confirmed_distorted = rchr.default( - color='c' - ) # ?? 
SCAFFOLDING RCB -- TEMPORARY - render_control_confirmed_undistorted = rchr.default( - color='b' - ) # ?? SCAFFOLDING RCB -- TEMPORARY + render_control_projected_distorted = rchr.default(color='m') # ?? SCAFFOLDING RCB -- TEMPORARY + render_control_projected_undistorted = rchr.default(color='r') # ?? SCAFFOLDING RCB -- TEMPORARY + render_control_confirmed_distorted = rchr.default(color='c') # ?? SCAFFOLDING RCB -- TEMPORARY + render_control_confirmed_undistorted = rchr.default(color='b') # ?? SCAFFOLDING RCB -- TEMPORARY # render_control_projected_distorted = rchr.fast() # Don't draw frames. # ?? SCAFFOLDING RCB -- TEMPORARY # render_control_projected_undistorted = rchr.fast() # Don't draw frames. # ?? SCAFFOLDING RCB -- TEMPORARY # render_control_confirmed_distorted = rchr.fast() # Don't draw frames. # ?? SCAFFOLDING RCB -- TEMPORARY # render_control_confirmed_undistorted = rchr.fast() # Don't draw frames. # ?? SCAFFOLDING RCB -- TEMPORARY - render_control_confirmed_undistorted = ( - rckfgm.default() - ) # ?? SCAFFOLDING RCB -- TEMPORARY + render_control_confirmed_undistorted = rckfgm.default() # ?? SCAFFOLDING RCB -- TEMPORARY key_frames_object = Heliostats3d( # Execution control. force_construction, diff --git a/contrib/app/ufacet-s/helio_scan/190_TrajectoryAnalysis.py b/contrib/app/ufacet-s/helio_scan/190_TrajectoryAnalysis.py index 43862b42..2401d675 100644 --- a/contrib/app/ufacet-s/helio_scan/190_TrajectoryAnalysis.py +++ b/contrib/app/ufacet-s/helio_scan/190_TrajectoryAnalysis.py @@ -122,33 +122,21 @@ def __init__( ): # Check input. if (input_video_dir_body_ext == None) or (len(input_video_dir_body_ext) == 0): - raise ValueError( - 'In TrajectoryAnalysis.__init__(), null input_video_dir_body_ext encountered.' - ) + raise ValueError('In TrajectoryAnalysis.__init__(), null input_video_dir_body_ext encountered.') if (output_data_dir == None) or (len(output_data_dir) == 0): - raise ValueError( - 'In TrajectoryAnalysis.__init__(), null output_data_dir encountered.' - ) + raise ValueError('In TrajectoryAnalysis.__init__(), null output_data_dir encountered.') if (output_render_dir == None) or (len(output_render_dir) == 0): - raise ValueError( - 'In TrajectoryAnalysis.__init__(), null output_render_dir encountered.' - ) + raise ValueError('In TrajectoryAnalysis.__init__(), null output_render_dir encountered.') # Parse input video path components. - input_video_dir, input_video_body, input_video_ext = ft.path_components( - input_video_dir_body_ext - ) + input_video_dir, input_video_body, input_video_ext = ft.path_components(input_video_dir_body_ext) # Store input. # Execution control. 
self.log_dir_body_ext = log_dir_body_ext self.velocity_calculation_offset_fwd_bwd = velocity_calculation_offset_fwd_bwd - self.delta_velocity_angle_xy_peak_threshold = ( - delta_velocity_angle_xy_peak_threshold - ) - self.delta_velocity_angle_xy_non_peak_threshold = ( - delta_velocity_angle_xy_non_peak_threshold - ) + self.delta_velocity_angle_xy_peak_threshold = delta_velocity_angle_xy_peak_threshold + self.delta_velocity_angle_xy_non_peak_threshold = delta_velocity_angle_xy_non_peak_threshold self.turn_overshoot_skip_time = turn_overshoot_skip_time self.scan_establish_velocity_time = scan_establish_velocity_time self.scan_discard_velocity_time = scan_discard_velocity_time @@ -162,12 +150,8 @@ def __init__( self.minimum_gps_pass_number_of_points = minimum_gps_pass_number_of_points self.gps_pass_start_margin = gps_pass_start_margin self.gps_pass_stop_margin = gps_pass_stop_margin - self.maximum_camera_pass_inter_point_distance = ( - maximum_camera_pass_inter_point_distance - ) - self.minimum_camera_pass_inter_point_speed = ( - minimum_camera_pass_inter_point_speed - ) + self.maximum_camera_pass_inter_point_distance = maximum_camera_pass_inter_point_distance + self.minimum_camera_pass_inter_point_speed = minimum_camera_pass_inter_point_speed self.minimum_camera_pass_number_of_points = minimum_camera_pass_number_of_points self.camera_pass_start_margin = camera_pass_start_margin self.camera_pass_stop_margin = camera_pass_stop_margin @@ -199,17 +183,13 @@ def __init__( # Other times. # An hour before flight. self.when_ymdhmsz_minus_1_hour = copy.deepcopy(self.when_ymdhmsz) - self.when_ymdhmsz_minus_1_hour[ - 3 - ] -= 1 # Assume not near midnight, so don't worry about rollover. + self.when_ymdhmsz_minus_1_hour[3] -= 1 # Assume not near midnight, so don't worry about rollover. # A half-hour before flight. self.when_ymdhmsz_minus_30_minutes = copy.deepcopy(self.when_ymdhmsz) if self.when_ymdhmsz_minus_30_minutes[4] > 30: self.when_ymdhmsz_minus_30_minutes[4] -= 30 else: - self.when_ymdhmsz_minus_30_minutes[ - 3 - ] -= 1 # Assume not near midnight, so don't worry about rollover. + self.when_ymdhmsz_minus_30_minutes[3] -= 1 # Assume not near midnight, so don't worry about rollover. self.when_ymdhmsz_minus_30_minutes[4] += 30 # Load solar field data. @@ -223,15 +203,9 @@ def __init__( ) # ?? SCAFFOLDING RCB -- RENAME "_file" TO "_dir_body_ext" THROUGHOUT CODE. # Configuration setup - self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz - ) - self.solar_field.set_heliostats_configuration( - self.up_heliostats, self.up_configuration - ) - self.solar_field.set_heliostats_configuration( - self.down_heliostats, self.down_configuration - ) + self.solar_field.set_full_field_tracking(aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz) + self.solar_field.set_heliostats_configuration(self.up_heliostats, self.up_configuration) + self.solar_field.set_heliostats_configuration(self.down_heliostats, self.down_configuration) # Drone GPS trajectory from flight log. print('In TrajectoryAnalysis.__init__(), loading GPS log...') @@ -244,12 +218,8 @@ def __init__( # Determine GPS time corresponding to t=0 in the flight log seconds column, and store # in the format used by heliostat aim point calculations. - print( - 'In TrajectoryAnalysis.__init__(), finding GPS time corresponding to flight log time=0...' 
- ) - self.gps_ymdhmsz_given_flight_log_zero_seconds = ( - self.compute_gps_ymdhmsz_given_flight_log_zero_seconds() - ) + print('In TrajectoryAnalysis.__init__(), finding GPS time corresponding to flight log time=0...') + self.gps_ymdhmsz_given_flight_log_zero_seconds = self.compute_gps_ymdhmsz_given_flight_log_zero_seconds() self.print_gps_flight_log_zero_seconds() # Save GPS time corresponding to flight log zero time. self.gps_ymdhmsz_given_flight_log_zero_seconds_dir_body_ext = ( @@ -262,15 +232,11 @@ def __init__( self.gps_velocity_xy_change_maxima = self.find_gps_velocity_xy_change_maxima(1) # self.print_velocity_xy_change_points() # Save velocity change points. - self.output_gps_velocity_xy_change_minima_dir_body_ext = ( - self.save_gps_velocity_xy_change_points( - self.gps_velocity_xy_change_minima, 'minima' - ) + self.output_gps_velocity_xy_change_minima_dir_body_ext = self.save_gps_velocity_xy_change_points( + self.gps_velocity_xy_change_minima, 'minima' ) - self.output_gps_velocity_xy_change_maxima_dir_body_ext = ( - self.save_gps_velocity_xy_change_points( - self.gps_velocity_xy_change_maxima, 'maxima' - ) + self.output_gps_velocity_xy_change_maxima_dir_body_ext = self.save_gps_velocity_xy_change_points( + self.gps_velocity_xy_change_maxima, 'maxima' ) # Find scan passes. @@ -285,25 +251,17 @@ def __init__( # ?? SCAFFOLDING RCB -- COMMENT: print('In TrajectoryAnalysis.__init__(), finding GPS scan passes...') # Find pairs of a qualifying local maximum followed by the nearest qualifying local minimum. - self.maximum_to_minimum_pass_pair_list = ( - self.find_velocity_xy_change_maximum_to_minimum_scan_passes() - ) - self.minimum_to_maximum_pass_pair_list = ( - self.find_velocity_xy_change_minimum_to_maximum_scan_passes() - ) + self.maximum_to_minimum_pass_pair_list = self.find_velocity_xy_change_maximum_to_minimum_scan_passes() + self.minimum_to_maximum_pass_pair_list = self.find_velocity_xy_change_minimum_to_maximum_scan_passes() # Refine min/max pairs to identify stable scan pass motions. self.gps_scan_passes = self.convert_gps_minima_maxima_passes_to_scan_passes() self.print_gps_scan_pass_summary() # Save GPS scan pass data. - self.maximum_to_minimum_pass_pair_list_dir_body_ext = ( - self.save_gps_velocity_xy_change_pairs( - self.maximum_to_minimum_pass_pair_list, 'maximum_to_minimum' - ) + self.maximum_to_minimum_pass_pair_list_dir_body_ext = self.save_gps_velocity_xy_change_pairs( + self.maximum_to_minimum_pass_pair_list, 'maximum_to_minimum' ) - self.minimum_to_maximum_pass_pair_list_dir_body_ext = ( - self.save_gps_velocity_xy_change_pairs( - self.minimum_to_maximum_pass_pair_list, 'minimum_to_maximum' - ) + self.minimum_to_maximum_pass_pair_list_dir_body_ext = self.save_gps_velocity_xy_change_pairs( + self.minimum_to_maximum_pass_pair_list, 'minimum_to_maximum' ) self.output_gps_scan_passes_dir_body_ext = self.save_gps_scan_passes() @@ -315,44 +273,28 @@ def __init__( # Identify time synchronization between GPS flight log and camera frames. # ?? SCAFFOLDING RCB -- NOTE THAT THIS IS NOT GENERAL. MY CURRENT THINKING IS THAT IT SHOULD PROBABLY BE REPLACED BY AN EARLIER COMPUTATION THAT SUPPORTS KEY FRAME IDENTIFICATION, FOR EXAMPLE. SEE COMMENTS IN ROUTINE. - print( - 'In TrajectoryAnalysis.__init__(), initializing GPS-frame synchronization constants...' 
- ) + print('In TrajectoryAnalysis.__init__(), initializing GPS-frame synchronization constants...') self.synchronization_pair_list = self.initialize_synchronization_pair_list() - (self.synchronization_slope, self.synchronization_intercept) = ( - self.initialize_synchronization_constants() - ) + (self.synchronization_slope, self.synchronization_intercept) = self.initialize_synchronization_constants() self.print_synchronization_pair_list() - self.synchronization_constants_dir_body_ext = ( - self.save_synchronization_constants() - ) + self.synchronization_constants_dir_body_ext = self.save_synchronization_constants() # Construct heliostat camera passes. - print( - 'In TrajectoryAnalysis.__init__(), constructing heliostat camera passes...' - ) + print('In TrajectoryAnalysis.__init__(), constructing heliostat camera passes...') self.hel_camera_passes_dict = self.construct_heliostat_camera_passes() self.print_hel_camera_passes_dict(max_heliostats=3) self.hel_camera_passes_dict_dir_body_ext = self.save_hel_camera_passes_dict() # Construct data structures for GPS-camera analysis. - print( - 'In TrajectoryAnalysis.__init__(), constructing heliostat GPS-camera analysis...' - ) - self.hel_gps_camera_analysis_dict = ( - self.construct_hel_gps_camera_analysis_dict() - ) + print('In TrajectoryAnalysis.__init__(), constructing heliostat GPS-camera analysis...') + self.hel_gps_camera_analysis_dict = self.construct_hel_gps_camera_analysis_dict() self.print_hel_gps_camera_analysis_dict(max_heliostats=3) - self.hel_gps_camera_analysis_dict_dir_body_ext = ( - self.save_hel_gps_camera_analysis_dict() - ) + self.hel_gps_camera_analysis_dict_dir_body_ext = self.save_hel_gps_camera_analysis_dict() # Figure control information. print('In TrajectoryAnalysis.__init__(), initializing figures...') fm.reset_figure_management() - self.figure_control = rcfg.RenderControlFigure( - tile_array=(2, 1), tile_square=True - ) + self.figure_control = rcfg.RenderControlFigure(tile_array=(2, 1), tile_square=True) self.axis_control_m = rca.meters() # Check pickle files. @@ -376,20 +318,14 @@ def add_velocity_columns_to_flight_log_df(self): Adds velocity, speed, angle, and change information. """ # Construct flight log (x,y,z) coordinates. - print( - 'In TrajectoryAnalysis.add_velocity_columns_to_flight_log_df(), adding position columns...' - ) - self.flight_log_xyz_list = ( - [] - ) # ?? SCAFFOLDING RCB -- ELIMINATE THIS, IN FAVOR OF THE DATAFRAME. + print('In TrajectoryAnalysis.add_velocity_columns_to_flight_log_df(), adding position columns...') + self.flight_log_xyz_list = [] # ?? SCAFFOLDING RCB -- ELIMINATE THIS, IN FAVOR OF THE DATAFRAME. empty_column = [np.nan] * len(self.flight_log_df.index) self.flight_log_df['x(m)'] = empty_column self.flight_log_df['y(m)'] = empty_column self.flight_log_df['z(m)'] = empty_column self.flight_log_df['time(sec)'] = empty_column - print( - 'In TrajectoryAnalysis.add_velocity_columns_to_flight_log_df(), added, now filling...' - ) + print('In TrajectoryAnalysis.add_velocity_columns_to_flight_log_df(), added, now filling...') for ( idx ) in ( @@ -411,17 +347,13 @@ def add_velocity_columns_to_flight_log_df(self): self.flight_log_df.loc[idx, 'time(sec)'] = t_sec # Construct flight log velocity before each time instant. - print( - 'In TrajectoryAnalysis.add_velocity_columns_to_flight_log_df(), adding velocity before columns...' 
- ) + print('In TrajectoryAnalysis.add_velocity_columns_to_flight_log_df(), adding velocity before columns...') empty_column = [np.nan] * len(self.flight_log_df.index) self.flight_log_df['velocity_before_x(m/sec)'] = empty_column self.flight_log_df['velocity_before_y(m/sec)'] = empty_column self.flight_log_df['velocity_before_z(m/sec)'] = empty_column self.flight_log_df['speed_before(m/sec)'] = empty_column - print( - 'In TrajectoryAnalysis.add_velocity_columns_to_flight_log_df(), added, now filling...' - ) + print('In TrajectoryAnalysis.add_velocity_columns_to_flight_log_df(), added, now filling...') for ( idx ) in ( @@ -432,84 +364,50 @@ def add_velocity_columns_to_flight_log_df(self): y = self.flight_log_df.loc[idx, 'y(m)'] z = self.flight_log_df.loc[idx, 'z(m)'] t = self.flight_log_df.loc[idx, 'time(sec)'] - before_x = self.flight_log_df.loc[ - idx - self.velocity_calculation_offset_fwd_bwd, 'x(m)' - ] - before_y = self.flight_log_df.loc[ - idx - self.velocity_calculation_offset_fwd_bwd, 'y(m)' - ] - before_z = self.flight_log_df.loc[ - idx - self.velocity_calculation_offset_fwd_bwd, 'z(m)' - ] - before_t = self.flight_log_df.loc[ - idx - self.velocity_calculation_offset_fwd_bwd, 'time(sec)' - ] + before_x = self.flight_log_df.loc[idx - self.velocity_calculation_offset_fwd_bwd, 'x(m)'] + before_y = self.flight_log_df.loc[idx - self.velocity_calculation_offset_fwd_bwd, 'y(m)'] + before_z = self.flight_log_df.loc[idx - self.velocity_calculation_offset_fwd_bwd, 'z(m)'] + before_t = self.flight_log_df.loc[idx - self.velocity_calculation_offset_fwd_bwd, 'time(sec)'] delta_x = x - before_x delta_y = y - before_y delta_z = z - before_z delta_t = t - before_t d = np.sqrt(delta_x**2 + delta_y**2 + delta_z**2) - self.flight_log_df.loc[idx, 'velocity_before_x(m/sec)'] = ( - delta_x / delta_t - ) - self.flight_log_df.loc[idx, 'velocity_before_y(m/sec)'] = ( - delta_y / delta_t - ) - self.flight_log_df.loc[idx, 'velocity_before_z(m/sec)'] = ( - delta_z / delta_t - ) + self.flight_log_df.loc[idx, 'velocity_before_x(m/sec)'] = delta_x / delta_t + self.flight_log_df.loc[idx, 'velocity_before_y(m/sec)'] = delta_y / delta_t + self.flight_log_df.loc[idx, 'velocity_before_z(m/sec)'] = delta_z / delta_t self.flight_log_df.loc[idx, 'speed_before(m/sec)'] = d / delta_t # Construct flight log velocity after each time instant. - print( - 'In TrajectoryAnalysis.add_velocity_columns_to_flight_log_df(), adding velocity after columns...' - ) + print('In TrajectoryAnalysis.add_velocity_columns_to_flight_log_df(), adding velocity after columns...') empty_column = [np.nan] * len(self.flight_log_df.index) self.flight_log_df['velocity_after_x(m/sec)'] = empty_column self.flight_log_df['velocity_after_y(m/sec)'] = empty_column self.flight_log_df['velocity_after_z(m/sec)'] = empty_column self.flight_log_df['speed_after(m/sec)'] = empty_column - print( - 'In TrajectoryAnalysis.add_velocity_columns_to_flight_log_df(), added, now filling...' - ) + print('In TrajectoryAnalysis.add_velocity_columns_to_flight_log_df(), added, now filling...') for ( idx ) in ( self.flight_log_df.index ): # ?? SCAFFOLDING RCB -- THIS FOR LOOP IS VERY SLOW. CAN WE VECTORIZE IT? REPLACE THE .loc[] CALLS WITH SOMETHING FASTER? 
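# Illustrative sketch, not part of this formatting patch: the "CAN WE VECTORIZE IT?" note above could
# likely be addressed with pandas shift() arithmetic instead of the row-by-row .loc[] loop. It assumes
# only the 'x(m)', 'y(m)', 'z(m)', and 'time(sec)' columns added earlier, with k equal to
# velocity_calculation_offset_fwd_bwd; the helper name add_velocity_before_columns_vectorized is hypothetical.
import numpy as np

def add_velocity_before_columns_vectorized(flight_log_df, k):
    # Differences against the sample k rows earlier; the first k rows stay NaN, matching the guarded loop above.
    dx = flight_log_df['x(m)'] - flight_log_df['x(m)'].shift(k)
    dy = flight_log_df['y(m)'] - flight_log_df['y(m)'].shift(k)
    dz = flight_log_df['z(m)'] - flight_log_df['z(m)'].shift(k)
    dt = flight_log_df['time(sec)'] - flight_log_df['time(sec)'].shift(k)
    flight_log_df['velocity_before_x(m/sec)'] = dx / dt
    flight_log_df['velocity_before_y(m/sec)'] = dy / dt
    flight_log_df['velocity_before_z(m/sec)'] = dz / dt
    flight_log_df['speed_before(m/sec)'] = np.sqrt(dx**2 + dy**2 + dz**2) / dt
    # The 'velocity_after_*' columns would follow the same pattern with shift(-k).
    return flight_log_df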
- if idx < ( - len(self.flight_log_df.index) - self.velocity_calculation_offset_fwd_bwd - ): + if idx < (len(self.flight_log_df.index) - self.velocity_calculation_offset_fwd_bwd): x = self.flight_log_df.loc[idx, 'x(m)'] y = self.flight_log_df.loc[idx, 'y(m)'] z = self.flight_log_df.loc[idx, 'z(m)'] t = self.flight_log_df.loc[idx, 'time(sec)'] - after_x = self.flight_log_df.loc[ - idx + self.velocity_calculation_offset_fwd_bwd, 'x(m)' - ] - after_y = self.flight_log_df.loc[ - idx + self.velocity_calculation_offset_fwd_bwd, 'y(m)' - ] - after_z = self.flight_log_df.loc[ - idx + self.velocity_calculation_offset_fwd_bwd, 'z(m)' - ] - after_t = self.flight_log_df.loc[ - idx + self.velocity_calculation_offset_fwd_bwd, 'time(sec)' - ] + after_x = self.flight_log_df.loc[idx + self.velocity_calculation_offset_fwd_bwd, 'x(m)'] + after_y = self.flight_log_df.loc[idx + self.velocity_calculation_offset_fwd_bwd, 'y(m)'] + after_z = self.flight_log_df.loc[idx + self.velocity_calculation_offset_fwd_bwd, 'z(m)'] + after_t = self.flight_log_df.loc[idx + self.velocity_calculation_offset_fwd_bwd, 'time(sec)'] delta_x = after_x - x delta_y = after_y - y delta_z = after_z - z delta_t = after_t - t d = np.sqrt(delta_x**2 + delta_y**2 + delta_z**2) - self.flight_log_df.loc[idx, 'velocity_after_x(m/sec)'] = ( - delta_x / delta_t - ) - self.flight_log_df.loc[idx, 'velocity_after_y(m/sec)'] = ( - delta_y / delta_t - ) - self.flight_log_df.loc[idx, 'velocity_after_z(m/sec)'] = ( - delta_z / delta_t - ) + self.flight_log_df.loc[idx, 'velocity_after_x(m/sec)'] = delta_x / delta_t + self.flight_log_df.loc[idx, 'velocity_after_y(m/sec)'] = delta_y / delta_t + self.flight_log_df.loc[idx, 'velocity_after_z(m/sec)'] = delta_z / delta_t self.flight_log_df.loc[idx, 'speed_after(m/sec)'] = d / delta_t # Construct flight log before/after average velocity for time instant, and local change in speed and direction. @@ -527,9 +425,7 @@ def add_velocity_columns_to_flight_log_df(self): self.flight_log_df['velocity_angle_z(rad)'] = empty_column self.flight_log_df['delta_velocity_angle_xy(rad)'] = empty_column self.flight_log_df['delta_velocity_angle_z(rad)'] = empty_column - print( - 'In TrajectoryAnalysis.add_velocity_columns_to_flight_log_df(), added, now filling...' - ) + print('In TrajectoryAnalysis.add_velocity_columns_to_flight_log_df(), added, now filling...') for ( idx ) in ( @@ -554,23 +450,13 @@ def add_velocity_columns_to_flight_log_df(self): abs_delta_speed = abs(delta_speed) # Directions. 
velocity_angle_xy = self.velocity_angle_xy(average_vx, average_vy) - velocity_angle_z = self.velocity_angle_z( - average_vx, average_vy, average_vz - ) + velocity_angle_z = self.velocity_angle_z(average_vx, average_vy, average_vz) before_velocity_angle_xy = self.velocity_angle_xy(before_vx, before_vy) - before_velocity_angle_z = self.velocity_angle_z( - before_vx, before_vy, before_vz - ) + before_velocity_angle_z = self.velocity_angle_z(before_vx, before_vy, before_vz) after_velocity_angle_xy = self.velocity_angle_xy(after_vx, after_vy) - after_velocity_angle_z = self.velocity_angle_z( - after_vx, after_vy, after_vz - ) - delta_velocity_angle_xy = angle.angle2_minus_angle_1( - before_velocity_angle_xy, after_velocity_angle_xy - ) - delta_velocity_angle_z = angle.angle2_minus_angle_1( - before_velocity_angle_z, after_velocity_angle_z - ) + after_velocity_angle_z = self.velocity_angle_z(after_vx, after_vy, after_vz) + delta_velocity_angle_xy = angle.angle2_minus_angle_1(before_velocity_angle_xy, after_velocity_angle_xy) + delta_velocity_angle_z = angle.angle2_minus_angle_1(before_velocity_angle_z, after_velocity_angle_z) # Store results. self.flight_log_df.loc[idx, 'velocity_average_x(m/sec)'] = average_vx self.flight_log_df.loc[idx, 'velocity_average_y(m/sec)'] = average_vy @@ -578,16 +464,10 @@ def add_velocity_columns_to_flight_log_df(self): self.flight_log_df.loc[idx, 'speed_average(m/sec)'] = average_s self.flight_log_df.loc[idx, 'delta_speed(m/sec)'] = delta_speed self.flight_log_df.loc[idx, 'abs_delta_speed(m/sec)'] = abs_delta_speed - self.flight_log_df.loc[idx, 'velocity_angle_xy(rad)'] = ( - velocity_angle_xy - ) + self.flight_log_df.loc[idx, 'velocity_angle_xy(rad)'] = velocity_angle_xy self.flight_log_df.loc[idx, 'velocity_angle_z(rad)'] = velocity_angle_z - self.flight_log_df.loc[idx, 'delta_velocity_angle_xy(rad)'] = ( - delta_velocity_angle_xy - ) - self.flight_log_df.loc[idx, 'delta_velocity_angle_z(rad)'] = ( - delta_velocity_angle_z - ) + self.flight_log_df.loc[idx, 'delta_velocity_angle_xy(rad)'] = delta_velocity_angle_xy + self.flight_log_df.loc[idx, 'delta_velocity_angle_z(rad)'] = delta_velocity_angle_z def velocity_angle_xy(self, velocity_x, velocity_y): """ @@ -613,9 +493,7 @@ def velocity_angle_z(self, velocity_x, velocity_y, velocity_z): elif velocity_z < 0: return -(math.pi / 2.0) else: - print( - 'ERROR: In 190_TrajectoryAnalysis.velocity_direction_z(), unexpected situation encountered.' - ) + print('ERROR: In 190_TrajectoryAnalysis.velocity_direction_z(), unexpected situation encountered.') assert False else: return math.atan2(velocity_z, velocity_xy) @@ -633,9 +511,7 @@ def gps_ymdhmsz_given_flight_log_seconds( It is simply the conversion from the flight log seconds column to the flight log GPS time columns. """ - return tdt.add_seconds_to_ymdhmsz( - self.gps_ymdhmsz_given_flight_log_zero_seconds, time_sec - ) + return tdt.add_seconds_to_ymdhmsz(self.gps_ymdhmsz_given_flight_log_zero_seconds, time_sec) def compute_gps_ymdhmsz_given_flight_log_zero_seconds(self): """ @@ -662,19 +538,9 @@ def compute_gps_ymdhmsz_given_flight_log_zero_seconds(self): zone = self.when_ymdhmsz[6] # Assemble the GPS time in ymdhmsz format. # ?? SCAFFOLDING RCB -- THIS SUGGESTS MIGRATING THE YMDHMSZ FORMAT TO A STANDARD PYTHON DATE TIME FORMAT, THROUGHOUT. 
- gps_ymdhmsz = [ - gps_year, - gps_month, - gps_day, - gps_hour, - gps_minute, - gps_second, - zone, - ] + gps_ymdhmsz = [gps_year, gps_month, gps_day, gps_hour, gps_minute, gps_second, zone] # Subtract the first-row time in seconds, to obtain the time corresponding to flight log seconds = 0. - zero_time_ymdhmsz = tdt.subtract_seconds_from_ymdhmsz( - gps_ymdhmsz, flight_log_first_row_time_sec - ) + zero_time_ymdhmsz = tdt.subtract_seconds_from_ymdhmsz(gps_ymdhmsz, flight_log_first_row_time_sec) # Return. return zero_time_ymdhmsz @@ -756,59 +622,24 @@ def find_gps_velocity_xy_change_maxima(self, sign): # sign is +1 or -1. for idx in self.flight_log_df.index: if (idx > 4) and (idx < (len(self.flight_log_df.index) - 5)): # Fetch values. - prev5_delta_velocity_angle_xy = ( - sign - * self.flight_log_df.loc[idx - 5, 'delta_velocity_angle_xy(rad)'] - ) - prev4_delta_velocity_angle_xy = ( - sign - * self.flight_log_df.loc[idx - 4, 'delta_velocity_angle_xy(rad)'] - ) - prev3_delta_velocity_angle_xy = ( - sign - * self.flight_log_df.loc[idx - 3, 'delta_velocity_angle_xy(rad)'] - ) - prev2_delta_velocity_angle_xy = ( - sign - * self.flight_log_df.loc[idx - 2, 'delta_velocity_angle_xy(rad)'] - ) - prev1_delta_velocity_angle_xy = ( - sign - * self.flight_log_df.loc[idx - 1, 'delta_velocity_angle_xy(rad)'] - ) - this_delta_velocity_angle_xy = ( - sign * self.flight_log_df.loc[idx, 'delta_velocity_angle_xy(rad)'] - ) - next1_delta_velocity_angle_xy = ( - sign - * self.flight_log_df.loc[idx + 1, 'delta_velocity_angle_xy(rad)'] - ) - next2_delta_velocity_angle_xy = ( - sign - * self.flight_log_df.loc[idx + 2, 'delta_velocity_angle_xy(rad)'] - ) - next3_delta_velocity_angle_xy = ( - sign - * self.flight_log_df.loc[idx + 3, 'delta_velocity_angle_xy(rad)'] - ) - next4_delta_velocity_angle_xy = ( - sign - * self.flight_log_df.loc[idx + 4, 'delta_velocity_angle_xy(rad)'] - ) - next5_delta_velocity_angle_xy = ( - sign - * self.flight_log_df.loc[idx + 5, 'delta_velocity_angle_xy(rad)'] - ) + prev5_delta_velocity_angle_xy = sign * self.flight_log_df.loc[idx - 5, 'delta_velocity_angle_xy(rad)'] + prev4_delta_velocity_angle_xy = sign * self.flight_log_df.loc[idx - 4, 'delta_velocity_angle_xy(rad)'] + prev3_delta_velocity_angle_xy = sign * self.flight_log_df.loc[idx - 3, 'delta_velocity_angle_xy(rad)'] + prev2_delta_velocity_angle_xy = sign * self.flight_log_df.loc[idx - 2, 'delta_velocity_angle_xy(rad)'] + prev1_delta_velocity_angle_xy = sign * self.flight_log_df.loc[idx - 1, 'delta_velocity_angle_xy(rad)'] + this_delta_velocity_angle_xy = sign * self.flight_log_df.loc[idx, 'delta_velocity_angle_xy(rad)'] + next1_delta_velocity_angle_xy = sign * self.flight_log_df.loc[idx + 1, 'delta_velocity_angle_xy(rad)'] + next2_delta_velocity_angle_xy = sign * self.flight_log_df.loc[idx + 2, 'delta_velocity_angle_xy(rad)'] + next3_delta_velocity_angle_xy = sign * self.flight_log_df.loc[idx + 3, 'delta_velocity_angle_xy(rad)'] + next4_delta_velocity_angle_xy = sign * self.flight_log_df.loc[idx + 4, 'delta_velocity_angle_xy(rad)'] + next5_delta_velocity_angle_xy = sign * self.flight_log_df.loc[idx + 5, 'delta_velocity_angle_xy(rad)'] if ( (prev5_delta_velocity_angle_xy <= this_delta_velocity_angle_xy) and (prev4_delta_velocity_angle_xy <= this_delta_velocity_angle_xy) and (prev3_delta_velocity_angle_xy <= this_delta_velocity_angle_xy) and (prev2_delta_velocity_angle_xy <= this_delta_velocity_angle_xy) and (prev1_delta_velocity_angle_xy <= this_delta_velocity_angle_xy) - and ( - this_delta_velocity_angle_xy - >= 
self.delta_velocity_angle_xy_peak_threshold - ) + and (this_delta_velocity_angle_xy >= self.delta_velocity_angle_xy_peak_threshold) and (next1_delta_velocity_angle_xy <= this_delta_velocity_angle_xy) and (next2_delta_velocity_angle_xy <= this_delta_velocity_angle_xy) and (next3_delta_velocity_angle_xy <= this_delta_velocity_angle_xy) @@ -822,9 +653,7 @@ def find_gps_velocity_xy_change_maxima(self, sign): # sign is +1 or -1. this_z = self.flight_log_df.loc[idx, 'z(m)'] maximum_dict = {} maximum_dict['time'] = this_time - maximum_dict['delta_velocity_angle_xy'] = ( - sign * this_delta_velocity_angle_xy - ) + maximum_dict['delta_velocity_angle_xy'] = sign * this_delta_velocity_angle_xy maximum_dict['x'] = this_x maximum_dict['y'] = this_y maximum_dict['z'] = this_z @@ -838,11 +667,7 @@ def find_velocity_xy_change_maximum_to_minimum_scan_passes( self, ): # ?? SCAFFOLDING RCB -- THIS ROUTINE DUPLICATES ITS PAIR. CAN THEY BE MERGED, WHILE RETAINING CORRECTNESS AND CLARITY? maximum_to_minimum_pass_pair_list = [] - for ( - maximum_dict - ) in ( - self.gps_velocity_xy_change_maxima - ): # Here "maximum" means "maximum_velocity_xy_change" + for maximum_dict in self.gps_velocity_xy_change_maxima: # Here "maximum" means "maximum_velocity_xy_change" # Here is a local maximum time to work with. time_maximum = maximum_dict[ 'time' @@ -855,9 +680,7 @@ def find_velocity_xy_change_maximum_to_minimum_scan_passes( # See if there is a local maximum in between. for alternative_maximum_dict in self.gps_velocity_xy_change_maxima: alternative_time_maximum = alternative_maximum_dict['time'] - if (alternative_time_maximum > time_maximum) and ( - alternative_time_maximum < time_minimum - ): + if (alternative_time_maximum > time_maximum) and (alternative_time_maximum < time_minimum): maximum_dict = alternative_maximum_dict if alternative_time_maximum > time_minimum: break @@ -886,12 +709,8 @@ def find_velocity_xy_change_maximum_to_minimum_scan_passes( speed_count = 0 for idx in self.flight_log_df.index: time = self.flight_log_df.loc[idx, 'time(sec)'] - if (interval_start_time <= time) and ( - time <= interval_end_time - ): - speed_sum += self.flight_log_df.loc[ - idx, 'speed_average(m/sec)' - ] + if (interval_start_time <= time) and (time <= interval_end_time): + speed_sum += self.flight_log_df.loc[idx, 'speed_average(m/sec)'] speed_count += 1 if speed_count <= 0: print( @@ -903,9 +722,7 @@ def find_velocity_xy_change_maximum_to_minimum_scan_passes( min_scan_speed = self.nominal_scan_speed - self.scan_speed_tolerance max_scan_speed = self.nominal_scan_speed + self.scan_speed_tolerance # print('In TrajectoryAnalysis.__init__(), [min_scan_speed, max_scan_speed] = [',min_scan_speed, ',',max_scan_speed,']; interval_average_speed = ', interval_average_speed) # ?? SCAFFOLDING RCB -- TEMPORARY - if (min_scan_speed <= interval_average_speed) and ( - interval_average_speed <= max_scan_speed - ): + if (min_scan_speed <= interval_average_speed) and (interval_average_speed <= max_scan_speed): # Then the intervening motion speed matches the expected speed. # ?? SCAFFOLDING RCB -- THIS CODE REPLICATION IS GETTING SERIOUSLY UGLY. E.G., WRITE A ROUTINE TO FETCH AVERAGE VALUE OF ARBITRARY PARAMETER. # See if the interval between the maximum and the minimum matches the scan z velocity. 
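The two averaging blocks in this hunk (average speed, then average vertical velocity over the candidate interval) repeat the same scan of the flight log, and the "?? SCAFFOLDING" note above asks for a routine that fetches the average of an arbitrary parameter. A minimal sketch of such a helper, assuming the 'time(sec)' column added earlier; the name interval_average is hypothetical and not part of this patch:

def interval_average(flight_log_df, column_name, interval_start_time, interval_end_time):
    # Average of column_name over rows whose 'time(sec)' lies inside the closed interval.
    in_interval = (flight_log_df['time(sec)'] >= interval_start_time) & (
        flight_log_df['time(sec)'] <= interval_end_time
    )
    values = flight_log_df.loc[in_interval, column_name]
    if len(values) == 0:
        raise ValueError('In interval_average(), no flight log samples found in the given time interval.')
    return values.mean()

With such a helper, the duplicated blocks would reduce to calls such as interval_average(self.flight_log_df, 'speed_average(m/sec)', interval_start_time, interval_end_time) and the corresponding call for 'velocity_average_z(m/sec)'.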
@@ -914,12 +731,8 @@ def find_velocity_xy_change_maximum_to_minimum_scan_passes( velocity_z_count = 0 for idx in self.flight_log_df.index: time = self.flight_log_df.loc[idx, 'time(sec)'] - if (interval_start_time <= time) and ( - time <= interval_end_time - ): - velocity_z_sum += self.flight_log_df.loc[ - idx, 'velocity_average_z(m/sec)' - ] + if (interval_start_time <= time) and (time <= interval_end_time): + velocity_z_sum += self.flight_log_df.loc[idx, 'velocity_average_z(m/sec)'] velocity_z_count += 1 if velocity_z_count <= 0: print( @@ -928,23 +741,15 @@ def find_velocity_xy_change_maximum_to_minimum_scan_passes( ) assert False interval_average_velocity_z = velocity_z_sum / velocity_z_count - min_scan_velocity_z = ( - self.nominal_scan_velocity_z - - self.scan_velocity_z_tolerance - ) - max_scan_velocity_z = ( - self.nominal_scan_velocity_z - + self.scan_velocity_z_tolerance - ) + min_scan_velocity_z = self.nominal_scan_velocity_z - self.scan_velocity_z_tolerance + max_scan_velocity_z = self.nominal_scan_velocity_z + self.scan_velocity_z_tolerance # print('In TrajectoryAnalysis.__init__(), [min_scan_velocity_z, max_scan_velocity_z] = [',min_scan_velocity_z, ',',max_scan_velocity_z,']; interval_average_velocity_z = ', interval_average_velocity_z) # ?? SCAFFOLDING RCB -- TEMPORARY if (min_scan_velocity_z <= interval_average_velocity_z) and ( interval_average_velocity_z <= max_scan_velocity_z ): # Then the intervening motion velocity_z matches the expected velocity_z. # The [time_maximum, time_minimum] interval passes all tests. - maximum_to_minimum_pass_pair_list.append( - maximum_to_minimum_pair - ) + maximum_to_minimum_pass_pair_list.append(maximum_to_minimum_pair) # Return. return maximum_to_minimum_pass_pair_list @@ -952,11 +757,7 @@ def find_velocity_xy_change_minimum_to_maximum_scan_passes( self, ): # ?? SCAFFOLDING RCB -- THIS ROUTINE DUPLICATES ITS PAIR. CAN THEY BE MERGED, WHILE RETAINING CORRECTNESS AND CLARITY? minimum_to_maximum_pass_pair_list = [] - for ( - minimum_dict - ) in ( - self.gps_velocity_xy_change_minima - ): # Here "minimum" means "minimum_velocity_xy_change" + for minimum_dict in self.gps_velocity_xy_change_minima: # Here "minimum" means "minimum_velocity_xy_change" # Here is a local minimum time to work with. time_minimum = minimum_dict[ 'time' @@ -969,9 +770,7 @@ def find_velocity_xy_change_minimum_to_maximum_scan_passes( # See if there is a local minimum in between. 
for alternative_minimum_dict in self.gps_velocity_xy_change_minima: alternative_time_minimum = alternative_minimum_dict['time'] - if (alternative_time_minimum > time_minimum) and ( - alternative_time_minimum < time_maximum - ): + if (alternative_time_minimum > time_minimum) and (alternative_time_minimum < time_maximum): minimum_dict = alternative_minimum_dict if alternative_time_minimum > time_maximum: break @@ -1000,12 +799,8 @@ def find_velocity_xy_change_minimum_to_maximum_scan_passes( speed_count = 0 for idx in self.flight_log_df.index: time = self.flight_log_df.loc[idx, 'time(sec)'] - if (interval_start_time <= time) and ( - time <= interval_end_time - ): - speed_sum += self.flight_log_df.loc[ - idx, 'speed_average(m/sec)' - ] + if (interval_start_time <= time) and (time <= interval_end_time): + speed_sum += self.flight_log_df.loc[idx, 'speed_average(m/sec)'] speed_count += 1 if speed_count <= 0: print( @@ -1017,9 +812,7 @@ def find_velocity_xy_change_minimum_to_maximum_scan_passes( min_scan_speed = self.nominal_scan_speed - self.scan_speed_tolerance max_scan_speed = self.nominal_scan_speed + self.scan_speed_tolerance # print('In TrajectoryAnalysis.__init__(), [min_scan_speed, max_scan_speed] = [',min_scan_speed, ',',max_scan_speed,']; interval_average_speed = ', interval_average_speed) # ?? SCAFFOLDING RCB -- TEMPORARY - if (min_scan_speed <= interval_average_speed) and ( - interval_average_speed <= max_scan_speed - ): + if (min_scan_speed <= interval_average_speed) and (interval_average_speed <= max_scan_speed): # Then the intervening motion speed matches the expected speed. # ?? SCAFFOLDING RCB -- THIS CODE REPLICATION IS GETTING SERIOUSLY UGLY. E.G., WRITE A ROUTINE TO FETCH AVERAGE VALUE OF ARBITRARY PARAMETER. # See if the interval between the maximum and the minimum matches the scan z velocity. @@ -1028,12 +821,8 @@ def find_velocity_xy_change_minimum_to_maximum_scan_passes( velocity_z_count = 0 for idx in self.flight_log_df.index: time = self.flight_log_df.loc[idx, 'time(sec)'] - if (interval_start_time <= time) and ( - time <= interval_end_time - ): - velocity_z_sum += self.flight_log_df.loc[ - idx, 'velocity_average_z(m/sec)' - ] + if (interval_start_time <= time) and (time <= interval_end_time): + velocity_z_sum += self.flight_log_df.loc[idx, 'velocity_average_z(m/sec)'] velocity_z_count += 1 if velocity_z_count <= 0: print( @@ -1042,23 +831,15 @@ def find_velocity_xy_change_minimum_to_maximum_scan_passes( ) assert False interval_average_velocity_z = velocity_z_sum / velocity_z_count - min_scan_velocity_z = ( - self.nominal_scan_velocity_z - - self.scan_velocity_z_tolerance - ) - max_scan_velocity_z = ( - self.nominal_scan_velocity_z - + self.scan_velocity_z_tolerance - ) + min_scan_velocity_z = self.nominal_scan_velocity_z - self.scan_velocity_z_tolerance + max_scan_velocity_z = self.nominal_scan_velocity_z + self.scan_velocity_z_tolerance # print('In TrajectoryAnalysis.__init__(), [min_scan_velocity_z, max_scan_velocity_z] = [',min_scan_velocity_z, ',',max_scan_velocity_z,']; interval_average_velocity_z = ', interval_average_velocity_z) # ?? SCAFFOLDING RCB -- TEMPORARY if (min_scan_velocity_z <= interval_average_velocity_z) and ( interval_average_velocity_z <= max_scan_velocity_z ): # Then the intervening motion velocity_z matches the expected velocity_z. # The [time_minimum, time_maximum] interval passes all tests. - minimum_to_maximum_pass_pair_list.append( - minimum_to_maximum_pair - ) + minimum_to_maximum_pass_pair_list.append(minimum_to_maximum_pair) # Return. 
return minimum_to_maximum_pass_pair_list @@ -1124,10 +905,7 @@ def convert_gps_minima_maxima_passes_to_scan_passes(self): # ] # Combine max-to-min and min-to-max pass pairs, and sort in order of increasing time. - start_stop_pass_pair_list = ( - self.maximum_to_minimum_pass_pair_list - + self.minimum_to_maximum_pass_pair_list - ) + start_stop_pass_pair_list = self.maximum_to_minimum_pass_pair_list + self.minimum_to_maximum_pass_pair_list start_stop_pass_pair_list.sort(key=self.start_stop_pass_pair_start_time) # start_stop_pass_pair_list = [ # ?? SCAFFOLDING RCB -- Values computed November 16 at 7:30 AM, hard-coded as a development shortcut. @@ -1163,14 +941,10 @@ def convert_gps_minima_maxima_passes_to_scan_passes(self): gps_scan_passes = [] for start_stop_pass_pair in start_stop_pass_pair_list: # Fetch trajectory points along pass. - xyzt_list = self.xyzt_list_given_gps_start_stop_pass_pair( - start_stop_pass_pair - ) + xyzt_list = self.xyzt_list_given_gps_start_stop_pass_pair(start_stop_pass_pair) # If there is a pause, break the list up. - list_of_xyzt_lists_1 = ( - self.split_xyzt_list_where_inter_point_speed_below_minimum( - xyzt_list, self.minimum_gps_pass_inter_point_speed - ) + list_of_xyzt_lists_1 = self.split_xyzt_list_where_inter_point_speed_below_minimum( + xyzt_list, self.minimum_gps_pass_inter_point_speed ) # Discard lists that are too short. list_of_xyzt_lists_2 = self.discard_xyzt_lists_where_length_below_minimum( @@ -1181,9 +955,7 @@ def convert_gps_minima_maxima_passes_to_scan_passes(self): # Construct the scan pass object. if len(xyzt_list) >= 2: gps_scan_pass = self.scan_pass_given_xyzt_list( - xyzt_list, - n_start_margin=self.gps_pass_start_margin, - n_stop_margin=self.gps_pass_stop_margin, + xyzt_list, n_start_margin=self.gps_pass_start_margin, n_stop_margin=self.gps_pass_stop_margin ) # Add to list. gps_scan_passes.append(gps_scan_pass) @@ -1203,14 +975,9 @@ def scan_pass_given_xyzt_list( self, xyzt_list, n_start_margin, n_stop_margin ): # ?? SCAFFOLDING RCB -- THIS START_MARGIN, STOP_MARGIN IS A STUB HACK. # Refine pass, collecting statistical variation data. - ( - stable_begin_xyzt, - stable_end_xyzt, - line_3d, - inlier_xyzt_list, - distance_to_line_list, - rms_distance_to_line, - ) = self.refine_xyzt_list(xyzt_list, n_start_margin, n_stop_margin) + (stable_begin_xyzt, stable_end_xyzt, line_3d, inlier_xyzt_list, distance_to_line_list, rms_distance_to_line) = ( + self.refine_xyzt_list(xyzt_list, n_start_margin, n_stop_margin) + ) # Construct scan pass object. scan_pass = ( {} @@ -1241,9 +1008,7 @@ def refine_xyzt_list(self, xyzt_list, n_start_margin, n_stop_margin=20): ) inlier_xyzt_list = xyzt_list else: - inlier_xyzt_list = xyzt_list[ - n_start_margin : len(xyzt_list) - n_stop_margin - ] + inlier_xyzt_list = xyzt_list[n_start_margin : len(xyzt_list) - n_stop_margin] stable_begin_xyzt = inlier_xyzt_list[ 0 ] # ?? SCAFFOLDING RCB -- STUB HACK. THIS SHOULD USE A FIT LINE, NOT FIRST AND LAST POINTS. @@ -1255,9 +1020,7 @@ def refine_xyzt_list(self, xyzt_list, n_start_margin, n_stop_margin=20): line_3d = g3d.construct_line_3d_given_two_points( line_3d_xyz_1, line_3d_xyz_2 ) # ?? SCAFFOLDING RCB -- LINE-3D SHOULD BE A CLASS. - distance_to_line_list = [ - g3d.distance_to_line_3d(xyzt[0:3], line_3d) for xyzt in inlier_xyzt_list - ] + distance_to_line_list = [g3d.distance_to_line_3d(xyzt[0:3], line_3d) for xyzt in inlier_xyzt_list] rms_distance_to_line = mt.rms(distance_to_line_list) # Return. 
return ( @@ -1280,9 +1043,7 @@ def load_trajectory_fragments(self): hel_frames_dict = {} for hel_name in self.solar_field.heliostat_name_list(): if ft.directory_exists(self.input_reconstructed_heliostats_dir): - reconstructed_hel_dir = os.path.join( - self.input_reconstructed_heliostats_dir, hel_name - ) + reconstructed_hel_dir = os.path.join(self.input_reconstructed_heliostats_dir, hel_name) if ft.directory_exists(reconstructed_hel_dir): frame_parameters_dir_body_ext = os.path.join( reconstructed_hel_dir, hel_name + '_frame_dict_parameters.csv' @@ -1376,36 +1137,21 @@ def initialize_synchronization_pair_list(self): # ?? SCAFFOLDING RCB -- THIS LITERAL IS BECAUSE THIS IS A TEMPORARY PATCH, UNTIL TIME SYNCHRONIZXATION IS DONE AS PART OF RTK GPS/FRAME SYNCRHONIZATION, AND/OR KEY FRAME SELECTION(?). synch_camera_hel_name_list = ['5W9', '5W1', '5E3'] synch_pair_list = [] - for synch_gps_pass_idx, synch_camera_hel_name in zip( - synch_gps_pass_idx_list, synch_camera_hel_name_list - ): + for synch_gps_pass_idx, synch_camera_hel_name in zip(synch_gps_pass_idx_list, synch_camera_hel_name_list): # GPS spec. # ?? SCAFFOLDING RCB -- THIS LITERAL IS BECAUSE THIS IS A TEMPORARY PATCH, UNTIL TIME SYNCHRONIZXATION IS DONE AS PART OF RTK GPS/FRAME SYNCRHONIZATION, AND/OR KEY FRAME SELECTION(?). synch_gps_pass_type = 'max_to_min' # ?? SCAFFOLDING RCB -- THIS LITERAL IS BECAUSE THIS IS A TEMPORARY PATCH, UNTIL TIME SYNCHRONIZXATION IS DONE AS PART OF RTK GPS/FRAME SYNCRHONIZATION, AND/OR KEY FRAME SELECTION(?). synch_gps_start_or_stop = 'stop' - synch_gps_time = self.find_gps_synch_time( - synch_gps_pass_type, synch_gps_pass_idx, synch_gps_start_or_stop - ) + synch_gps_time = self.find_gps_synch_time(synch_gps_pass_type, synch_gps_pass_idx, synch_gps_start_or_stop) # ?? SCAFFOLDING RCB -- THIS INFORMATION IS DEFINED IN DIFFERENT PLACES IN THE CODE, CREATING A VULNERABILITY TO POSSIBLE FURTURE BUGS DUE TO MISMATCHES. IF THIS APPROACH SURVIVES, RESOLVE THIS. - synch_gps_spec = [ - synch_gps_pass_type, - synch_gps_pass_idx, - synch_gps_start_or_stop, - synch_gps_time, - ] + synch_gps_spec = [synch_gps_pass_type, synch_gps_pass_idx, synch_gps_start_or_stop, synch_gps_time] # Camera spec. # Which pause to select out of heliostat camera scan trajectory fragment. # ?? SCAFFOLDING RCB -- THIS LITERAL IS BECAUSE THIS IS A TEMPORARY PATCH, UNTIL TIME SYNCHRONIZXATION IS DONE AS PART OF RTK GPS/FRAME SYNCRHONIZATION, AND/OR KEY FRAME SELECTION(?). synch_camera_pause_idx = 0 - synch_camera_frame_id = self.find_camera_synch_frame( - synch_camera_hel_name, synch_camera_pause_idx - ) + synch_camera_frame_id = self.find_camera_synch_frame(synch_camera_hel_name, synch_camera_pause_idx) # ?? SCAFFOLDING RCB -- THIS INFORMATION IS DEFINED IN DIFFERENT PLACES IN THE CODE, CREATING A VULNERABILITY TO POSSIBLE FURTURE BUGS DUE TO MISMATCHES. IF THIS APPROACH SURVIVES, RESOLVE THIS. - synch_camera_spec = [ - synch_camera_hel_name, - synch_camera_pause_idx, - synch_camera_frame_id, - ] + synch_camera_spec = [synch_camera_hel_name, synch_camera_pause_idx, synch_camera_frame_id] # Pair. synch_pair = [ synch_gps_spec, @@ -1415,9 +1161,7 @@ def initialize_synchronization_pair_list(self): # Return. 
return synch_pair_list - def find_gps_synch_time( - self, synch_gps_pass_type, synch_gps_pass_idx, synch_gps_start_or_stop - ): + def find_gps_synch_time(self, synch_gps_pass_type, synch_gps_pass_idx, synch_gps_start_or_stop): if synch_gps_pass_type == 'min_to_max': pass_pair_list = self.maximum_to_minimum_pass_pair_list elif synch_gps_pass_type == 'max_to_min': @@ -1496,10 +1240,7 @@ def get_synchronization_frame_id(self, hel_name): synch_hel_name = synch_camera_spec[0] if synch_hel_name == hel_name: return synch_camera_spec[2] - print( - 'In TrajectoryAnalysis.get_synchronization_frame_id(), did not find input hel_name =', - str(hel_name), - ) + print('In TrajectoryAnalysis.get_synchronization_frame_id(), did not find input hel_name =', str(hel_name)) assert False def initialize_synchronization_constants(self): @@ -1570,33 +1311,23 @@ def construct_heliostat_camera_passes(self): camera_xyzt_list.append(camera_xyzt) # Split the (x,y,z,t) list into contiguous segments, corresponding to separate passes. # ?? SCAFFOLDING RCB -- REWORK THIS, WITHOUT THE MAGIC NUMBER DISCONNECT_THRESHOLD? - list_of_camera_xyzt_lists_1 = ( - self.split_xyzt_list_where_distance_exceeds_maximum( - camera_xyzt_list, self.maximum_camera_pass_inter_point_distance - ) + list_of_camera_xyzt_lists_1 = self.split_xyzt_list_where_distance_exceeds_maximum( + camera_xyzt_list, self.maximum_camera_pass_inter_point_distance ) # Discard lists that are too short. - list_of_camera_xyzt_lists_2 = ( - self.discard_xyzt_lists_where_length_below_minimum( - list_of_camera_xyzt_lists_1, - self.minimum_camera_pass_number_of_points, - ) + list_of_camera_xyzt_lists_2 = self.discard_xyzt_lists_where_length_below_minimum( + list_of_camera_xyzt_lists_1, self.minimum_camera_pass_number_of_points ) # Split the resulting (x,y,z,t) lists by speed into segments with continuous motion, thus eliminating connected sequences that include stopping and proceeding in a differnt direction. list_of_camera_xyzt_lists_3 = [] for camera_xyzt_list in list_of_camera_xyzt_lists_2: # If there is a pause, break the list up. - list_of_camera_xyzt_lists_A = ( - self.split_xyzt_list_where_inter_point_speed_below_minimum( - camera_xyzt_list, self.minimum_camera_pass_inter_point_speed - ) + list_of_camera_xyzt_lists_A = self.split_xyzt_list_where_inter_point_speed_below_minimum( + camera_xyzt_list, self.minimum_camera_pass_inter_point_speed ) # Discard lists that are too short. - list_of_camera_xyzt_lists_B = ( - self.discard_xyzt_lists_where_length_below_minimum( - list_of_camera_xyzt_lists_A, - self.minimum_camera_pass_number_of_points, - ) + list_of_camera_xyzt_lists_B = self.discard_xyzt_lists_where_length_below_minimum( + list_of_camera_xyzt_lists_A, self.minimum_camera_pass_number_of_points ) # Add the result to our accumulating list. 
list_of_camera_xyzt_lists_3 += list_of_camera_xyzt_lists_B @@ -1623,9 +1354,7 @@ def split_xyz_list_where_distance_exceeds_maximum( return [xyz_list] else: first_xyz = xyz_list[0] - remainder = self.split_xyz_list_where_distance_exceeds_maximum( - xyz_list[1:], maximum_inter_point_distance - ) + remainder = self.split_xyz_list_where_distance_exceeds_maximum(xyz_list[1:], maximum_inter_point_distance) first_remainder_xyz_list = remainder[0] first_remainder_xyz = first_remainder_xyz_list[0] d = np.sqrt( @@ -1648,9 +1377,7 @@ def split_xyzt_list_where_distance_exceeds_maximum( return [xyzt_list] else: first_xyzt = xyzt_list[0] - remainder = self.split_xyzt_list_where_distance_exceeds_maximum( - xyzt_list[1:], maximum_inter_point_distance - ) + remainder = self.split_xyzt_list_where_distance_exceeds_maximum(xyzt_list[1:], maximum_inter_point_distance) first_remainder_xyzt_list = remainder[0] first_remainder_xyzt = first_remainder_xyzt_list[0] d = np.sqrt( @@ -1664,9 +1391,7 @@ def split_xyzt_list_where_distance_exceeds_maximum( remainder[0] = [first_xyzt] + remainder[0] return remainder - def split_xyzt_list_where_inter_point_speed_below_minimum( - self, xyzt_list, minimum_inter_point_speed - ): + def split_xyzt_list_where_inter_point_speed_below_minimum(self, xyzt_list, minimum_inter_point_speed): if len(xyzt_list) == 0: return [] elif len(xyzt_list) == 1: @@ -1702,9 +1427,7 @@ def split_xyzt_list_where_inter_point_speed_below_minimum( remainder[0] = [first_xyzt] + remainder[0] return remainder - def discard_xyzt_lists_where_length_below_minimum( - self, list_of_xyzt_lists, minimum_list_length - ): + def discard_xyzt_lists_where_length_below_minimum(self, list_of_xyzt_lists, minimum_list_length): list_of_sufficiently_long_xyzt_lists = [] for xyzt_list in list_of_xyzt_lists: if len(xyzt_list) >= minimum_list_length: @@ -1749,9 +1472,7 @@ def construct_hel_gps_camera_analysis_dict(self): ) pass else: - hel_gps_camera_analysis_dict[hel_name] = ( - list_of_gps_camera_analysis_dicts - ) + hel_gps_camera_analysis_dict[hel_name] = list_of_gps_camera_analysis_dicts # Return. return hel_gps_camera_analysis_dict @@ -1770,9 +1491,7 @@ def find_matching_gps_pass(self, camera_pass): 3 ] # ?? SCAFFOLDING RCB -- HAVE THIS FETCH ORIGINAL FULL-LENGTH START AND STOP TIME INTERVAL. # Check for overlap. - if (gps_pass_begin_time <= camera_pass_end_time) and ( - camera_pass_begin_time <= gps_pass_end_time - ): + if (gps_pass_begin_time <= camera_pass_end_time) and (camera_pass_begin_time <= gps_pass_end_time): # There is temporal overlap. overlap_min = max(camera_pass_begin_time, gps_pass_begin_time) overlap_max = min(camera_pass_end_time, gps_pass_end_time) @@ -1806,9 +1525,7 @@ def construct_gps_camera_analysis_dict( self, hel_name, gps_pass, camera_pass ): # ?? SCAFFOLDING RCB -- THIS SHOULD BE A CLASS. # Fetch camera pass time interval. - time_begin = camera_pass['stable_begin_xyzt'][ - 3 - ] # Times correspond to the seconds column in the flight log. + time_begin = camera_pass['stable_begin_xyzt'][3] # Times correspond to the seconds column in the flight log. time_end = camera_pass['stable_end_xyzt'][3] # time_mid = (time_begin + time_end) / 2.0 # # Compute heliostat (azimuth, elevation) from aim point and time. 
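The recursive splitters reformatted above (split by inter-point distance, split by inter-point speed, discard short lists) drive both the GPS and camera pass construction. A minimal iterative sketch of the speed-based split, shown only to make the segmentation rule explicit; it assumes xyzt entries are [x, y, z, t] lists as used above, the name split_xyzt_list_by_pause is hypothetical, and the class method remains authoritative for edge cases:

import math

def split_xyzt_list_by_pause(xyzt_list, minimum_inter_point_speed):
    # Start a new segment whenever the speed between consecutive points drops below the
    # minimum (a pause), so each returned segment is one stretch of continuous motion.
    segments = []
    current = []
    for xyzt in xyzt_list:
        if current:
            x0, y0, z0, t0 = current[-1]
            x1, y1, z1, t1 = xyzt
            distance = math.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2 + (z1 - z0) ** 2)
            delta_t = t1 - t0
            speed = (distance / delta_t) if delta_t > 0 else 0.0
            if speed < minimum_inter_point_speed:
                segments.append(current)
                current = []
        current.append(xyzt)
    if current:
        segments.append(current)
    return segments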
@@ -1819,9 +1536,7 @@ def construct_gps_camera_analysis_dict( elevation_from_model_mid, azimuth_from_model_end, elevation_from_model_end, - ) = self.compute_model_azimuth_elevation( - hel_name, time_begin, time_mid, time_end - ) + ) = self.compute_model_azimuth_elevation(hel_name, time_begin, time_mid, time_end) # Compute heliostat (azimuth, elevation) that will bring camera pass into parallel alignment with GPS pass. (azimuth_from_alignment, elevation_from_alignment, alignment_angle_error) = ( self.compute_alignment_azimuth_elevation( @@ -1844,19 +1559,14 @@ def construct_gps_camera_analysis_dict( # Compute camera_pass points, transformed by (azimuth,elevation). heliostat = self.solar_field.lookup_heliostat(hel_name) transform_translation = heliostat.centroid_nparray - transform_rotation = Heliostat.heliostat_rotation_matrix( - azimuth_from_alignment, elevation_from_alignment - ) + transform_rotation = Heliostat.heliostat_rotation_matrix(azimuth_from_alignment, elevation_from_alignment) camera_xyzt_list = camera_pass['inlier_xyzt_list'] transformed_camera_xyzt_list = [] for camera_xyzt in camera_xyzt_list: camera_xyz_heliostat = camera_xyzt[0:3] camera_time = camera_xyzt[3] # ?? SCAFFOLDING RCB -- CONVERSION TO/FROM LIST AGAIN SUGGESTS CONVERTING TO NUMPY ARRAYS THROUGHOUT. - camera_xyz_world = list( - transform_translation - + transform_rotation.dot(np.array(camera_xyz_heliostat)) - ) + camera_xyz_world = list(transform_translation + transform_rotation.dot(np.array(camera_xyz_heliostat))) camera_xyzt_world = camera_xyz_world + [camera_time] transformed_camera_xyzt_list.append(camera_xyzt_world) per_heliostat_transformed_camera_pass = self.scan_pass_given_xyzt_list( @@ -1870,13 +1580,9 @@ def construct_gps_camera_analysis_dict( camera_gps_distance_squared_count = 0 for transformed_camera_xyzt in transformed_camera_xyzt_list: transformed_camera_inlier_xyz = transformed_camera_xyzt[0:3] - nearest_gps_xyz = g3d.closest_point_on_line_3d( - transformed_camera_inlier_xyz, gps_line_3d - ) + nearest_gps_xyz = g3d.closest_point_on_line_3d(transformed_camera_inlier_xyz, gps_line_3d) camera_gps_point_pair = [transformed_camera_inlier_xyz, nearest_gps_xyz] - camera_gps_distance = g3d.distance_between_xyz_points( - transformed_camera_inlier_xyz, nearest_gps_xyz - ) + camera_gps_distance = g3d.distance_between_xyz_points(transformed_camera_inlier_xyz, nearest_gps_xyz) camera_gps_point_pair_list.append(camera_gps_point_pair) camera_gps_distance_list.append(camera_gps_distance) camera_gps_distance_squared_sum += camera_gps_distance**2 @@ -1887,9 +1593,7 @@ def construct_gps_camera_analysis_dict( 'ERROR: In TrajectoryAnalysis.construct_gps_camera_analysis_dict(), encountered empty camera pass inlier xyzt list.' ) assert False - rms_distance = np.sqrt( - camera_gps_distance_squared_sum / camera_gps_distance_squared_count - ) + rms_distance = np.sqrt(camera_gps_distance_squared_sum / camera_gps_distance_squared_count) # Store results. gps_camera_analysis_dict = {} gps_camera_analysis_dict['hel_name'] = hel_name @@ -1901,9 +1605,7 @@ def construct_gps_camera_analysis_dict( gps_camera_analysis_dict['time_mid'] = time_mid # (azimuth, elevation) from aim point and time. 
gps_camera_analysis_dict['azimuth_from_model_begin'] = azimuth_from_model_begin - gps_camera_analysis_dict['elevation_from_model_begin'] = ( - elevation_from_model_begin - ) + gps_camera_analysis_dict['elevation_from_model_begin'] = elevation_from_model_begin gps_camera_analysis_dict['azimuth_from_model_mid'] = azimuth_from_model_mid gps_camera_analysis_dict['elevation_from_model_mid'] = elevation_from_model_mid gps_camera_analysis_dict['azimuth_from_model_end'] = azimuth_from_model_end @@ -1926,36 +1628,19 @@ def construct_gps_camera_analysis_dict( # gps_camera_analysis_dict['azimuth_target_from_log_end'] = azimuth_target_from_log_end # gps_camera_analysis_dict['elevation_target_from_log_end'] = elevation_target_from_log_end # Corresponding point analysis. - gps_camera_analysis_dict['per_heliostat_transformed_camera_pass'] = ( - per_heliostat_transformed_camera_pass - ) - gps_camera_analysis_dict['camera_gps_point_pair_list'] = ( - camera_gps_point_pair_list - ) + gps_camera_analysis_dict['per_heliostat_transformed_camera_pass'] = per_heliostat_transformed_camera_pass + gps_camera_analysis_dict['camera_gps_point_pair_list'] = camera_gps_point_pair_list gps_camera_analysis_dict['camera_gps_distance_list'] = camera_gps_distance_list gps_camera_analysis_dict['rms_distance'] = rms_distance # Return. return gps_camera_analysis_dict def compute_model_azimuth_elevation(self, hel_name, time_begin, time_mid, time_end): - (azimuth_begin, elevation_begin) = ( - self.compute_model_azimuth_elevation_given_time(hel_name, time_begin) - ) - azimuth_mid, elevation_mid = self.compute_model_azimuth_elevation_given_time( - hel_name, time_mid - ) - azimuth_end, elevation_end = self.compute_model_azimuth_elevation_given_time( - hel_name, time_end - ) + (azimuth_begin, elevation_begin) = self.compute_model_azimuth_elevation_given_time(hel_name, time_begin) + azimuth_mid, elevation_mid = self.compute_model_azimuth_elevation_given_time(hel_name, time_mid) + azimuth_end, elevation_end = self.compute_model_azimuth_elevation_given_time(hel_name, time_end) # Return. - return ( - azimuth_begin, - elevation_begin, - azimuth_mid, - elevation_mid, - azimuth_end, - elevation_end, - ) + return (azimuth_begin, elevation_begin, azimuth_mid, elevation_mid, azimuth_end, elevation_end) def compute_model_azimuth_elevation_given_time(self, hel_name, time): heliostat = self.solar_field.lookup_heliostat(hel_name) @@ -2041,12 +1726,9 @@ def check_az_el(self, az, el, gps_uxyz, camera_uxyz): return delta_uxyz def compute_log_azimuth_elevation(self, hel_name, time_begin, time_mid, time_end): - ( - azimuth_begin, - elevation_begin, - azimuth_target_begin, - elevation_target_begin, - ) = self.compute_log_azimuth_elevation_given_time(hel_name, time_begin) + (azimuth_begin, elevation_begin, azimuth_target_begin, elevation_target_begin) = ( + self.compute_log_azimuth_elevation_given_time(hel_name, time_begin) + ) (azimuth_mid, elevation_mid, azimuth_target_mid, elevation_target_mid) = ( self.compute_log_azimuth_elevation_given_time(hel_name, time_mid) ) @@ -2081,16 +1763,14 @@ def set_per_heliostat_estimates_of_camera_xyz_given_overall_time(self): frames_dict = self.hel_frames_dict[hel_name] for frame_id in frames_dict.keys(): frame_parameters_dict = frames_dict[frame_id] - camera_xyz_heliostat = frame_parameters_dict[ - 'camera_xyz_in_heliostat_coords' - ] + camera_xyz_heliostat = frame_parameters_dict['camera_xyz_in_heliostat_coords'] # ?? 
SCAFFOLDING RCB -- HERE WE COULD USE THE SPECIFIC TIME OF EACH FRAME, AND UPDATE HELIOSTAT TRACKING ANGLE TO CORRESPOND TO THAT TIME, BEFORE CALLING heliostat.transform_xyz(). HOWEVER, THIS IS NOT REQUIRED FOR SHORT FLIGHTS, SINCE THE DIFFERENCE WILL BE SMALL, AND IT IS REFINED LATER camera_xyz_world = list( heliostat.transform_xyz(camera_xyz_heliostat) ) # transform_xyz() returns a numpy array. - frame_parameters_dict[ - 'per_heliostat_estimate_of_camera_xyz_in_world_coords_overall_time' - ] = camera_xyz_world + frame_parameters_dict['per_heliostat_estimate_of_camera_xyz_in_world_coords_overall_time'] = ( + camera_xyz_world + ) # Camera passes. self.hel_transformed_camera_passes_dict = {} for hel_name in self.hel_camera_passes_dict.keys(): @@ -2114,21 +1794,15 @@ def set_per_heliostat_estimates_of_camera_xyz_given_overall_time(self): ) transformed_camera_pass_list.append(transformed_camera_pass) # Add to result. - self.hel_transformed_camera_passes_dict[hel_name] = ( - transformed_camera_pass_list - ) + self.hel_transformed_camera_passes_dict[hel_name] = transformed_camera_pass_list def set_per_heliosat_configurations_from_gps_camera_alignment(self): """ Sets the configuration of each heliostat for which we found an (az,el) configuraiton using the alignment method. """ for hel_name in self.hel_gps_camera_analysis_dict.keys(): - list_of_gps_camera_analysis_dicts = self.hel_gps_camera_analysis_dict[ - hel_name - ] - gps_camera_analysis_dict = list_of_gps_camera_analysis_dicts[ - 0 - ] # Arbitrarily choose first orientation. + list_of_gps_camera_analysis_dicts = self.hel_gps_camera_analysis_dict[hel_name] + gps_camera_analysis_dict = list_of_gps_camera_analysis_dicts[0] # Arbitrarily choose first orientation. azimuth = gps_camera_analysis_dict['azimuth_from_alignment'] elevation = gps_camera_analysis_dict['elevation_from_alignment'] heliostat = self.solar_field.lookup_heliostat(hel_name) @@ -2143,9 +1817,7 @@ def set_per_heliosat_configurations_from_gps_camera_alignment(self): # PRINT KEY RESULTS def print_flight_log_df(self): - print( - 'In TrajectoryAnalysis.print_flight_log_df(), flight_log_df, after adding columns:' - ) + print('In TrajectoryAnalysis.print_flight_log_df(), flight_log_df, after adding columns:') print(self.flight_log_df) def print_gps_flight_log_zero_seconds(self): @@ -2155,46 +1827,18 @@ def print_gps_flight_log_zero_seconds(self): ) def print_velocity_xy_change_points(self): - print( - 'In TrajectoryAnalysis.print_velocity_xy_change_points(), velocity_xy_change minima:' - ) - lt.print_list( - self.gps_velocity_xy_change_minima, - indent=4, - max_items=50, - max_item_length=200, - ) - print( - 'In TrajectoryAnalysis.print_velocity_xy_change_points(), velocity_xy_change maxima:' - ) - lt.print_list( - self.gps_velocity_xy_change_maxima, - indent=4, - max_items=50, - max_item_length=200, - ) + print('In TrajectoryAnalysis.print_velocity_xy_change_points(), velocity_xy_change minima:') + lt.print_list(self.gps_velocity_xy_change_minima, indent=4, max_items=50, max_item_length=200) + print('In TrajectoryAnalysis.print_velocity_xy_change_points(), velocity_xy_change maxima:') + lt.print_list(self.gps_velocity_xy_change_maxima, indent=4, max_items=50, max_item_length=200) def print_gps_scan_pass_summary(self): # Maximum to minimum. 
- print( - '\nIn TrajectoryAnalysis.print_gps_scan_pass_summary(), self.maximum_to_minimum_pass_pair_list:' - ) - lt.print_list( - self.maximum_to_minimum_pass_pair_list, - indent=4, - max_items=50, - max_item_length=2000, - ) + print('\nIn TrajectoryAnalysis.print_gps_scan_pass_summary(), self.maximum_to_minimum_pass_pair_list:') + lt.print_list(self.maximum_to_minimum_pass_pair_list, indent=4, max_items=50, max_item_length=2000) # Minimum to maximum. - print( - '\nIn TrajectoryAnalysis.print_gps_scan_pass_summary(), self.minimum_to_maximum_pass_pair_list:' - ) - lt.print_list( - self.minimum_to_maximum_pass_pair_list, - indent=4, - max_items=50, - max_item_length=2000, - ) + print('\nIn TrajectoryAnalysis.print_gps_scan_pass_summary(), self.minimum_to_maximum_pass_pair_list:') + lt.print_list(self.minimum_to_maximum_pass_pair_list, indent=4, max_items=50, max_item_length=2000) # GPS scan passes. print('\nIn TrajectoryAnalysis.print_gps_scan_pass_summary(), gps_scan_passes:') gps_scan_pass_idx = 0 @@ -2251,12 +1895,8 @@ def print_synchronization_pair_list(self): ) def print_hel_camera_passes_dict(self, max_heliostats=4): - print( - '\nIn TrajectoryAnalysis.print_hel_camera_passes_dict(), heliostat camera passes:' - ) - self.print_hel_camera_passes_dict_aux( - self.hel_camera_passes_dict, max_heliostats - ) + print('\nIn TrajectoryAnalysis.print_hel_camera_passes_dict(), heliostat camera passes:') + self.print_hel_camera_passes_dict_aux(self.hel_camera_passes_dict, max_heliostats) def print_hel_camera_passes_dict_aux(self, hel_camera_passes_dict, max_heliostats): # Heliostat entries. @@ -2281,44 +1921,31 @@ def print_hel_camera_passes_dict_aux(self, hel_camera_passes_dict, max_heliostat print('...') def print_hel_gps_camera_analysis_dict(self, max_heliostats=4): - print( - '\nIn TrajectoryAnalysis.print_hel_gps_camera_analysis_dict(), heliostat GPS-camera analysis data:' - ) - self.print_hel_gps_camera_analysis_dict_aux( - self.hel_gps_camera_analysis_dict, max_heliostats - ) + print('\nIn TrajectoryAnalysis.print_hel_gps_camera_analysis_dict(), heliostat GPS-camera analysis data:') + self.print_hel_gps_camera_analysis_dict_aux(self.hel_gps_camera_analysis_dict, max_heliostats) - def print_hel_gps_camera_analysis_dict_aux( - self, hel_gps_camera_analysis_dict, max_heliostats - ): - print( - '\nIn TrajectoryAnalysis.print_hel_gps_camera_analysis_dict(), heliostat GPS-camera analysis data:' - ) + def print_hel_gps_camera_analysis_dict_aux(self, hel_gps_camera_analysis_dict, max_heliostats): + print('\nIn TrajectoryAnalysis.print_hel_gps_camera_analysis_dict(), heliostat GPS-camera analysis data:') # Heliostat entries. 
hel_count = 1 for hel_name in dt.sorted_keys(hel_gps_camera_analysis_dict): print(str(hel_name) + ':') list_of_gps_camera_analysis_dicts = hel_gps_camera_analysis_dict[hel_name] for gps_camera_analysis_dict in list_of_gps_camera_analysis_dicts: - transformed_camera_begin_xyzt = gps_camera_analysis_dict[ - 'per_heliostat_transformed_camera_pass' - ]['stable_begin_xyzt'] - transformed_camera_end_xyzt = gps_camera_analysis_dict[ - 'per_heliostat_transformed_camera_pass' - ]['stable_end_xyzt'] - gps_begin_xyzt = gps_camera_analysis_dict['gps_pass'][ + transformed_camera_begin_xyzt = gps_camera_analysis_dict['per_heliostat_transformed_camera_pass'][ 'stable_begin_xyzt' ] + transformed_camera_end_xyzt = gps_camera_analysis_dict['per_heliostat_transformed_camera_pass'][ + 'stable_end_xyzt' + ] + gps_begin_xyzt = gps_camera_analysis_dict['gps_pass']['stable_begin_xyzt'] gps_end_xyzt = gps_camera_analysis_dict['gps_pass']['stable_end_xyzt'] azimuth = gps_camera_analysis_dict['azimuth_from_alignment'] elevation = gps_camera_analysis_dict['elevation_from_alignment'] alignment_error = gps_camera_analysis_dict['alignment_angle_error'] rms_distance = gps_camera_analysis_dict['rms_distance'] print( - ' Transformed camera pass:', - transformed_camera_begin_xyzt, - ' --> ', - transformed_camera_end_xyzt, + ' Transformed camera pass:', transformed_camera_begin_xyzt, ' --> ', transformed_camera_end_xyzt ) print(' GPS pass: ', gps_begin_xyzt, ' --> ', gps_end_xyzt) print(' Azimuth: ', azimuth, ' rad') @@ -2344,32 +1971,20 @@ def draw_and_save_solar_field_suite(self): color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. self.solar_field_style = self.solar_field_default_style - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) # Drawing styles. - self.flight_log_style = rcps.data_curve( - color='grey', linewidth=0.25, markersize=0.5 - ) + self.flight_log_style = rcps.data_curve(color='grey', linewidth=0.25, markersize=0.5) self.minimum_style = rcps.marker(color='m', markersize=0.8) self.maximum_style = rcps.marker(color='r', markersize=0.8) self.max_to_min_color = 'g' self.min_to_max_color = 'r' - self.max_to_min_scan_pass_style = rcps.outline( - color=self.max_to_min_color, linewidth=0.5 - ) - self.min_to_max_scan_pass_style = rcps.outline( - color=self.min_to_max_color, linewidth=0.5 - ) + self.max_to_min_scan_pass_style = rcps.outline(color=self.max_to_min_color, linewidth=0.5) + self.min_to_max_scan_pass_style = rcps.outline(color=self.min_to_max_color, linewidth=0.5) self.scan_pass_color = 'k' self.scan_pass_style = rcps.outline(color=self.scan_pass_color, linewidth=0.1) - self.trajectory_fragment_style = rcps.data_curve( - color='r', linewidth=0.25, markersize=0.4 - ) + self.trajectory_fragment_style = rcps.data_curve(color='r', linewidth=0.25, markersize=0.4) self.camera_pass_color_wheel = ['red', 'green', 'blue', 'magenta', 'goldenrod'] self.trajectory_fragment_color_wheel = [ 'coral', @@ -2379,15 +1994,11 @@ def draw_and_save_solar_field_suite(self): 'gold', ] # Lighter version of each camera pass color. 
self.synchronization_point_style = rcps.marker(color='r', markersize=1) - self.camera_pass_style = rcps.data_curve( - color='r', linewidth=0.1, markersize=0.6 - ) + self.camera_pass_style = rcps.data_curve(color='r', linewidth=0.1, markersize=0.6) self.pass_connection_style = rcps.outline(color='r', linewidth=0.1) # What to include in each figure. - draw_control_dict = ( - {} - ) # ?? SCAFFOLDING RCB -- PROBABLY REPLACE WITH A REAL RENDER CONTROL CLASS OBJECT. + draw_control_dict = {} # ?? SCAFFOLDING RCB -- PROBABLY REPLACE WITH A REAL RENDER CONTROL CLASS OBJECT. draw_control_dict['draw_GPS_log'] = False draw_control_dict['draw_gps_velocity_xy_change_minima'] = False draw_control_dict['draw_gps_velocity_xy_change_maxima'] = False @@ -2472,9 +2083,7 @@ def draw_and_save_solar_field_suite(self): aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz_minus_1_hour ) self.solar_field_style = rcsf.heliostat_normals_outlines(color='k') - aimpoint_str = '({x},{y},{z})'.format( - x=self.aimpoint_xyz[0], y=self.aimpoint_xyz[1], z=self.aimpoint_xyz[2] - ) + aimpoint_str = '({x},{y},{z})'.format(x=self.aimpoint_xyz[0], y=self.aimpoint_xyz[1], z=self.aimpoint_xyz[2]) date_str = '{m}/{d}/{y}'.format( m=self.when_ymdhmsz_minus_1_hour[1], d=self.when_ymdhmsz_minus_1_hour[2], @@ -2503,13 +2112,10 @@ def draw_and_save_solar_field_suite(self): # Draw the solar field nominal tracking, a half-hour before the flight. self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, - when_ymdhmsz=self.when_ymdhmsz_minus_30_minutes, + aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz_minus_30_minutes ) self.solar_field_style = rcsf.heliostat_normals_outlines(color='k') - aimpoint_str = '({x},{y},{z})'.format( - x=self.aimpoint_xyz[0], y=self.aimpoint_xyz[1], z=self.aimpoint_xyz[2] - ) + aimpoint_str = '({x},{y},{z})'.format(x=self.aimpoint_xyz[0], y=self.aimpoint_xyz[1], z=self.aimpoint_xyz[2]) date_str = '{m}/{d}/{y}'.format( m=self.when_ymdhmsz_minus_30_minutes[1], d=self.when_ymdhmsz_minus_30_minutes[2], @@ -2537,16 +2143,10 @@ def draw_and_save_solar_field_suite(self): ) # Draw the solar field nominal tracking. - self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz - ) + self.solar_field.set_full_field_tracking(aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz) self.solar_field_style = rcsf.heliostat_normals_outlines(color='k') - aimpoint_str = '({x},{y},{z})'.format( - x=self.aimpoint_xyz[0], y=self.aimpoint_xyz[1], z=self.aimpoint_xyz[2] - ) - date_str = '{m}/{d}/{y}'.format( - m=self.when_ymdhmsz[1], d=self.when_ymdhmsz[2], y=self.when_ymdhmsz[0] - ) + aimpoint_str = '({x},{y},{z})'.format(x=self.aimpoint_xyz[0], y=self.aimpoint_xyz[1], z=self.aimpoint_xyz[2]) + date_str = '{m}/{d}/{y}'.format(m=self.when_ymdhmsz[1], d=self.when_ymdhmsz[2], y=self.when_ymdhmsz[0]) time_str = '{h:d}:{m:02d}:{s:02d}'.format( h=self.when_ymdhmsz[3], m=self.when_ymdhmsz[4], s=self.when_ymdhmsz[5] ) @@ -2567,15 +2167,9 @@ def draw_and_save_solar_field_suite(self): ) # Draw the solar field nominal tracking, with exceptions of the day (showing normals). 
- self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz - ) - self.solar_field.set_heliostats_configuration( - self.up_heliostats, self.up_configuration - ) - self.solar_field.set_heliostats_configuration( - self.down_heliostats, self.down_configuration - ) + self.solar_field.set_full_field_tracking(aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz) + self.solar_field.set_heliostats_configuration(self.up_heliostats, self.up_configuration) + self.solar_field.set_heliostats_configuration(self.down_heliostats, self.down_configuration) self.solar_field_style = rcsf.heliostat_normals_outlines(color='grey') self.heliostat_up_style = rch.normal_outline( color='lightblue' @@ -2583,28 +2177,15 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.normal_outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) - aimpoint_str = '({x},{y},{z})'.format( - x=self.aimpoint_xyz[0], y=self.aimpoint_xyz[1], z=self.aimpoint_xyz[2] - ) - date_str = '{m}/{d}/{y}'.format( - m=self.when_ymdhmsz[1], d=self.when_ymdhmsz[2], y=self.when_ymdhmsz[0] - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) + aimpoint_str = '({x},{y},{z})'.format(x=self.aimpoint_xyz[0], y=self.aimpoint_xyz[1], z=self.aimpoint_xyz[2]) + date_str = '{m}/{d}/{y}'.format(m=self.when_ymdhmsz[1], d=self.when_ymdhmsz[2], y=self.when_ymdhmsz[0]) time_str = '{h:d}:{m:02d}:{s:02d}'.format( h=self.when_ymdhmsz[3], m=self.when_ymdhmsz[4], s=self.when_ymdhmsz[5] ) self.draw_and_save_solar_field_trajectories( - 'AF. Actual ' - + self.solar_field.short_name - + ' Field Configuration on ' - + date_str - + ' at ' - + time_str, + 'AF. Actual ' + self.solar_field.short_name + ' Field Configuration on ' + date_str + ' at ' + time_str, draw_control_dict, limits_3d_list=limits_3d_list, limits_xy_list=limits_xy_list, @@ -2613,15 +2194,9 @@ def draw_and_save_solar_field_suite(self): ) # Draw the solar field nominal tracking, with exceptions of the day. - self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz - ) - self.solar_field.set_heliostats_configuration( - self.up_heliostats, self.up_configuration - ) - self.solar_field.set_heliostats_configuration( - self.down_heliostats, self.down_configuration - ) + self.solar_field.set_full_field_tracking(aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz) + self.solar_field.set_heliostats_configuration(self.up_heliostats, self.up_configuration) + self.solar_field.set_heliostats_configuration(self.down_heliostats, self.down_configuration) self.solar_field_style = rcsf.heliostat_outlines(color='grey') self.heliostat_up_style = rch.outline( color='lightblue' @@ -2629,28 +2204,15 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. 
- self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) - aimpoint_str = '({x},{y},{z})'.format( - x=self.aimpoint_xyz[0], y=self.aimpoint_xyz[1], z=self.aimpoint_xyz[2] - ) - date_str = '{m}/{d}/{y}'.format( - m=self.when_ymdhmsz[1], d=self.when_ymdhmsz[2], y=self.when_ymdhmsz[0] - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) + aimpoint_str = '({x},{y},{z})'.format(x=self.aimpoint_xyz[0], y=self.aimpoint_xyz[1], z=self.aimpoint_xyz[2]) + date_str = '{m}/{d}/{y}'.format(m=self.when_ymdhmsz[1], d=self.when_ymdhmsz[2], y=self.when_ymdhmsz[0]) time_str = '{h:d}:{m:02d}:{s:02d}'.format( h=self.when_ymdhmsz[3], m=self.when_ymdhmsz[4], s=self.when_ymdhmsz[5] ) self.draw_and_save_solar_field_trajectories( - 'AG. Actual ' - + self.solar_field.short_name - + ' Field Configuration on ' - + date_str - + ' at ' - + time_str, + 'AG. Actual ' + self.solar_field.short_name + ' Field Configuration on ' + date_str + ' at ' + time_str, draw_control_dict, limits_3d_list=limits_3d_list, limits_xy_list=limits_xy_list, @@ -2659,15 +2221,9 @@ def draw_and_save_solar_field_suite(self): ) # Draw the solar field and GPS trajectory. - self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz - ) - self.solar_field.set_heliostats_configuration( - self.up_heliostats, self.up_configuration - ) - self.solar_field.set_heliostats_configuration( - self.down_heliostats, self.down_configuration - ) + self.solar_field.set_full_field_tracking(aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz) + self.solar_field.set_heliostats_configuration(self.up_heliostats, self.up_configuration) + self.solar_field.set_heliostats_configuration(self.down_heliostats, self.down_configuration) draw_control_dict['draw_GPS_log'] = True self.solar_field_style = rcsf.heliostat_outlines(color='grey') self.heliostat_up_style = rch.outline( @@ -2676,12 +2232,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('b') self.draw_and_save_solar_field_trajectories( 'AH. GPS Trajectory Over Solar Field', @@ -2693,15 +2245,9 @@ def draw_and_save_solar_field_suite(self): ) # Draw the solar field with GPS trajectory, with min/max scan pass construction steps identfiied. 
- self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz - ) - self.solar_field.set_heliostats_configuration( - self.up_heliostats, self.up_configuration - ) - self.solar_field.set_heliostats_configuration( - self.down_heliostats, self.down_configuration - ) + self.solar_field.set_full_field_tracking(aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz) + self.solar_field.set_heliostats_configuration(self.up_heliostats, self.up_configuration) + self.solar_field.set_heliostats_configuration(self.down_heliostats, self.down_configuration) draw_control_dict['draw_GPS_log'] = True draw_control_dict['draw_gps_velocity_xy_change_minima'] = True draw_control_dict['draw_gps_velocity_xy_change_maxima'] = True @@ -2714,12 +2260,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.draw_and_save_solar_field_trajectories( 'AI. GPS Scan Pass Min/Max Construction', @@ -2731,15 +2273,9 @@ def draw_and_save_solar_field_suite(self): ) # Draw the solar field with GPS trajectory, with final scan passes identfiied. - self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz - ) - self.solar_field.set_heliostats_configuration( - self.up_heliostats, self.up_configuration - ) - self.solar_field.set_heliostats_configuration( - self.down_heliostats, self.down_configuration - ) + self.solar_field.set_full_field_tracking(aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz) + self.solar_field.set_heliostats_configuration(self.up_heliostats, self.up_configuration) + self.solar_field.set_heliostats_configuration(self.down_heliostats, self.down_configuration) draw_control_dict['draw_GPS_log'] = True draw_control_dict['draw_gps_velocity_xy_change_minima'] = False draw_control_dict['draw_gps_velocity_xy_change_maxima'] = False @@ -2753,12 +2289,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.draw_and_save_solar_field_trajectories( 'AJ. Inferred GPS Scan Passes', @@ -2784,12 +2316,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. 
- self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.draw_and_save_solar_field_trajectories( 'AK. Inferred GPS Scan Passes (All Heliostats Face Up)', @@ -2815,12 +2343,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.draw_and_save_solar_field_trajectories( 'AL. Inferred GPS Scan Pass Reference Rendering (All Heliostats Face Up)', @@ -2832,16 +2356,12 @@ def draw_and_save_solar_field_suite(self): ) # Draw the solar field with the trajectory fragments inferred from the reconstruction, used for time synchronization (tracking nominal time). - self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz - ) + self.solar_field.set_full_field_tracking(aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz) self.set_per_heliostat_estimates_of_camera_xyz_given_overall_time() draw_control_dict['draw_GPS_log'] = True draw_control_dict['draw_gps_scan_passes'] = False draw_control_dict['draw_trajectory_fragments'] = True - draw_control_dict['trajectory_fragment_selected_heliostats'] = ( - self.synchronization_heliostat_name_list() - ) + draw_control_dict['trajectory_fragment_selected_heliostats'] = self.synchronization_heliostat_name_list() draw_control_dict['connect_trajectory_fragments'] = True draw_control_dict['draw_synchronization_points'] = True draw_control_dict['include_points_with_missing_corners'] = True @@ -2853,12 +2373,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') # Sub views for point synchronization plots. synch_limits_3d_list = None @@ -2892,12 +2408,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. 
- self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.draw_and_save_solar_field_trajectories( 'AN. Trajectory Fragments for Each Heliostat Connected (Face Up)', @@ -2926,12 +2438,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.draw_and_save_solar_field_trajectories( 'AO. Per Heliostat Scan Fragments Including Partial (Face Up)', @@ -2952,12 +2460,8 @@ def draw_and_save_solar_field_suite(self): new_h_config = hc.HeliostatConfiguration(az, el) new_h_config.az = np.radians(180.0) heliostat.set_configuration(new_h_config, clear_tracking=True) - self.solar_field.set_heliostats_configuration( - self.up_heliostats, self.up_configuration - ) - self.solar_field.set_heliostats_configuration( - self.down_heliostats, self.down_configuration - ) + self.solar_field.set_heliostats_configuration(self.up_heliostats, self.up_configuration) + self.solar_field.set_heliostats_configuration(self.down_heliostats, self.down_configuration) self.set_per_heliostat_estimates_of_camera_xyz_given_overall_time() draw_control_dict['draw_trajectory_fragments'] = True draw_control_dict['connect_trajectory_fragments'] = False @@ -2971,12 +2475,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.draw_and_save_solar_field_trajectories( 'AP. 
Per Heliostat Scan Fragments Including Partial (Az South, Elev Tracking Nominal Minus 1 hour)', @@ -2991,12 +2491,8 @@ def draw_and_save_solar_field_suite(self): self.solar_field.set_full_field_tracking( aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz_minus_1_hour ) - self.solar_field.set_heliostats_configuration( - self.up_heliostats, self.up_configuration - ) - self.solar_field.set_heliostats_configuration( - self.down_heliostats, self.down_configuration - ) + self.solar_field.set_heliostats_configuration(self.up_heliostats, self.up_configuration) + self.solar_field.set_heliostats_configuration(self.down_heliostats, self.down_configuration) self.set_per_heliostat_estimates_of_camera_xyz_given_overall_time() draw_control_dict['draw_trajectory_fragments'] = True draw_control_dict['connect_trajectory_fragments'] = False @@ -3010,12 +2506,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.draw_and_save_solar_field_trajectories( 'AQ. Per Heliostat Scan Fragments Including Partial (Tracking Nominal Minus 1 hour)', @@ -3028,15 +2520,10 @@ def draw_and_save_solar_field_suite(self): # Draw the solar field with the trajectory fragments inferred from the reconstruction, including points viewing partial heliostats (tracking minus 30 minutes). self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, - when_ymdhmsz=self.when_ymdhmsz_minus_30_minutes, - ) - self.solar_field.set_heliostats_configuration( - self.up_heliostats, self.up_configuration - ) - self.solar_field.set_heliostats_configuration( - self.down_heliostats, self.down_configuration + aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz_minus_30_minutes ) + self.solar_field.set_heliostats_configuration(self.up_heliostats, self.up_configuration) + self.solar_field.set_heliostats_configuration(self.down_heliostats, self.down_configuration) self.set_per_heliostat_estimates_of_camera_xyz_given_overall_time() draw_control_dict['draw_trajectory_fragments'] = True draw_control_dict['connect_trajectory_fragments'] = False @@ -3050,12 +2537,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.draw_and_save_solar_field_trajectories( 'AR. 
Per Heliostat Scan Fragments Including Partial (Tracking Nominal Minus 30 Minutes)', @@ -3067,15 +2550,9 @@ def draw_and_save_solar_field_suite(self): ) # Draw the solar field with the trajectory fragments inferred from the reconstruction, including points viewing partial heliostats (tracking nominal time). - self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz - ) - self.solar_field.set_heliostats_configuration( - self.up_heliostats, self.up_configuration - ) - self.solar_field.set_heliostats_configuration( - self.down_heliostats, self.down_configuration - ) + self.solar_field.set_full_field_tracking(aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz) + self.solar_field.set_heliostats_configuration(self.up_heliostats, self.up_configuration) + self.solar_field.set_heliostats_configuration(self.down_heliostats, self.down_configuration) self.set_per_heliostat_estimates_of_camera_xyz_given_overall_time() draw_control_dict['draw_trajectory_fragments'] = True draw_control_dict['connect_trajectory_fragments'] = False @@ -3089,12 +2566,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.draw_and_save_solar_field_trajectories( 'AS. Per Heliostat Scan Fragments Including Partial (Tracking Nominal Time)', @@ -3106,15 +2579,9 @@ def draw_and_save_solar_field_suite(self): ) # Draw the solar field with the trajectory fragments inferred from the reconstruction, without points viewing partial heliostats (tracking nominal time). - self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz - ) - self.solar_field.set_heliostats_configuration( - self.up_heliostats, self.up_configuration - ) - self.solar_field.set_heliostats_configuration( - self.down_heliostats, self.down_configuration - ) + self.solar_field.set_full_field_tracking(aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz) + self.solar_field.set_heliostats_configuration(self.up_heliostats, self.up_configuration) + self.solar_field.set_heliostats_configuration(self.down_heliostats, self.down_configuration) self.set_per_heliostat_estimates_of_camera_xyz_given_overall_time() draw_control_dict['draw_trajectory_fragments'] = True draw_control_dict['connect_trajectory_fragments'] = False @@ -3128,12 +2595,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. 
- self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.draw_and_save_solar_field_trajectories( 'AT. Per Heliostat Scan Fragments After Discarding Partial (Tracking Nominal Time)', @@ -3145,15 +2608,9 @@ def draw_and_save_solar_field_suite(self): ) # Draw the solar field with the trajectory fragments inferred from the reconstruction, after refining (tracking nominal time). - self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz - ) - self.solar_field.set_heliostats_configuration( - self.up_heliostats, self.up_configuration - ) - self.solar_field.set_heliostats_configuration( - self.down_heliostats, self.down_configuration - ) + self.solar_field.set_full_field_tracking(aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz) + self.solar_field.set_heliostats_configuration(self.up_heliostats, self.up_configuration) + self.solar_field.set_heliostats_configuration(self.down_heliostats, self.down_configuration) self.set_per_heliostat_estimates_of_camera_xyz_given_overall_time() draw_control_dict['draw_trajectory_fragments'] = True draw_control_dict['connect_trajectory_fragments'] = False @@ -3167,12 +2624,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.draw_and_save_solar_field_trajectories( 'AU. Per Heliostat Scan Fragments After Discarding Partial (Tracking Nominal Time)', @@ -3184,9 +2637,7 @@ def draw_and_save_solar_field_suite(self): ) # Draw the solar field with the trajectory fragments inferred from the reconstruction, after refining and including camera scan passes (tracking nominal time). - self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz - ) + self.solar_field.set_full_field_tracking(aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz) self.set_per_heliostat_estimates_of_camera_xyz_given_overall_time() draw_control_dict['draw_GPS_log'] = True draw_control_dict['draw_gps_scan_passes'] = True @@ -3204,12 +2655,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. 
- self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.draw_and_save_solar_field_trajectories( 'AV. Per Heliostat Scan Pass Fit Lines (Tracking Nominal Time)', @@ -3221,9 +2668,7 @@ def draw_and_save_solar_field_suite(self): ) # Draw the solar field with the trajectory fragments inferred from the reconstruction, after refining and including camera scan passes and association with corresponding gps scan lines (tracking nominal time). - self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz - ) + self.solar_field.set_full_field_tracking(aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz) self.set_per_heliostat_estimates_of_camera_xyz_given_overall_time() draw_control_dict['draw_GPS_log'] = True draw_control_dict['draw_gps_scan_passes'] = True @@ -3242,12 +2687,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.draw_and_save_solar_field_trajectories( 'AW. Camera Scan Associations with GPS Scan Lines, Full Trajectory Fragments (Tracking Nominal Time)', @@ -3259,9 +2700,7 @@ def draw_and_save_solar_field_suite(self): ) # Draw the solar field with the trajectory fragments inferred from the reconstruction, after refining and including camera scan passes and association with corresponding gps scan lines (tracking nominal time). - self.solar_field.set_full_field_tracking( - aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz - ) + self.solar_field.set_full_field_tracking(aimpoint_xyz=self.aimpoint_xyz, when_ymdhmsz=self.when_ymdhmsz) self.set_per_heliostat_estimates_of_camera_xyz_given_overall_time() draw_control_dict['draw_GPS_log'] = True draw_control_dict['draw_gps_scan_passes'] = True @@ -3272,9 +2711,7 @@ def draw_and_save_solar_field_suite(self): draw_control_dict['include_points_with_missing_corners'] = False draw_control_dict['include_non_refined_points'] = False draw_control_dict['draw_camera_passes'] = True - draw_control_dict['draw_gps_transformed_camera_pass_connections'] = ( - True # False #True - ) + draw_control_dict['draw_gps_transformed_camera_pass_connections'] = True # False #True self.solar_field_style = rcsf.heliostat_outlines(color='grey') self.heliostat_up_style = rch.outline( color='lightblue' @@ -3282,12 +2719,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. 
- self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.scan_pass_style = rcps.outline(color=self.scan_pass_color, linewidth=0.4) self.draw_and_save_solar_field_trajectories( @@ -3321,12 +2754,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.scan_pass_style = rcps.outline(color=self.scan_pass_color, linewidth=0.1) self.draw_and_save_solar_field_trajectories( @@ -3360,12 +2789,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.scan_pass_style = rcps.outline(color=self.scan_pass_color, linewidth=0.1) self.draw_and_save_solar_field_trajectories( @@ -3397,12 +2822,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.scan_pass_style = rcps.outline(color=self.scan_pass_color, linewidth=0.1) self.draw_and_save_solar_field_trajectories( @@ -3436,12 +2857,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. 
- self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.scan_pass_style = rcps.outline(color=self.scan_pass_color, linewidth=0.1) self.draw_and_save_solar_field_trajectories( @@ -3476,12 +2893,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.scan_pass_style = rcps.outline(color=self.scan_pass_color, linewidth=0.1) self.draw_and_save_solar_field_trajectories( @@ -3516,12 +2929,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.scan_pass_style = rcps.outline(color=self.scan_pass_color, linewidth=0.1) self.draw_and_save_solar_field_trajectories( @@ -3556,12 +2965,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. - self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.flight_log_style.set_color('grey') self.scan_pass_style = rcps.outline(color=self.scan_pass_color, linewidth=0.1) self.draw_and_save_solar_field_trajectories( @@ -3596,12 +3001,8 @@ def draw_and_save_solar_field_suite(self): self.heliostat_down_style = rch.outline( color='salmon' ) # Use normal_outline() to include surface normal needles, or outline() for just outlines. 
- self.solar_field_style.heliostat_styles.add_special_names( - up_heliostats, self.heliostat_up_style - ) - self.solar_field_style.heliostat_styles.add_special_names( - down_heliostats, self.heliostat_down_style - ) + self.solar_field_style.heliostat_styles.add_special_names(up_heliostats, self.heliostat_up_style) + self.solar_field_style.heliostat_styles.add_special_names(down_heliostats, self.heliostat_down_style) self.draw_and_save_solar_field_trajectories( 'BF. Measured Solar Field Configuration After GPS-Camera Alignment (Per-Heliostat Tracking)', draw_control_dict, @@ -3630,47 +3031,27 @@ def draw_and_save_solar_field_trajectories( output_figure_body = ft.convert_string_to_file_body(title) # 3-d oblique view. if limits_3d_list != None: - view_3d = self.draw_solar_field_trajectories( - title, draw_control_dict, vs.view_spec_3d() - ) + view_3d = self.draw_solar_field_trajectories(title, draw_control_dict, vs.view_spec_3d()) view_3d.show_and_save_multi_axis_limits( - self.output_data_dir, - output_figure_body, - limits_list=limits_3d_list, - grid=True, + self.output_data_dir, output_figure_body, limits_list=limits_3d_list, grid=True ) # xy view. if limits_xy_list != None: - view_xy = self.draw_solar_field_trajectories( - title, draw_control_dict, vs.view_spec_xy() - ) + view_xy = self.draw_solar_field_trajectories(title, draw_control_dict, vs.view_spec_xy()) view_xy.show_and_save_multi_axis_limits( - self.output_data_dir, - output_figure_body, - limits_list=limits_xy_list, - grid=False, + self.output_data_dir, output_figure_body, limits_list=limits_xy_list, grid=False ) # xz_view. if limits_xz_list != None: - view_xz = self.draw_solar_field_trajectories( - title, draw_control_dict, vs.view_spec_xz() - ) + view_xz = self.draw_solar_field_trajectories(title, draw_control_dict, vs.view_spec_xz()) view_xz.show_and_save_multi_axis_limits( - self.output_data_dir, - output_figure_body, - limits_list=limits_xz_list, - grid=True, + self.output_data_dir, output_figure_body, limits_list=limits_xz_list, grid=True ) # yz view. if limits_yz_list != None: - view_yz = self.draw_solar_field_trajectories( - title, draw_control_dict, vs.view_spec_yz() - ) + view_yz = self.draw_solar_field_trajectories(title, draw_control_dict, vs.view_spec_yz()) view_yz.show_and_save_multi_axis_limits( - self.output_data_dir, - output_figure_body, - limits_list=limits_yz_list, - grid=True, + self.output_data_dir, output_figure_body, limits_list=limits_yz_list, grid=True ) def draw_solar_field_trajectories( @@ -3679,20 +3060,11 @@ def draw_solar_field_trajectories( # Solar field. # Required, because this creates the view. view = self.solar_field.draw_figure( - self.figure_control, - self.axis_control_m, - view_spec, - title, - self.solar_field_style, - grid=grid, + self.figure_control, self.axis_control_m, view_spec, title, self.solar_field_style, grid=grid ) # GPS flight log. 
if draw_control_dict['draw_GPS_log']: - view.draw_xyz_list( - self.flight_log_xyz_list, - style=self.flight_log_style, - label='GPS Flight Log', - ) + view.draw_xyz_list(self.flight_log_xyz_list, style=self.flight_log_style, label='GPS Flight Log') if draw_control_dict['draw_gps_velocity_xy_change_minima']: minima_xyz_list = [] for minimum_dict in self.gps_velocity_xy_change_minima: @@ -3700,9 +3072,7 @@ def draw_solar_field_trajectories( y = minimum_dict['y'] z = minimum_dict['z'] minima_xyz_list.append([x, y, z]) - view.draw_xyz_list( - minima_xyz_list, style=self.minimum_style, label='Delta Vx Minima' - ) + view.draw_xyz_list(minima_xyz_list, style=self.minimum_style, label='Delta Vx Minima') if draw_control_dict['draw_gps_velocity_xy_change_maxima']: maxima_xyz_list = [] for maximum_dict in self.gps_velocity_xy_change_maxima: @@ -3710,9 +3080,7 @@ def draw_solar_field_trajectories( y = maximum_dict['y'] z = maximum_dict['z'] maxima_xyz_list.append([x, y, z]) - view.draw_xyz_list( - maxima_xyz_list, style=self.maximum_style, label='Delta Vx Maxima' - ) + view.draw_xyz_list(maxima_xyz_list, style=self.maximum_style, label='Delta Vx Maxima') if draw_control_dict['draw_gps_max_to_min_scan_passes']: for max_min_pass_pair in self.maximum_to_minimum_pass_pair_list: maximum_dict = max_min_pass_pair[0] @@ -3725,9 +3093,7 @@ def draw_solar_field_trajectories( minimum_y = minimum_dict['y'] minimum_z = minimum_dict['z'] minimum_xyz = [minimum_x, minimum_y, minimum_z] - view.draw_xyz_list( - [maximum_xyz, minimum_xyz], style=self.max_to_min_scan_pass_style - ) + view.draw_xyz_list([maximum_xyz, minimum_xyz], style=self.max_to_min_scan_pass_style) if draw_control_dict['draw_gps_min_to_max_scan_passes']: for min_max_pass_pair in self.minimum_to_maximum_pass_pair_list: minimum_dict = min_max_pass_pair[0] @@ -3740,9 +3106,7 @@ def draw_solar_field_trajectories( maximum_y = maximum_dict['y'] maximum_z = maximum_dict['z'] maximum_xyz = [maximum_x, maximum_y, maximum_z] - view.draw_xyz_list( - [minimum_xyz, maximum_xyz], style=self.min_to_max_scan_pass_style - ) + view.draw_xyz_list([minimum_xyz, maximum_xyz], style=self.min_to_max_scan_pass_style) if draw_control_dict['draw_synchronization_points']: synch_time_list = self.synchronization_time_list() synch_xyz_list = [] @@ -3765,30 +3129,19 @@ def draw_solar_field_trajectories( ): idx = 0 for hel_name in self.hel_frames_dict.keys(): - trajectory_fragment_color = color.color( - idx, self.trajectory_fragment_color_wheel - ) + trajectory_fragment_color = color.color(idx, self.trajectory_fragment_color_wheel) camera_pass_color = color.color(idx, self.camera_pass_color_wheel) # Draw the fragments associated with this heliostat. if draw_control_dict['draw_trajectory_fragments'] and ( - ( - draw_control_dict['trajectory_fragment_selected_heliostats'] - == None - ) - or ( - hel_name - in draw_control_dict['trajectory_fragment_selected_heliostats'] - ) + (draw_control_dict['trajectory_fragment_selected_heliostats'] == None) + or (hel_name in draw_control_dict['trajectory_fragment_selected_heliostats']) ): # Draw the trajectory fragment for this heliostat. 
frames_dict = self.hel_frames_dict[hel_name] camera_xyz_world_list = [] for frame_id in frames_dict.keys(): frame_parameters_dict = frames_dict[frame_id] - if ( - draw_control_dict['include_points_with_missing_corners'] - == True - ) or ( + if (draw_control_dict['include_points_with_missing_corners'] == True) or ( frame_parameters_dict['n_missing'] <= self.maximum_n_missing ): camera_xyz_world = frame_parameters_dict[ @@ -3800,13 +3153,8 @@ def draw_solar_field_trajectories( if draw_control_dict['connect_trajectory_fragments']: view.draw_xyz_list(camera_xyz_world_list, style=style) else: - list_of_camera_xyz_lists = ( - self.split_xyz_list_where_distance_exceeds_maximum( - camera_xyz_world_list, - draw_control_dict[ - 'trajectory_fragment_disconnect_threshold' - ], - ) + list_of_camera_xyz_lists = self.split_xyz_list_where_distance_exceeds_maximum( + camera_xyz_world_list, draw_control_dict['trajectory_fragment_disconnect_threshold'] ) for camera_xyz_list in list_of_camera_xyz_lists: view.draw_xyz_list(camera_xyz_list, style=style) @@ -3814,12 +3162,8 @@ def draw_solar_field_trajectories( if draw_control_dict['draw_synchronization_points']: # Draw the synchronization point for this heliostat. frames_dict = self.hel_frames_dict[hel_name] - synchronization_frame_id = self.get_synchronization_frame_id( - hel_name - ) - synchronization_frame_parameters_dict = frames_dict[ - synchronization_frame_id - ] + synchronization_frame_id = self.get_synchronization_frame_id(hel_name) + synchronization_frame_parameters_dict = frames_dict[synchronization_frame_id] synchronization_camera_xyz_world = synchronization_frame_parameters_dict[ 'per_heliostat_estimate_of_camera_xyz_in_world_coords_overall_time' ] @@ -3830,9 +3174,7 @@ def draw_solar_field_trajectories( view.draw_xyz(synchronization_camera_xyz_world, style=style) # Draw the camera passes associated with each heliostat. if draw_control_dict['draw_camera_passes']: - transformed_camera_pass_list = ( - self.hel_transformed_camera_passes_dict[hel_name] - ) + transformed_camera_pass_list = self.hel_transformed_camera_passes_dict[hel_name] for transformed_camera_pass in transformed_camera_pass_list: begin_xyz = transformed_camera_pass['stable_begin_xyzt'][0:3] end_xyz = transformed_camera_pass['stable_end_xyzt'][0:3] @@ -3842,82 +3184,52 @@ def draw_solar_field_trajectories( ) # Similar color, but slightly darker than trajectory fragment. view.draw_xyz_list([begin_xyz, end_xyz], style=style) # Draw connecting lines between the camera passes and the corresponding GPS scan lines. - if draw_control_dict[ - 'draw_gps_transformed_camera_pass_connections' - ]: + if draw_control_dict['draw_gps_transformed_camera_pass_connections']: style = copy.deepcopy(self.pass_connection_style) style.set_color(trajectory_fragment_color) - gps_pass = self.find_matching_gps_pass( - transformed_camera_pass - ) + gps_pass = self.find_matching_gps_pass(transformed_camera_pass) if gps_pass != None: # Compute nearest-neighbor line segments. 
gps_line_3d = gps_pass['line_3d'] camera_gps_point_pair_list = [] - for transformed_camera_xyzt in transformed_camera_pass[ - 'inlier_xyzt_list' - ]: - transformed_camera_inlier_xyz = ( - transformed_camera_xyzt[0:3] - ) + for transformed_camera_xyzt in transformed_camera_pass['inlier_xyzt_list']: + transformed_camera_inlier_xyz = transformed_camera_xyzt[0:3] nearest_gps_xyz = g3d.closest_point_on_line_3d( transformed_camera_inlier_xyz, gps_line_3d ) - view.draw_xyz_list( - [ - transformed_camera_inlier_xyz, - nearest_gps_xyz, - ], - style=style, - ) + view.draw_xyz_list([transformed_camera_inlier_xyz, nearest_gps_xyz], style=style) idx += 1 # GPS-camera alignment analysis. if draw_control_dict['draw_gps_camera_analysis']: idx = 0 for hel_name in self.hel_gps_camera_analysis_dict: - trajectory_fragment_color = color.color( - idx, self.trajectory_fragment_color_wheel - ) + trajectory_fragment_color = color.color(idx, self.trajectory_fragment_color_wheel) camera_pass_color = color.color(idx, self.camera_pass_color_wheel) - list_of_gps_camera_analysis_dicts = self.hel_gps_camera_analysis_dict[ - hel_name - ] + list_of_gps_camera_analysis_dicts = self.hel_gps_camera_analysis_dict[hel_name] for gps_camera_analysis_dict in list_of_gps_camera_analysis_dicts: per_heliostat_transformed_camera_pass = gps_camera_analysis_dict[ 'per_heliostat_transformed_camera_pass' ] - if draw_control_dict[ - 'draw_gps_transformed_camera_inlier_xyzt_points' - ]: - xyzt_list = per_heliostat_transformed_camera_pass[ - 'inlier_xyzt_list' - ] + if draw_control_dict['draw_gps_transformed_camera_inlier_xyzt_points']: + xyzt_list = per_heliostat_transformed_camera_pass['inlier_xyzt_list'] xyz_list = [xyzt[0:3] for xyzt in xyzt_list] style = copy.deepcopy(self.trajectory_fragment_style) style.set_color(trajectory_fragment_color) view.draw_xyz_list(xyz_list, style=style) if draw_control_dict['draw_gps_camera_pass']: - begin_xyz = per_heliostat_transformed_camera_pass[ - 'stable_begin_xyzt' - ][0:3] - end_xyz = per_heliostat_transformed_camera_pass[ - 'stable_end_xyzt' - ][0:3] + begin_xyz = per_heliostat_transformed_camera_pass['stable_begin_xyzt'][0:3] + end_xyz = per_heliostat_transformed_camera_pass['stable_end_xyzt'][0:3] style = copy.deepcopy(self.camera_pass_style) style.set_color(camera_pass_color) view.draw_xyz_list([begin_xyz, end_xyz], style=style) # Draw connecting lines between the camera passes and the corresponding GPS scan lines. 
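For reference, the drawing code above leans on two geometry helpers that this diff does not show: self.split_xyz_list_where_distance_exceeds_maximum(...) and g3d.closest_point_on_line_3d(...). The following is a minimal numpy sketch of what such helpers typically compute; the names, signatures, and the assumption that a 3-d line is given as a point plus a unit direction vector are illustrative guesses, not OpenCSP's actual API.

    import numpy as np

    def split_xyz_list_where_distance_exceeds_maximum_sketch(xyz_list, maximum_distance):
        """Break a polyline into fragments wherever consecutive points are farther apart than maximum_distance."""
        fragments = []
        current = []
        for xyz in xyz_list:
            if current and np.linalg.norm(np.subtract(xyz, current[-1])) > maximum_distance:
                fragments.append(current)
                current = []
            current.append(list(xyz))
        if current:
            fragments.append(current)
        return fragments

    def closest_point_on_line_3d_sketch(point_xyz, line_point_xyz, line_unit_direction_xyz):
        """Return the point on the infinite 3-d line nearest to point_xyz."""
        p = np.asarray(point_xyz, dtype=float)
        a = np.asarray(line_point_xyz, dtype=float)
        u = np.asarray(line_unit_direction_xyz, dtype=float)
        u = u / np.linalg.norm(u)            # tolerate a non-unit direction
        return a + np.dot(p - a, u) * u      # line point plus signed projection length times direction

    # A 4 m disconnect threshold splits this trajectory into two 2-point fragments.
    fragments = split_xyz_list_where_distance_exceeds_maximum_sketch(
        [[0, 0, 10], [1, 0, 10], [20, 0, 10], [21, 0, 10]], 4.0
    )
    # Projecting a camera point onto a line along +x through the origin gives [3, 0, 0];
    # a [point, projection] pair of this kind is the segment that view.draw_xyz_list() draws above.
    nearest_gps_xyz = closest_point_on_line_3d_sketch([3.0, 2.0, 1.0], [0.0, 0.0, 0.0], [1.0, 0.0, 0.0])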
- if draw_control_dict[ - 'draw_gps_transformed_camera_pass_connections' - ]: + if draw_control_dict['draw_gps_transformed_camera_pass_connections']: style = copy.deepcopy(self.pass_connection_style) style.set_color(trajectory_fragment_color) - transformed_camera_gps_point_pair_list = ( - gps_camera_analysis_dict['camera_gps_point_pair_list'] - ) - for ( - camera_gps_point_pair - ) in transformed_camera_gps_point_pair_list: + transformed_camera_gps_point_pair_list = gps_camera_analysis_dict[ + 'camera_gps_point_pair_list' + ] + for camera_gps_point_pair in transformed_camera_gps_point_pair_list: camera_xyz = camera_gps_point_pair[0] gps_xyz = camera_gps_point_pair[1] view.draw_xyz_list([camera_xyz, gps_xyz], style=style) @@ -3935,21 +3247,9 @@ def draw_and_save_gps_log_analysis_plots(self): title='GPS Trajectory Positions', x_column='time(sec)', y_column_label_styles=[ - [ - 'x(m)', - 'x component', - rcps.outline(color='b', linewidth=data_linewidth), - ], - [ - 'y(m)', - 'y component', - rcps.outline(color='r', linewidth=data_linewidth), - ], - [ - 'z(m)', - 'z component', - rcps.outline(color='g', linewidth=data_linewidth), - ], + ['x(m)', 'x component', rcps.outline(color='b', linewidth=data_linewidth)], + ['y(m)', 'y component', rcps.outline(color='r', linewidth=data_linewidth)], + ['z(m)', 'z component', rcps.outline(color='g', linewidth=data_linewidth)], ], x_axis_label='time (sec)', y_axis_label='Position (m)', @@ -3964,21 +3264,9 @@ def draw_and_save_gps_log_analysis_plots(self): title='GPS Trajectory Velocities', x_column='time(sec)', y_column_label_styles=[ - [ - 'velocity_average_x(m/sec)', - 'x component', - rcps.outline(color='b', linewidth=data_linewidth), - ], - [ - 'velocity_average_y(m/sec)', - 'y component', - rcps.outline(color='r', linewidth=data_linewidth), - ], - [ - 'velocity_average_z(m/sec)', - 'z component', - rcps.outline(color='g', linewidth=data_linewidth), - ], + ['velocity_average_x(m/sec)', 'x component', rcps.outline(color='b', linewidth=data_linewidth)], + ['velocity_average_y(m/sec)', 'y component', rcps.outline(color='r', linewidth=data_linewidth)], + ['velocity_average_z(m/sec)', 'z component', rcps.outline(color='g', linewidth=data_linewidth)], ], x_axis_label='time (sec)', y_axis_label='Velocity (m/sec)', @@ -3993,11 +3281,7 @@ def draw_and_save_gps_log_analysis_plots(self): title='GPS Trajectory Speed', x_column='time(sec)', y_column_label_styles=[ - [ - 'speed_average(m/sec)', - None, - rcps.outline(color='b', linewidth=data_linewidth), - ], + ['speed_average(m/sec)', None, rcps.outline(color='b', linewidth=data_linewidth)], ['speed(mps)', None, rcps.outline(color='r', linewidth=data_linewidth)], ], x_axis_label='time (sec)', @@ -4015,13 +3299,7 @@ def draw_and_save_gps_log_analysis_plots(self): self.flight_log_df, title='GPS Trajectory Speed Change', x_column='time(sec)', - y_column_label_styles=[ - [ - 'delta_speed(m/sec)', - None, - rcps.outline(color='b', linewidth=data_linewidth), - ] - ], + y_column_label_styles=[['delta_speed(m/sec)', None, rcps.outline(color='b', linewidth=data_linewidth)]], x_axis_label='time (sec)', y_axis_label='Change in Speed (m/sec)', x_axis_grid=True, @@ -4034,13 +3312,7 @@ def draw_and_save_gps_log_analysis_plots(self): self.flight_log_df, title='GPS Trajectory Speed Change Magnitude', x_column='time(sec)', - y_column_label_styles=[ - [ - 'abs_delta_speed(m/sec)', - None, - rcps.outline(color='b', linewidth=data_linewidth), - ] - ], + y_column_label_styles=[['abs_delta_speed(m/sec)', None, rcps.outline(color='b', 
linewidth=data_linewidth)]], # y_column_label_styles = [ ['speed_average(m/sec)', None, rcps.data_curve(color='b', linewidth=data_linewidth, markersize=data_markersize)] ], x_axis_label='time (sec)', y_axis_label='Magnitude of Change in Speed (m/sec)', @@ -4054,13 +3326,7 @@ def draw_and_save_gps_log_analysis_plots(self): self.flight_log_df, title='GPS Trajectory Velocity XY Angle', x_column='time(sec)', - y_column_label_styles=[ - [ - 'velocity_angle_xy(rad)', - None, - rcps.outline(color='b', linewidth=data_linewidth), - ] - ], + y_column_label_styles=[['velocity_angle_xy(rad)', None, rcps.outline(color='b', linewidth=data_linewidth)]], x_axis_label='time (sec)', y_axis_label='Velocity Angle in (x,y) Plane (rad)', x_axis_grid=True, @@ -4072,13 +3338,7 @@ def draw_and_save_gps_log_analysis_plots(self): self.flight_log_df, title='GPS Trajectory Velocity XY Angle Zoom 1', x_column='time(sec)', - y_column_label_styles=[ - [ - 'velocity_angle_xy(rad)', - None, - rcps.outline(color='b', linewidth=data_linewidth), - ] - ], + y_column_label_styles=[['velocity_angle_xy(rad)', None, rcps.outline(color='b', linewidth=data_linewidth)]], x_axis_label='time (sec)', y_axis_label='Velocity Angle in (x,y) Plane (rad)', x_axis_grid=True, @@ -4092,13 +3352,7 @@ def draw_and_save_gps_log_analysis_plots(self): self.flight_log_df, title='GPS Trajectory Velocity XY Angle Zoom 2', x_column='time(sec)', - y_column_label_styles=[ - [ - 'velocity_angle_xy(rad)', - None, - rcps.outline(color='b', linewidth=data_linewidth), - ] - ], + y_column_label_styles=[['velocity_angle_xy(rad)', None, rcps.outline(color='b', linewidth=data_linewidth)]], x_axis_label='time (sec)', y_axis_label='Velocity Angle in (x,y) Plane (rad)', x_axis_grid=True, @@ -4113,13 +3367,7 @@ def draw_and_save_gps_log_analysis_plots(self): self.flight_log_df, title='GPS Trajectory Velocity Z Angle', x_column='time(sec)', - y_column_label_styles=[ - [ - 'velocity_angle_z(rad)', - None, - rcps.outline(color='b', linewidth=data_linewidth), - ] - ], + y_column_label_styles=[['velocity_angle_z(rad)', None, rcps.outline(color='b', linewidth=data_linewidth)]], x_axis_label='time (sec)', y_axis_label='Velocity Elevation Angle Above (x,y) Plane (rad)', x_axis_grid=True, @@ -4131,13 +3379,7 @@ def draw_and_save_gps_log_analysis_plots(self): self.flight_log_df, title='GPS Trajectory Velocity Z Angle Zoom', x_column='time(sec)', - y_column_label_styles=[ - [ - 'velocity_angle_z(rad)', - None, - rcps.outline(color='b', linewidth=data_linewidth), - ] - ], + y_column_label_styles=[['velocity_angle_z(rad)', None, rcps.outline(color='b', linewidth=data_linewidth)]], x_axis_label='time (sec)', y_axis_label='Velocity Elevation Angle Above (x,y) Plane (rad)', x_axis_grid=True, @@ -4153,11 +3395,7 @@ def draw_and_save_gps_log_analysis_plots(self): title='GPS Trajectory Velocity XY Angle Change', x_column='time(sec)', y_column_label_styles=[ - [ - 'delta_velocity_angle_xy(rad)', - None, - rcps.outline(color='b', linewidth=data_linewidth), - ] + ['delta_velocity_angle_xy(rad)', None, rcps.outline(color='b', linewidth=data_linewidth)] ], x_axis_label='time (sec)', y_axis_label='Change in Velocity Angle in (x,y) Plane (rad)', @@ -4174,10 +3412,7 @@ def draw_and_save_gps_log_analysis_plots(self): minimum_delta_velocity_angle_xy = minimum_dict['delta_velocity_angle_xy'] gp.add_xy_list_to_plot( figure_record, - [ - [time_maximum, maximum_delta_velocity_angle_xy], - [time_minimum, minimum_delta_velocity_angle_xy], - ], + [[time_maximum, maximum_delta_velocity_angle_xy], 
[time_minimum, minimum_delta_velocity_angle_xy]], rcps.outline(color=self.max_to_min_color), ) # Indicate the minimum-to-maximum pairs. @@ -4190,10 +3425,7 @@ def draw_and_save_gps_log_analysis_plots(self): maximum_delta_velocity_angle_xy = maximum_dict['delta_velocity_angle_xy'] gp.add_xy_list_to_plot( figure_record, - [ - [time_minimum, minimum_delta_velocity_angle_xy], - [time_maximum, maximum_delta_velocity_angle_xy], - ], + [[time_minimum, minimum_delta_velocity_angle_xy], [time_maximum, maximum_delta_velocity_angle_xy]], rcps.outline(color=self.min_to_max_color), ) # Indicate the local minima. @@ -4203,10 +3435,7 @@ def draw_and_save_gps_log_analysis_plots(self): delta_velocity_angle_xy = minimum_dict['delta_velocity_angle_xy'] minima_tdelta_list.append([time, delta_velocity_angle_xy]) gp.add_xy_list_to_plot( - figure_record, - minima_tdelta_list, - rcps.marker(color='m', markersize=data_markersize), - label='Minima', + figure_record, minima_tdelta_list, rcps.marker(color='m', markersize=data_markersize), label='Minima' ) # Indicate the local maxima. maxima_tdelta_list = [] @@ -4215,10 +3444,7 @@ def draw_and_save_gps_log_analysis_plots(self): delta_velocity_angle_xy = maximum_dict['delta_velocity_angle_xy'] maxima_tdelta_list.append([time, delta_velocity_angle_xy]) gp.add_xy_list_to_plot( - figure_record, - maxima_tdelta_list, - rcps.marker(color='r', markersize=data_markersize), - label='Maxima', + figure_record, maxima_tdelta_list, rcps.marker(color='r', markersize=data_markersize), label='Maxima' ) figure_record.save(self.output_data_dir) figure_record = pp.dataframe_plot( @@ -4227,11 +3453,7 @@ def draw_and_save_gps_log_analysis_plots(self): title='GPS Trajectory Velocity XY Angle Change Zoom', x_column='time(sec)', y_column_label_styles=[ - [ - 'delta_velocity_angle_xy(rad)', - None, - rcps.outline(color='b', linewidth=data_linewidth), - ] + ['delta_velocity_angle_xy(rad)', None, rcps.outline(color='b', linewidth=data_linewidth)] ], x_axis_label='time (sec)', y_axis_label='Change in Velocity Angle in (x,y) Plane (rad)', @@ -4248,11 +3470,7 @@ def draw_and_save_gps_log_analysis_plots(self): title='GPS Trajectory Velocity Z Angle Change', x_column='time(sec)', y_column_label_styles=[ - [ - 'delta_velocity_angle_z(rad)', - None, - rcps.outline(color='b', linewidth=data_linewidth), - ] + ['delta_velocity_angle_z(rad)', None, rcps.outline(color='b', linewidth=data_linewidth)] ], x_axis_label='time (sec)', y_axis_label='Change in Velocity Elevation Angle Above (x,y) Plane (rad)', @@ -4266,11 +3484,7 @@ def draw_and_save_gps_log_analysis_plots(self): title='GPS Trajectory Velocity Z Angle Change Zoom', x_column='time(sec)', y_column_label_styles=[ - [ - 'delta_velocity_angle_z(rad)', - None, - rcps.outline(color='b', linewidth=data_linewidth), - ] + ['delta_velocity_angle_z(rad)', None, rcps.outline(color='b', linewidth=data_linewidth)] ], x_axis_label='time (sec)', y_axis_label='Change in Velocity Elevation Angle Above (x,y) Plane (rad)', @@ -4457,15 +3671,11 @@ def draw_and_save_gps_log_analysis_plots(self): def save_enhanced_flight_log(self): if not (os.path.exists(self.output_data_dir)): os.makedirs(self.output_data_dir) - (input_flight_log_dir, input_flight_log_body, input_flight_log_ext) = ( - ft.path_components(self.input_flight_log_dir_body_ext) - ) - output_flight_log_plus_body_ext = ( - input_flight_log_body + '_plus' + input_flight_log_ext - ) - output_flight_log_plus_dir_body_ext = os.path.join( - self.output_data_dir, output_flight_log_plus_body_ext + 
(input_flight_log_dir, input_flight_log_body, input_flight_log_ext) = ft.path_components( + self.input_flight_log_dir_body_ext ) + output_flight_log_plus_body_ext = input_flight_log_body + '_plus' + input_flight_log_ext + output_flight_log_plus_dir_body_ext = os.path.join(self.output_data_dir, output_flight_log_plus_body_ext) print( 'In TrajectoryAnalysis.save_enhanced_flight_log(), saving enhanced flight log file :', output_flight_log_plus_dir_body_ext, @@ -4478,9 +3688,7 @@ def save_enhanced_flight_log(self): # SAVE GPS TIME CORRESPONDING TO FLIGHT LOG ZERO TIME def save_gps_ymdhmsz_given_flight_log_zero_seconds(self): - output_body = ( - self.input_video_body + '_gps_ymdhmsz_given_flight_log_zero_seconds' - ) + output_body = self.input_video_body + '_gps_ymdhmsz_given_flight_log_zero_seconds' explain = 'GPS time corresponding to flight log zero' # Convert list to dictionary, adding keys. output_dict = { @@ -4504,12 +3712,8 @@ def save_gps_ymdhmsz_given_flight_log_zero_seconds(self): # SAVE GPS VELOCITY XY CHANGE POINTS - def save_gps_velocity_xy_change_points( - self, list_of_gps_velocity_xy_change_dicts, minima_or_maxima_str - ): - output_body = ( - self.input_video_body + '_gps_velocity_xy_change_' + minima_or_maxima_str - ) + def save_gps_velocity_xy_change_points(self, list_of_gps_velocity_xy_change_dicts, minima_or_maxima_str): + output_body = self.input_video_body + '_gps_velocity_xy_change_' + minima_or_maxima_str explain = 'GPS velocity xy change ' + minima_or_maxima_str + ' file' return dt.save_list_of_one_level_dicts( list_of_gps_velocity_xy_change_dicts, @@ -4521,26 +3725,16 @@ def save_gps_velocity_xy_change_points( # SAVE GPS SCAN ANALYSIS MINIMA/MAXIMA PAIRS - def save_gps_velocity_xy_change_pairs( - self, pass_pair_list, maximum_to_minimum_or_minimum_to_maximum_str - ): + def save_gps_velocity_xy_change_pairs(self, pass_pair_list, maximum_to_minimum_or_minimum_to_maximum_str): output_body = ( self.input_video_body + '_gps_velocity_xy_change_' + maximum_to_minimum_or_minimum_to_maximum_str + '_pass_pair_list' ) - explain = ( - 'GPS velocity xy change ' - + maximum_to_minimum_or_minimum_to_maximum_str - + ' pass pair file' - ) + explain = 'GPS velocity xy change ' + maximum_to_minimum_or_minimum_to_maximum_str + ' pass pair file' return dt.save_list_of_one_level_dict_pairs( - pass_pair_list, - self.output_data_dir, - output_body, - explain, - error_if_dir_not_exist=False, + pass_pair_list, self.output_data_dir, output_body, explain, error_if_dir_not_exist=False ) # SAVE GPS SCAN PASSES @@ -4566,17 +3760,13 @@ def save_gps_scan_passes(self): def gps_scan_pass_heading_line(self): # Stable begin point. heading_line_str = '' - heading_line_str += ( - 'stable_begin_x,stable_begin_y,stable_begin_z,stable_begin_t' - ) + heading_line_str += 'stable_begin_x,stable_begin_y,stable_begin_z,stable_begin_t' # Stable end point. heading_line_str += ',' heading_line_str += 'stable_end_x,stable_end_y,stable_end_z,stable_end_t' # Embedding 3-d line. heading_line_str += ',' - heading_line_str += ( - 'line_3d_length,line_3d_ux,line_3d_uy,line_3d_uz,line_3d_theta,line_3d_eta' - ) + heading_line_str += 'line_3d_length,line_3d_ux,line_3d_uy,line_3d_uz,line_3d_theta,line_3d_eta' # RMS point-to-line distance. 
heading_line_str += ',' heading_line_str += 'rms_distance_to_line' @@ -4675,11 +3865,7 @@ def save_hel_frames_dict(self): output_body = self.input_video_body + '_hel_frames_dict' explain = 'trajectory fragments file' return ft.write_pickle_file( - explain, - self.output_data_dir, - output_body, - self.hel_frames_dict, - error_if_dir_not_exist=False, + explain, self.output_data_dir, output_body, self.hel_frames_dict, error_if_dir_not_exist=False ) def save_synchronization_constants(self): @@ -4689,12 +3875,8 @@ def save_synchronization_constants(self): heading_line = None data_lines = [] data_lines.append('synchronization_slope,' + str(self.synchronization_slope)) - data_lines.append( - 'synchronization_intercept,' + str(self.synchronization_intercept) - ) - data_lines.append( - 'n_synchronization_pairs,' + str(len(self.synchronization_pair_list)) - ) + data_lines.append('synchronization_intercept,' + str(self.synchronization_intercept)) + data_lines.append('n_synchronization_pairs,' + str(len(self.synchronization_pair_list))) idx = 1 for synch_pair in self.synchronization_pair_list: # Example synchronization pair: [['max_to_min', 0, 'stop', 110.465], ['5W9', 0, 1911]] @@ -4715,12 +3897,8 @@ def save_synchronization_constants(self): data_lines.append('start_or_stop_' + str(idx) + ',' + str(start_or_stop)) data_lines.append('gps_halt_time_' + str(idx) + ',' + str(gps_halt_time)) data_lines.append('hel_name_' + str(idx) + ',' + str(hel_name)) - data_lines.append( - 'camera_halt_idx_' + str(idx) + ',' + str(camera_halt_idx) - ) - data_lines.append( - 'camera_halt_frame_' + str(idx) + ',' + str(camera_halt_frame) - ) + data_lines.append('camera_halt_idx_' + str(idx) + ',' + str(camera_halt_idx)) + data_lines.append('camera_halt_frame_' + str(idx) + ',' + str(camera_halt_frame)) idx += 1 # Write. output_dir_body_ext = ft.write_csv_file( @@ -4738,45 +3916,29 @@ def save_hel_camera_passes_dict(self): output_body = self.input_video_body + '_hel_camera_passes_dict' explain = 'heliostat camera passes file' return ft.write_pickle_file( - explain, - self.output_data_dir, - output_body, - self.hel_camera_passes_dict, - error_if_dir_not_exist=False, + explain, self.output_data_dir, output_body, self.hel_camera_passes_dict, error_if_dir_not_exist=False ) def save_hel_gps_camera_analysis_dict(self): output_body = self.input_video_body + '_hel_gps_camera_analysis_dict' explain = 'heliost GPS-camera analysis file' return ft.write_pickle_file( - explain, - self.output_data_dir, - output_body, - self.hel_gps_camera_analysis_dict, - error_if_dir_not_exist=False, + explain, self.output_data_dir, output_body, self.hel_gps_camera_analysis_dict, error_if_dir_not_exist=False ) def check_pickle_files(self): # Trajectory fragments. print('\nIn TrajectoryAnalysis.check_pickle_files(), loaded hel_frames_dict:') - self.print_hel_frames_dict_aux( - pickle.load(open(self.hel_frames_dict_dir_body_ext, 'rb')) - ) + self.print_hel_frames_dict_aux(pickle.load(open(self.hel_frames_dict_dir_body_ext, 'rb'))) # Camera passes. - print( - '\nIn TrajectoryAnalysis.check_pickle_files(), loaded hel_camera_passes_dict:' - ) + print('\nIn TrajectoryAnalysis.check_pickle_files(), loaded hel_camera_passes_dict:') self.print_hel_camera_passes_dict_aux( - pickle.load(open(self.hel_camera_passes_dict_dir_body_ext, 'rb')), - max_heliostats=2, + pickle.load(open(self.hel_camera_passes_dict_dir_body_ext, 'rb')), max_heliostats=2 ) # GPS-camera analysis. 
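check_pickle_files() above re-reads each saved dictionary with pickle.load(open(path, 'rb')), and the writer it pairs with, ft.write_pickle_file(...), is not shown in this diff. Below is a standard-library sketch of the same round trip using context managers so the file handles are closed deterministically; the helper names and the '.pkl' extension are assumptions for illustration only.

    import os
    import pickle

    def write_pickle_file_sketch(output_dir, output_body, data):
        """Write data to <output_dir>/<output_body>.pkl and return the full path."""
        os.makedirs(output_dir, exist_ok=True)
        output_dir_body_ext = os.path.join(output_dir, output_body + '.pkl')
        with open(output_dir_body_ext, 'wb') as output_stream:
            pickle.dump(data, output_stream)
        return output_dir_body_ext

    def read_pickle_file_sketch(input_dir_body_ext):
        with open(input_dir_body_ext, 'rb') as input_stream:
            return pickle.load(input_stream)

    # Round-trip check, analogous in spirit to check_pickle_files().
    path = write_pickle_file_sketch('/tmp/pickle_check', 'hel_frames_dict', {'5W9': {'n_missing': 0}})
    assert read_pickle_file_sketch(path) == {'5W9': {'n_missing': 0}}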
- print( - '\nIn TrajectoryAnalysis.check_pickle_files(), loaded hel_gps_camera_analysis_dict:' - ) + print('\nIn TrajectoryAnalysis.check_pickle_files(), loaded hel_gps_camera_analysis_dict:') self.print_hel_gps_camera_analysis_dict_aux( - pickle.load(open(self.hel_gps_camera_analysis_dict_dir_body_ext, 'rb')), - max_heliostats=2, + pickle.load(open(self.hel_gps_camera_analysis_dict_dir_body_ext, 'rb')), max_heliostats=2 ) @@ -4835,36 +3997,38 @@ def check_pickle_files(self): experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/190_TrajectoryAnalysis/mavic_zoom/log/TrajectoryAnalysis_log.txt' ) - velocity_calculation_offset_fwd_bwd = 10 # Integer number of steps to skip to fetch a value for velocity computation. Must be at least 1. + velocity_calculation_offset_fwd_bwd = ( + 10 # Integer number of steps to skip to fetch a value for velocity computation. Must be at least 1. + ) delta_velocity_angle_xy_peak_threshold = 0.5 # 1.0 # radian delta_velocity_angle_xy_non_peak_threshold = 0.1 # radian turn_overshoot_skip_time = 1.0 # sec. Time window to allow turn correction overshoot without inferring an interval varying from turn min to turn max. scan_establish_velocity_time = 0.5 # sec. Time for the UAS to reach a "reasonably stable " velocity at the beginning of a scan pass. The UAS is "establishing" its constant scan velocity. scan_discard_velocity_time = 0.5 # sec. Time prior to the end point when a UAS bgeins changing velocity prior to the pass endpoint. The UAS is "discarding" its constant scan velocity. minimum_scan_pass_time = 10.0 # sec. Duration of the shortest possible scan pass, after already trimming away the times to establish and discard the scan velocity. - nominal_scan_speed = ( - 7.0 # m/sec. Nominal speed of UAS flight during a linear scan pass. - ) + nominal_scan_speed = 7.0 # m/sec. Nominal speed of UAS flight during a linear scan pass. # m/sec. Tolerance to use wheen deciding that the average speed of a candidate pass is consistent with a possible scan. Not the scan speed control tolerance; larger than that. scan_speed_tolerance = 0.5 - nominal_scan_velocity_z = ( - 0.0 # m/sec. Nominal vertical speed of UAS flight during a linear scan pass. - ) + nominal_scan_velocity_z = 0.0 # m/sec. Nominal vertical speed of UAS flight during a linear scan pass. # m/sec. Tolerance to use wheen deciding that the average vertical speed of a candidate pass is consistent with a possible scan. Not the scan speed control tolerance; larger than that. scan_velocity_z_tolerance = 0.25 maximum_n_missing = 10 # Varies with the number of heliostat corners. - minimum_gps_pass_inter_point_speed = 4.0 # m/sec. Minimum observed inter-point speed allowable along a contiguous GPS scan pass. - minimum_gps_pass_number_of_points = ( - 20 # Minmum number of points required to constitute a GPS pass. + minimum_gps_pass_inter_point_speed = ( + 4.0 # m/sec. Minimum observed inter-point speed allowable along a contiguous GPS scan pass. + ) + minimum_gps_pass_number_of_points = 20 # Minmum number of points required to constitute a GPS pass. + gps_pass_start_margin = ( + 5 # Number of points to shrink the start of a GPS pass after filtering to remove low-velocity points. + ) + gps_pass_stop_margin = ( + 5 # Number of points to shrink the end of a GPS pass after filtering to remove low-velocity points. ) - gps_pass_start_margin = 5 # Number of points to shrink the start of a GPS pass after filtering to remove low-velocity points. 
- gps_pass_stop_margin = 5 # Number of points to shrink the end of a GPS pass after filtering to remove low-velocity points. # m. Maximum distance between estimated camera trajectory points (expressed in heliostat coordiantes), to consider part of a connected trajectory. maximum_camera_pass_inter_point_distance = 4.0 - minimum_camera_pass_inter_point_speed = 1.5 # m/sec. Minimum observed inter-point speed allowable along a contiguous camera pass. - minimum_camera_pass_number_of_points = ( - 10 # Minmum number of points required to constitute a camera pass. + minimum_camera_pass_inter_point_speed = ( + 1.5 # m/sec. Minimum observed inter-point speed allowable along a contiguous camera pass. ) + minimum_camera_pass_number_of_points = 10 # Minmum number of points required to constitute a camera pass. camera_pass_start_margin = 3 # Number of points to shrink the start of a camera pass after removing points corresponding to excess missing corners. camera_pass_stop_margin = 3 # Number of points to shrink the end of a camera pass after removing points corresponding to excess missing corners. # Input/output sources. @@ -4886,32 +4050,17 @@ def check_pickle_files(self): ] # Nominal time, refined by computation. Recommend use mid-point of flight. up_heliostats = ['6W5', '6E6', '6E8', '6E9', '13E10'] up_configuration = hc.HeliostatConfiguration(az=np.deg2rad(180), el=np.deg2rad(90)) - down_heliostats = [ - '5W10', - '5W8', - '5E1', - '5E7', - '5E10', - '6E7', - '9W10', - '10W12', - '11W5', - '13W5', - '13W14', - '13E14', - ] + down_heliostats = ['5W10', '5W8', '5E1', '5E7', '5E10', '6E7', '9W10', '10W12', '11W5', '13W5', '13W14', '13E14'] down_configuration = hc.NSTTF_stow() input_video_dir_body_ext = ( - experiment_dir() - + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' + experiment_dir() + '2020-12-03_FastScan1/2_Data/20201203/1544_NS_U/mavic_zoom/DJI_427t_428_429.MP4' ) input_flight_log_dir_body_ext = ( experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/060_FlightData/data/_F08_log_2020-12-03_15-44-13_v2.csv' ) input_reconstructed_heliostats_dir = ( - experiment_dir() - + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/180_Heliostats3d/mavic_zoom/data/' + experiment_dir() + '2020-12-03_FastScan1/3_Post/Answers/20201203/1544_NS_U/180_Heliostats3d/mavic_zoom/data/' ) output_data_dir = ( experiment_dir() diff --git a/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_save_read.py b/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_save_read.py index 2caced2e..5995cfd3 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_save_read.py +++ b/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_save_read.py @@ -32,12 +32,7 @@ def csv_row_to_connected_component(input_row): while i < n_claus: key = input_row[i] value = input_row[i + 1] - if ( - value != 'left' - and value != 'right' - and value != 'top' - and value != 'bottom' - ): + if value != 'left' and value != 'right' and value != 'top' and value != 'bottom': value = eval(input_row[i + 1]) component[key] = value i += 2 @@ -48,9 +43,7 @@ def csv_row_to_connected_component(input_row): input_file = os.path.join(csv_path, filename) # Check if the input file exists. if not os.path.exists(input_file): - raise OSError( - 'In read_connected_components(), file does not exist: ' + str(input_file) - ) + raise OSError('In read_connected_components(), file does not exist: ' + str(input_file)) # Open and read the file. 
components = [] with open(input_file, newline='') as input_stream: @@ -86,9 +79,7 @@ def save_fitted_lines_connected_components(components=None, filename=None, path= writer.writerow(row_output) -def save_fitted_lines_inliers_connected_components( - components=None, filename=None, path=None -): +def save_fitted_lines_inliers_connected_components(components=None, filename=None, path=None): csv_path = os.path.join(path, 'csv_files') if not os.path.exists(csv_path): os.makedirs(csv_path) @@ -134,9 +125,7 @@ def save_fitted_lines_inliers_connected_components( writer.writerow(row_output) -def save_corners_facets( - corners=None, filename=None, path=None, corners_type='top_left', facets=None -): +def save_corners_facets(corners=None, filename=None, path=None, corners_type='top_left', facets=None): csv_path = os.path.join(path, 'csv_files') if not os.path.exists(csv_path): os.makedirs(csv_path) @@ -227,9 +216,7 @@ def csv_row_to_corner(input_row): input_file = os.path.join(csv_path, filename) # Check if the input file exists. if not os.path.exists(input_file): - raise OSError( - 'In read_corners_facets(), file "' + str(input_file) + '" does not exist.' - ) + raise OSError('In read_corners_facets(), file "' + str(input_file) + '" does not exist.') # Open and read the file. corners = [] with open(input_file, newline='') as csvfile: @@ -284,12 +271,7 @@ def save_facets(facets=None, filename=None, path=None): bottom_right_corner = facet["bottom_right"] bottom_left_corner = facet["bottom_left"] center = facet["center"] - corners = [ - top_left_corner, - top_right_corner, - bottom_right_corner, - bottom_left_corner, - ] + corners = [top_left_corner, top_right_corner, bottom_right_corner, bottom_left_corner] for corner in corners: row_output = [] row_output.append('corner_type') @@ -360,9 +342,7 @@ def centers3d_to_corners3d(facet_centers, facet_width, facet_height): def read_centers3d(input_file): # Check if the input file exists. if not os.path.exists(input_file): - raise OSError( - 'In read_centers3d(), file "' + str(input_file) + '" does not exist.' - ) + raise OSError('In read_centers3d(), file "' + str(input_file) + '" does not exist.') # Open and read the file. facets_coords = [] with open(input_file, newline='') as csvfile: @@ -373,12 +353,7 @@ def read_centers3d(input_file): if not count: count += 1 continue # get rid of header - _, x, y, z = ( - input_row[0], - float(input_row[1]), - float(input_row[2]), - float(input_row[3]), - ) + _, x, y, z = (input_row[0], float(input_row[1]), float(input_row[2]), float(input_row[3])) facets_coords.append([x, y, z]) return facets_coords @@ -399,9 +374,7 @@ def read_projected_corners(filename=None, corners_per_heliostat=None, path=None) input_file = os.path.join(path, 'csv_files', filename) # Check if the input file exists. if not os.path.exists(input_file): - raise OSError( - 'In read_projected_corners(), file does not exist: ' + str(input_file) - ) + raise OSError('In read_projected_corners(), file does not exist: ' + str(input_file)) # Open and read the file. 
with open(input_file, newline='') as csvfile: reader = csv.reader(csvfile) diff --git a/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_specifications.py b/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_specifications.py index 115824fa..6ef35752 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_specifications.py +++ b/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_specifications.py @@ -61,14 +61,10 @@ def __init__( self.bottom_left_facet_indx = bottom_left_facet_indx self.facets_centroids_file = facets_centroids_file self.facets_centroids = sr.read_centers3d(facets_centroids_file) - self.facets_corners = sr.centers3d_to_corners3d( - self.facets_centroids, self.facet_width, self.facet_height - ) + self.facets_corners = sr.centers3d_to_corners3d(self.facets_centroids, self.facet_width, self.facet_height) self.flat_corner_xyz_list = self.facets_corners self.heliostat_locations_file = heliostat_locations_file - self.heliostat_locations_dict = self.read_heliostat_locations_dict( - self.heliostat_locations_file - ) + self.heliostat_locations_dict = self.read_heliostat_locations_dict(self.heliostat_locations_file) # Design aim point. self.design_aim_point_x = design_aim_point_x self.design_aim_point_y = design_aim_point_y @@ -159,14 +155,10 @@ def design_focal_length(self, hel_xyz): return self.ideal_focal_length(hel_xyz) def smooth_heliostat_corner_xyz_list_given_focal_length(self, focal_length): - return heliostat_corner_xyz_list_given_focal_length_smooth( - self.flat_corner_xyz_list, focal_length - ) + return heliostat_corner_xyz_list_given_focal_length_smooth(self.flat_corner_xyz_list, focal_length) def faceted_heliostat_corner_xyz_list_given_focal_length(self, focal_length): - return heliostat_corner_xyz_list_given_focal_length_faceted( - self.flat_corner_xyz_list, focal_length - ) + return heliostat_corner_xyz_list_given_focal_length_faceted(self.flat_corner_xyz_list, focal_length) def design_heliostat_corner_xyz_list(self, hel_xyz): """ @@ -185,9 +177,7 @@ def lift_flat_corner_xyz_list(self, hel_xyz, input_flat_corner_xyz_list): Future implementations are envisioned to support a more realistic suite of design decisions. """ focal_length = self.design_focal_length(hel_xyz) - return heliostat_corner_xyz_list_given_focal_length_faceted( - input_flat_corner_xyz_list, focal_length - ) + return heliostat_corner_xyz_list_given_focal_length_faceted(input_flat_corner_xyz_list, focal_length) def heliostat_xyz(self, hel_name): """ @@ -315,9 +305,7 @@ def construct_design_heliostat_spec(self, hel_xyz): This routine returns a heliostat_spec, for the nominal design of a heliostat located in the given position. As noted above, the issues determining the design vary depending on solar field design and manufacturing decisions. 
""" - return self.construct_focal_length_heliostat_spec( - self.design_focal_length(hel_xyz) - ) + return self.construct_focal_length_heliostat_spec(self.design_focal_length(hel_xyz)) # HELPER FUNCTIONS @@ -331,26 +319,16 @@ def smooth_z(x, y, focal_length): return k * (r * r) -def heliostat_corner_xyz_list_given_focal_length_smooth( - flat_corner_xyz_list, focal_length -): - return [ - [xyz[0], xyz[1], xyz[2] + smooth_z(xyz[0], xyz[1], focal_length)] - for xyz in flat_corner_xyz_list - ] +def heliostat_corner_xyz_list_given_focal_length_smooth(flat_corner_xyz_list, focal_length): + return [[xyz[0], xyz[1], xyz[2] + smooth_z(xyz[0], xyz[1], focal_length)] for xyz in flat_corner_xyz_list] -def heliostat_corner_xyz_list_given_focal_length_faceted( - flat_corner_xyz_list, focal_length -): +def heliostat_corner_xyz_list_given_focal_length_faceted(flat_corner_xyz_list, focal_length): # Construct initial smooth design. smooth_xyz_list = [ - [xyz[0], xyz[1], xyz[2] + smooth_z(xyz[0], xyz[1], focal_length)] - for xyz in flat_corner_xyz_list + [xyz[0], xyz[1], xyz[2] + smooth_z(xyz[0], xyz[1], focal_length)] for xyz in flat_corner_xyz_list ] - return vertically_move_facets_to_flat_z_heights( - flat_corner_xyz_list, smooth_xyz_list - ) + return vertically_move_facets_to_flat_z_heights(flat_corner_xyz_list, smooth_xyz_list) def vertically_move_facets_to_flat_z_heights(flat_corner_xyz_list, smooth_xyz_list): @@ -369,17 +347,13 @@ def shift_xyz(xyz, dz): flat_xyz_B = flat_corner_xyz_list_2.pop(0) flat_xyz_C = flat_corner_xyz_list_2.pop(0) flat_xyz_D = flat_corner_xyz_list_2.pop(0) - flat_z_mean = ( - flat_xyz_A[2] + flat_xyz_B[2] + flat_xyz_C[2] + flat_xyz_D[2] - ) / 4 + flat_z_mean = (flat_xyz_A[2] + flat_xyz_B[2] + flat_xyz_C[2] + flat_xyz_D[2]) / 4 # Smooth facet. smooth_xyz_A = smooth_xyz_list_2.pop(0) smooth_xyz_B = smooth_xyz_list_2.pop(0) smooth_xyz_C = smooth_xyz_list_2.pop(0) smooth_xyz_D = smooth_xyz_list_2.pop(0) - smooth_z_mean = ( - smooth_xyz_A[2] + smooth_xyz_B[2] + smooth_xyz_C[2] + smooth_xyz_D[2] - ) / 4 + smooth_z_mean = (smooth_xyz_A[2] + smooth_xyz_B[2] + smooth_xyz_C[2] + smooth_xyz_D[2]) / 4 # Shift. dz = flat_z_mean - smooth_z_mean shifted_xyz_list.append(shift_xyz(smooth_xyz_A, dz)) diff --git a/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_utils.py b/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_utils.py index 534018d1..bf588967 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_utils.py +++ b/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_utils.py @@ -56,12 +56,8 @@ CY = 2160 / 2 # H / 2 # ?? SCAFFOLDING RCB -- USED TO BE SONY ABOVE (NOT [FRAME]) # ?? SCAFFOLDING RCB -- USED TO BE SONY ABOVE (NOT [FRAME]) # ?? SCAFFOLDING RCB -- "DistCoefs" MISSPELLED -- SHOULD BE "DistCoeffs", OR EVEN BETTER, "DISTORTION_COEFFICIENTS" -DistCoefs = np.array( - [[K1, K2, P1, P2]] -) # ?? SCAFFOLDING RCB -- USED TO BE SONY ABOVE (NOT [FRAME]) -ZeroDistCoefs = np.array( - [[1.0e-9, 0.0, 0.0, 0.0]] -) # ?? SCAFFOLDING RCB -- SET FLOAT TYPE PROPERLY +DistCoefs = np.array([[K1, K2, P1, P2]]) # ?? SCAFFOLDING RCB -- USED TO BE SONY ABOVE (NOT [FRAME]) +ZeroDistCoefs = np.array([[1.0e-9, 0.0, 0.0, 0.0]]) # ?? SCAFFOLDING RCB -- SET FLOAT TYPE PROPERLY CameraMatrix = np.array( [ [FX, 0, CX], # ?? SCAFFOLDING RCB -- USED TO BE SONY ABOVE (NOT [FRAME]) @@ -100,12 +96,8 @@ PLT_CENTER_COLOR = 'c' ## MAGIC NUMBERS -INTER_POINT_DISTANCE = ( - 20 # for the corners # ?? SCAFFOLDING RCB - MODIFIED ORIGINAL CODE -) -SIDE_FACET_DISTANCE = ( - 800 # for the facets # ?? 
SCAFFOLDING RCB - MODIFIED ORIGINAL CODE -) +INTER_POINT_DISTANCE = 20 # for the corners # ?? SCAFFOLDING RCB - MODIFIED ORIGINAL CODE +SIDE_FACET_DISTANCE = 800 # for the facets # ?? SCAFFOLDING RCB - MODIFIED ORIGINAL CODE COMPONENT_THRESHOLD = 35 # 50 # 100 # ?? SCAFFOLDING RCB - MODIFIED ORIGINAL CODE CLUSTERED_CORNERS_DISTANCE = 100 # ?? SCAFFOLDING RCB - MODIFIED ORIGINAL CODE # INTER_POINT_DISTANCE = 20 # for the corners # ?? SCAFFOLDING RCB - ORIGINAL CODE @@ -114,9 +106,7 @@ # CLUSTERED_CORNERS_DISTANCE = 100 # ?? SCAFFOLDING RCB - ORIGINAL CODE ## Tracking -MINIMUM_FRACTION_OF_CONFIRMED_CORNERS = ( - 0.7 # This is a measure of quality of match. For whatever corners are expected -) +MINIMUM_FRACTION_OF_CONFIRMED_CORNERS = 0.7 # This is a measure of quality of match. For whatever corners are expected # to be seen inside the frame, this fraction must match via image confirmation. MINIMUM_CORNERS_REQUIRED_INSIDE_FRAME = ( @@ -180,26 +170,20 @@ def save_image(img, imgname, path): - print( - 'In save_image(), saving:', os.path.join(path, imgname) - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print('In save_image(), saving:', os.path.join(path, imgname)) # ?? SCAFFOLDING RCB -- TEMPORARY if img is None: # ?? SCAFFOLDING RCB -- TEMPORARY print( - 'WARNING: In save_image(), img==None encountered for output:', - os.path.join(path, imgname), + 'WARNING: In save_image(), img==None encountered for output:', os.path.join(path, imgname) ) # ?? SCAFFOLDING RCB -- TEMPORARY if img is not None: cv.imwrite(os.path.join(path, imgname), img) def save_fig(img=None, imgname=None, path=None, dpi=500, rgb=False): - print( - 'In save_fig(), saving:', os.path.join(path, imgname) - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print('In save_fig(), saving:', os.path.join(path, imgname)) # ?? SCAFFOLDING RCB -- TEMPORARY if img is None: # ?? SCAFFOLDING RCB -- TEMPORARY print( - 'WARNING: In save_fig(), img==None encountered for output:', - os.path.join(path, imgname), + 'WARNING: In save_fig(), img==None encountered for output:', os.path.join(path, imgname) ) # ?? 
SCAFFOLDING RCB -- TEMPORARY if img is not None: plt.figure() @@ -249,31 +233,14 @@ def CannyImg(img=None, canny_type='normal', lower=None, upper=None): return cv.Canny(img, threshold1=lower, threshold2=upper) -def extract_all_frames_from_video( - vidoe_path=None, video_name=None, saving_path=None, fps=30 -): +def extract_all_frames_from_video(vidoe_path=None, video_name=None, saving_path=None, fps=30): cmd = 'ffmpeg' - cmd += ( - ' -i ' - + vidoe_path - + video_name - + ' -vf fps=' - + str(fps) - + ' ' - + saving_path - + 'img%d.png' - ) + cmd += ' -i ' + vidoe_path + video_name + ' -vf fps=' + str(fps) + ' ' + saving_path + 'img%d.png' subprocess.call(cmd.split()) def extract_frames_from_video( - video_path=None, - video_name=None, - saving_path=None, - fps=30, - starting_time=None, - duration=None, - backward=False, + video_path=None, video_name=None, saving_path=None, fps=30, starting_time=None, duration=None, backward=False ): cmd = 'ffmpeg' if starting_time is not None and starting_time != '': @@ -379,9 +346,7 @@ def extract_frames_nopipe( subprocess.call(command) -def extract_frames_opencv( - video=None, start=None, end=None, saving_path=None, every=1, plot=False, store=False -): +def extract_frames_opencv(video=None, start=None, end=None, saving_path=None, every=1, plot=False, store=False): vid = cv.VideoCapture(video) vid.set(cv.CAP_PROP_POS_FRAMES, start - 1) frame = start @@ -415,13 +380,7 @@ def extract_frames_opencv( def extract_specific_frames_pipe( - video_path=None, - video_name=None, - starting_frame_id=None, - ending_frame_id=None, - fps=30, - height=2160, - width=3840, + video_path=None, video_name=None, starting_frame_id=None, ending_frame_id=None, fps=30, height=2160, width=3840 ): command = [ 'ffmpeg', @@ -491,9 +450,7 @@ def extract_specific_frames_pipe( return image_list -def set_proper_hom_coef_sign( - point_on_line, btype, A, B, C -) -> tuple[float, float, float]: +def set_proper_hom_coef_sign(point_on_line, btype, A, B, C) -> tuple[float, float, float]: """Ensures negative sign distance for points on the mirror side of the line. For a given x and y in "Ax + Bx + C", ensure that the sum is positive/negative @@ -585,9 +542,7 @@ def plot_line(pixels, points, hom_coef, color='r'): component['original_line_points'] = [x1, y1, x2, y2] # col, row, col, row if plot_fit: plot_line( - pixels=pixels, - points=component['original_line_points'], - hom_coef=component['original_line_hom_coef'], + pixels=pixels, points=component['original_line_points'], hom_coef=component['original_line_hom_coef'] ) return component @@ -615,9 +570,7 @@ def fit_line_pixels(pixels): return A, B, C -def fit_line_inliers_pixels( - pixels, coeff, min_tolerance=0.5, max_tolerance=5, tol_step=0.1 -): +def fit_line_inliers_pixels(pixels, coeff, min_tolerance=0.5, max_tolerance=5, tol_step=0.1): A, B, C = coeff required_inliers = int(round(0.7 * len(pixels))) tolerance = min_tolerance @@ -727,32 +680,20 @@ def solvePNP(points3d, points2d, h, w, pnptype='pnp', cam_matrix=None, dist_coef # Check input. if len(points3d) != len(points2d): msg = ( - 'In solvePNP(), len(points3d)=' - + str(len(points3d)) - + ' does not equal len(points3d)=' - + str(len(points3d)) + 'In solvePNP(), len(points3d)=' + str(len(points3d)) + ' does not equal len(points3d)=' + str(len(points3d)) ) print('ERROR: ' + msg) raise ValueError(msg) if len(points3d) < 4: - msg = ( - 'In solvePNP(), len(points3d)=' + str(len(points3d)) + ' is not at least 4.' 
- ) + msg = 'In solvePNP(), len(points3d)=' + str(len(points3d)) + ' is not at least 4.' print('ERROR: ' + msg) raise ValueError(msg) points3d_plane = points3d.copy() points3d_plane[:, 2] = 0 if pnptype == 'calib': - _, mtx, dist, _, _ = cv.calibrateCamera( - [points3d_plane], [points2d], (w, h), None, None - ) + _, mtx, dist, _, _ = cv.calibrateCamera([points3d_plane], [points2d], (w, h), None, None) _, mtx, dist, rvecs, tvecs = cv.calibrateCamera( - [points3d], - [points2d], - (w, h), - mtx, - dist, - flags=cv.CALIB_USE_INTRINSIC_GUESS, + [points3d], [points2d], (w, h), mtx, dist, flags=cv.CALIB_USE_INTRINSIC_GUESS ) rvec, tvec = rvecs[0], tvecs[0] else: @@ -784,9 +725,7 @@ def uncouple_points(corners): def setup_loger(name, log_file, level=log.INFO): - formatter = log.Formatter( - '%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s' - ) + formatter = log.Formatter('%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s') handler = log.FileHandler(log_file) handler.setFormatter(formatter) @@ -800,9 +739,7 @@ def setup_loger(name, log_file, level=log.INFO): def multiprocessing_loger(log_file, level=log.INFO): logger = mp.get_logger() logger.setLevel(level) - formatter = log.Formatter( - '[%(asctime)s| %(levelname)s| %(processName)s] %(message)s' - ) + formatter = log.Formatter('[%(asctime)s| %(levelname)s| %(processName)s] %(message)s') handler = log.FileHandler(log_file) handler.setFormatter(formatter) diff --git a/contrib/app/ufacet-s/helio_scan/lib/FrameNameXyList.py b/contrib/app/ufacet-s/helio_scan/lib/FrameNameXyList.py index e3fe7197..b4bd3299 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/FrameNameXyList.py +++ b/contrib/app/ufacet-s/helio_scan/lib/FrameNameXyList.py @@ -171,9 +171,7 @@ def number_of_points_in_bounds(self, bounding_box): for xy in xy_list: x = xy[0] y = xy[1] - if ((x_min <= x) and (x <= x_max)) and ( - (y_min <= y) and (y <= y_max) - ): + if ((x_min <= x) and (x <= x_max)) and ((y_min <= y) and (y <= y_max)): total_points += 1 return total_points @@ -215,9 +213,7 @@ def points_per_frame(self) -> dict[int, int]: # MODIFICATION - def add_list_of_name_xy_lists( - self, frame_id: int, input_list_of_name_xy_lists: NXL - ): + def add_list_of_name_xy_lists(self, frame_id: int, input_list_of_name_xy_lists: NXL): """ Add a list of [name, xy_list] pairs to the dictionary, under the given frame_id key. Assumes the frame_id is not already there. @@ -252,9 +248,7 @@ def merge_list_of_name_xy_lists( self.dictionary[frame_id] = copy.deepcopy(input_list_of_name_xy_lists) else: existing_list_of_name_xy_lists = self.dictionary[frame_id] - existing_name_list = [ - name_xy_list[0] for name_xy_list in existing_list_of_name_xy_lists - ] + existing_name_list = [name_xy_list[0] for name_xy_list in existing_list_of_name_xy_lists] for input_name_xy_list in input_list_of_name_xy_lists: input_name_xy_list_copy = copy.deepcopy(input_name_xy_list) input_name_copy = input_name_xy_list_copy[0] @@ -323,10 +317,7 @@ def load(self, input_dir_body_ext: str): # "fnxl" abbreviates "FrameNameXyList" # print('In FrameNameXyList.load(), loading input file: ', input_dir_body_ext) # Check if the input file exists. if not ft.file_exists(input_dir_body_ext): - raise OSError( - 'In FrameNameXyList.load(), file does not exist: ' - + str(input_dir_body_ext) - ) + raise OSError('In FrameNameXyList.load(), file does not exist: ' + str(input_dir_body_ext)) # Open and read the file. 
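The solvePNP() wrapper reformatted above, like the compute_camera_pose() calls in HeliostatInfer3d later in this patch, ultimately rests on OpenCV's pose solver. The sketch below shows the underlying solvePnP call and the reprojection-error check that the 3-d inference search drives down; the intrinsics and the 2-d/3-d correspondences are made-up placeholders, not values from the pipeline.

    import numpy as np
    import cv2 as cv

    # Placeholder intrinsics and correspondences: four coplanar 3-d corners and their observed pixels.
    camera_matrix = np.array([[2868.1, 0.0, 1920.0], [0.0, 2875.9, 1080.0], [0.0, 0.0, 1.0]])
    dist_coeffs = np.zeros((1, 4))
    points3d = np.array([[-0.6, 0.6, 0.0], [0.6, 0.6, 0.0], [0.6, -0.6, 0.0], [-0.6, -0.6, 0.0]])
    points2d = np.array([[1700.0, 900.0], [2150.0, 910.0], [2140.0, 1350.0], [1690.0, 1340.0]])

    # Solve for the camera pose (rotation and translation vectors) that maps the 3-d model onto the pixels.
    ok, rvec, tvec = cv.solvePnP(points3d, points2d, camera_matrix, dist_coeffs)

    # Reproject the model through the recovered pose and measure the RMS pixel error,
    # which is the quantity the heliostat-shape search tries to minimize.
    projected, _ = cv.projectPoints(points3d, rvec, tvec, camera_matrix, dist_coeffs)
    rms_error = np.sqrt(np.mean(np.sum((projected.reshape(-1, 2) - points2d) ** 2, axis=1)))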
with open(input_dir_body_ext, newline='') as input_stream: reader = csv.reader(input_stream, delimiter=',') @@ -344,24 +335,17 @@ def parse_row_list_of_name_xylists(self, input_row_remainder: list[str]) -> NXL: if n_hel == 0: return [] else: - return self.parse_row_list_of_name_xylists_aux( - input_row_remainder[1:], n_hel, return_remainder=False - ) + return self.parse_row_list_of_name_xylists_aux(input_row_remainder[1:], n_hel, return_remainder=False) def from_csv_line(self, data: list[str]) -> tuple[NXL, list[str]]: n_hel = int(data[0]) if n_hel == 0: return [], data[1:] else: - return self.parse_row_list_of_name_xylists_aux( - data[1:], n_hel, return_remainder=True - ) + return self.parse_row_list_of_name_xylists_aux(data[1:], n_hel, return_remainder=True) def parse_row_list_of_name_xylists_aux( - self, - input_row_list_of_name_xylists: list[str], - n_hel: int, - return_remainder=False, + self, input_row_list_of_name_xylists: list[str], n_hel: int, return_remainder=False ) -> NXL | tuple[NXL, list[str]]: # Fetch this name_xylist's heliostat name and the number of points in its xylist. hel_name = input_row_list_of_name_xylists[0] @@ -371,10 +355,7 @@ def parse_row_list_of_name_xylists_aux( for idx in range(0, n_points): idx_x = 2 + (2 * idx) idx_y = idx_x + 1 - vertex_str = [ - input_row_list_of_name_xylists[idx_x], - input_row_list_of_name_xylists[idx_y], - ] + vertex_str = [input_row_list_of_name_xylists[idx_x], input_row_list_of_name_xylists[idx_y]] x_str = vertex_str[0] y_str = vertex_str[1] x = float(x_str) @@ -416,16 +397,7 @@ def save(self, output_dir_body_ext: str): @staticmethod def csv_header(delimeter=",") -> str: - return delimeter.join( - [ - "frame_id", - "n_heliostats", - "hel_name", - "n_vertices", - "vert_1_x", - "vert_1_y", - ] - ) + return delimeter.join(["frame_id", "n_heliostats", "hel_name", "n_vertices", "vert_1_x", "vert_1_y"]) def to_csv_line(self, delimeter=",", frame_id=None): """Converts a single frame id to a string.""" @@ -472,12 +444,7 @@ def print( indent=None, ): # Number of blankss to print at the beginning of each line. # Print. - dt.print_dict( - self.dictionary, - max_keys=max_keys, - max_value_length=max_value_length, - indent=indent, - ) + dt.print_dict(self.dictionary, max_keys=max_keys, max_value_length=max_value_length, indent=indent) def draw_frames( self, @@ -514,30 +481,21 @@ def draw_frames( # Call draw_frame(), using appropriate execution mode. if single_processor == True: - print( - 'In In FrameNameXyList.draw_frames(), starting frame rendering (single processor)...' - ) + print('In In FrameNameXyList.draw_frames(), starting frame rendering (single processor)...') for frame_id in self.dictionary.keys(): self.draw_frame(frame_id) print('In In FrameNameXyList.draw_frames(), frame rendering done.') elif single_processor == False: - print( - 'In In FrameNameXyList.draw_frames(), starting frame rendering (multi-processor)...' 
- ) + print('In In FrameNameXyList.draw_frames(), starting frame rendering (multi-processor)...') logger = logt.multiprocessing_logger(log_dir_body_ext, level=logging.INFO) - logger.info( - '================================= Execution =================================' - ) + logger.info('================================= Execution =================================') with Pool(25) as pool: pool.map(self.draw_frame, self.dictionary.keys()) print('In In FrameNameXyList.draw_frames(), frame rendering done.') else: - print( - 'ERROR: In FrameNameXyList.draw_frames(), unexpected value single_processor =', - str(single_processor), - ) + print('ERROR: In FrameNameXyList.draw_frames(), unexpected value single_processor =', str(single_processor)) assert False def draw_frame(self, frame_id: int): @@ -556,9 +514,7 @@ def draw_frame(self, frame_id: int): style_dict = self.draw_frame_style_dict crop = self.draw_frame_crop # Construct frame file name. - key_frame_body_ext = upf.frame_file_body_ext_given_frame_id( - input_video_body, frame_id, input_frame_id_format - ) + key_frame_body_ext = upf.frame_file_body_ext_given_frame_id(input_video_body, frame_id, input_frame_id_format) # Load frame. input_dir_body_ext = os.path.join(input_frame_dir, key_frame_body_ext) frame_img = cv.imread(input_dir_body_ext) @@ -579,14 +535,8 @@ def draw_frame(self, frame_id: int): assert False first_pt = active_xy_list_2[0] active_xy_list_2.append(first_pt) - annotation_list.append( - pa.PlotAnnotation( - 'point_seq', active_xy_list_2, None, style_dict['point_seq'] - ) - ) - annotation_list.append( - pa.PlotAnnotation('text', [label_xy], hel_name, style_dict['text']) - ) + annotation_list.append(pa.PlotAnnotation('point_seq', active_xy_list_2, None, style_dict['point_seq'])) + annotation_list.append(pa.PlotAnnotation('text', [label_xy], hel_name, style_dict['text'])) # Prepare crop_box. # Crop box is [[x_min, y_min], [x_max, y_max]] or None. if crop: @@ -600,9 +550,7 @@ def draw_frame(self, frame_id: int): ip.plot_image_figure( frame_img, rgb=False, - title=( - str(input_video_body) + ' Frame ' + str(frame_id) + ', ' + title_name - ), + title=(str(input_video_body) + ' Frame ' + str(frame_id) + ', ' + title_name), annotation_list=annotation_list, crop_box=crop_box, context_str=context_str, @@ -629,9 +577,7 @@ def remove_flag_points(self, input_xy_list: XL) -> XL: # HELPER FUNCTIONS -def construct_merged_copy( - input_fnxl_list: FNXL, -) -> FNXL: # A list of FrameNameXyList objects. +def construct_merged_copy(input_fnxl_list: FNXL) -> FNXL: # A list of FrameNameXyList objects. """ Constructs a new FrameNameXyList object, combining the entries of the input FrameNameXyList objects, without modifying them. """ diff --git a/contrib/app/ufacet-s/helio_scan/lib/HeliostatInfer2dFrame.py b/contrib/app/ufacet-s/helio_scan/lib/HeliostatInfer2dFrame.py index 03f73f1f..81e0f104 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/HeliostatInfer2dFrame.py +++ b/contrib/app/ufacet-s/helio_scan/lib/HeliostatInfer2dFrame.py @@ -109,16 +109,12 @@ def setup_homography_dicts(self): for homography_dict in homography_dicts: z = homography_dict['z'] coplanar_corner_dicts = [] # All points within homography plane. - common_corner_dicts = ( - [] - ) # Points within homography plane, excluding missing points. + common_corner_dicts = [] # Points within homography plane, excluding missing points. 
for corner_dict in self.corner_dicts: if corner_dict['flat_xyz'][2] == z: corner_dict['homography_z'] = z coplanar_corner_dicts.append(corner_dict) - if (corner_dict['observed_xy'][0] != -1) or ( - corner_dict['observed_xy'][1] != -1 - ): + if (corner_dict['observed_xy'][0] != -1) or (corner_dict['observed_xy'][1] != -1): common_corner_dicts.append(corner_dict) homography_dict['coplanar_corner_dicts'] = coplanar_corner_dicts homography_dict['common_corner_dicts'] = common_corner_dicts @@ -127,16 +123,12 @@ def setup_homography_dicts(self): def contruct_homographies(self): for homography_dict in self.homography_dicts: - self.contruct_homography( - homography_dict - ) # Adds to homography_dict as a side effect. + self.contruct_homography(homography_dict) # Adds to homography_dict as a side effect. def contruct_homography(self, homography_dict): common_corner_dicts = homography_dict['common_corner_dicts'] common_observed_xy_list = [d['observed_xy'] for d in common_corner_dicts] - common_flat_xy_list = [ - d['flat_xyz'][0:2] for d in common_corner_dicts - ] # Note drop z. + common_flat_xy_list = [d['flat_xyz'][0:2] for d in common_corner_dicts] # Note drop z. source_points = np.array(common_observed_xy_list) destination_points = np.array(common_flat_xy_list) H, retval = cv.findHomography(source_points, destination_points) @@ -159,9 +151,7 @@ def map_observed_points_onto_flat_model_aux(self, homography_dict): normalized_onto_flat_xy1 = mapped_onto_flat_xy1 / mapped_onto_flat_xy1[2] # Construct a 3-d point for convenience in rendering up to this point in the computation. normalized_onto_flat_xy = normalized_onto_flat_xy1[0:2] - normalized_onto_flat_xyz = list(normalized_onto_flat_xy) + [ - corner_dict['homography_z'] - ] + normalized_onto_flat_xyz = list(normalized_onto_flat_xy) + [corner_dict['homography_z']] # Store mapped points. corner_dict['observed_xy1'] = observed_xy1 corner_dict['mapped_onto_flat_xy1'] = mapped_onto_flat_xy1 diff --git a/contrib/app/ufacet-s/helio_scan/lib/HeliostatInfer3d.py b/contrib/app/ufacet-s/helio_scan/lib/HeliostatInfer3d.py index 818734ca..d361b2dd 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/HeliostatInfer3d.py +++ b/contrib/app/ufacet-s/helio_scan/lib/HeliostatInfer3d.py @@ -78,20 +78,13 @@ def __init__( ): # Flags to control rendering; e.g., whether to output intermediate construction steps. # Start progress log. self.search_log = [] - msg_line = ( - tdt.current_time_string() - + ' ' - + str(hel_name) - + ' starting 3d inference...' - ) + msg_line = tdt.current_time_string() + ' ' + str(hel_name) + ' starting 3d inference...' self.search_log.append(msg_line) print('\n' + msg_line) # Data. self.hel_name = hel_name - self.list_of_frame_id_observed_corner_xy_lists = ( - list_of_frame_id_observed_corner_xy_lists - ) + self.list_of_frame_id_observed_corner_xy_lists = list_of_frame_id_observed_corner_xy_lists self.flat_corner_xyz_list = flat_corner_xyz_list self.n_corners = len(self.flat_corner_xyz_list) # Execution control. @@ -100,9 +93,7 @@ def __init__( self.zero_distortion_coefficients = zero_distortion_coefficients # Input/output sources. 
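contruct_homography() and map_observed_points_onto_flat_model_aux() above fit a planar homography from observed corner pixels to the flat heliostat model and then push points through it in homogeneous coordinates. A minimal sketch of that pattern follows; the corner coordinates are placeholders chosen only to make the example runnable.

    import numpy as np
    import cv2 as cv

    # Placeholder data: four observed corner pixels and the flat-model xy positions they correspond to.
    common_observed_xy = np.array([[105.0, 212.0], [408.0, 199.0], [415.0, 498.0], [98.0, 510.0]])
    common_flat_xy = np.array([[0.0, 0.0], [1.22, 0.0], [1.22, 1.22], [0.0, 1.22]])

    # Fit the homography mapping image pixels onto the flat plane.
    H, inlier_mask = cv.findHomography(common_observed_xy, common_flat_xy)

    # Map one observed point: append 1 to make it homogeneous, multiply, then normalize by the third coordinate.
    observed_xy1 = np.array([105.0, 212.0, 1.0])
    mapped_onto_flat_xy1 = H @ observed_xy1
    normalized_onto_flat_xy = mapped_onto_flat_xy1[:2] / mapped_onto_flat_xy1[2]  # approximately (0.0, 0.0)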
self.specifications = specifications - self.theoretical_flat_heliostat_dir_body_ext = ( - theoretical_flat_heliostat_dir_body_ext - ) + self.theoretical_flat_heliostat_dir_body_ext = theoretical_flat_heliostat_dir_body_ext self.theoretical_flat_heliostat_dict = theoretical_flat_heliostat_dict self.theoretical_flat_heliostat_xyz_list = theoretical_flat_heliostat_xyz_list self.theoretical_flat_heliostat_spec = ( @@ -119,26 +110,13 @@ def __init__( self.render_control = render_control # Execution control. # ?? SCAFFOLDING RCB - MAKE THIS AN INPUT - self.max_frames_to_process = ( - 5000 # For this heliostat. # ?? SCAFFOLDING RCB -- FIX VALUE - ) + self.max_frames_to_process = 5000 # For this heliostat. # ?? SCAFFOLDING RCB -- FIX VALUE self.max_missing_corners_to_allow = 35 # 0 self.minimum_points_per_facet = 2 self.n_steps_one_direction = 20 self.n_planar_iterations = 50 self.n_canting_iterations = 50 - self.variable_steps = [ - 1, - 2, - 3, - 4, - 6, - 8, - 10, - 13, - 16, - 20, - ] # Search more coarsely where a larger step is needed. + self.variable_steps = [1, 2, 3, 4, 6, 8, 10, 13, 16, 20] # Search more coarsely where a larger step is needed. # Computation cmplexity measurement. self.calls_to_solve_pnp = 0 @@ -152,24 +130,13 @@ def __init__( # Construct an ideal version of this heliostat. msg_line = ( - tdt.current_time_string() - + ' ' - + str(hel_name) - + ' constructing ideal heliostat...' - ) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. - self.search_log.append( - msg_line - ) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. - print( - '\n' + msg_line + tdt.current_time_string() + ' ' + str(hel_name) + ' constructing ideal heliostat...' ) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. - self.design_heliostat_spec = ( - self.specifications.construct_design_heliostat_spec(self.hel_xyz) - ) - self.design_heliostat_xyz_list = ( - self.specifications.heliostat_corner_xyz_list_given_heliostat_spec( - self.design_heliostat_spec - ) + self.search_log.append(msg_line) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. + print('\n' + msg_line) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. + self.design_heliostat_spec = self.specifications.construct_design_heliostat_spec(self.hel_xyz) + self.design_heliostat_xyz_list = self.specifications.heliostat_corner_xyz_list_given_heliostat_spec( + self.design_heliostat_spec ) ( self.design_heliostat_spec, @@ -183,12 +150,8 @@ def __init__( msg_line = ( tdt.current_time_string() + ' ' + str(hel_name) + ' analyzing frames...' ) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. - self.search_log.append( - msg_line - ) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. - print( - '\n' + msg_line - ) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. + self.search_log.append(msg_line) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. + print('\n' + msg_line) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. self.dict_of_frame_dicts = {} for frame_id_xy_list in self.list_of_frame_id_observed_corner_xy_lists: frame_id = frame_id_xy_list[0] @@ -198,12 +161,8 @@ def __init__( not_missing_mask = [] correspond_observed_xy_list = [] correspond_design_xyz_list = [] - for observed_xy, design_xyz in zip( - observed_corner_xy_list, self.design_heliostat_xyz_list - ): - if (observed_xy[0] != -1) or ( - observed_xy[1] != -1 - ): # ?? 
SCAFFOLDING RCB - MAKE A CALL TO A PREDICATE + for observed_xy, design_xyz in zip(observed_corner_xy_list, self.design_heliostat_xyz_list): + if (observed_xy[0] != -1) or (observed_xy[1] != -1): # ?? SCAFFOLDING RCB - MAKE A CALL TO A PREDICATE # This is a valid point. not_missing_mask.append(True) correspond_design_xyz_list.append(design_xyz) @@ -212,9 +171,7 @@ def __init__( not_missing_mask.append(False) n_missing += 1 # Compute camera pose, given only this frame. - camera_rvec, camera_tvec = self.compute_camera_pose( - correspond_design_xyz_list, correspond_observed_xy_list - ) + camera_rvec, camera_tvec = self.compute_camera_pose(correspond_design_xyz_list, correspond_observed_xy_list) # Collect results for this frame. frame_dict = {} # ?? SCAFFOLDING RCB -- SHOULD THIS BE A CLASS? frame_dict['frame_id'] = frame_id @@ -224,9 +181,7 @@ def __init__( frame_dict['correspond_design_xyz_list'] = correspond_design_xyz_list frame_dict['correspond_observed_xy_list'] = correspond_observed_xy_list frame_dict['n_missing'] = n_missing - frame_dict['use_for_metrology'] = ( - n_missing <= self.max_missing_corners_to_allow - ) + frame_dict['use_for_metrology'] = n_missing <= self.max_missing_corners_to_allow frame_dict['single_frame_camera_rvec'] = camera_rvec frame_dict['single_frame_camera_tvec'] = camera_tvec # Add to results for all frames. @@ -246,19 +201,12 @@ def __init__( linestyle='-', linewidth=0.3, color='m', marker='+', markersize=3 #'o', ), # 0.4), processed_style=rcps.RenderControlPointSeq( - linestyle='-', - linewidth=0.5, - marker='o', - markersize=0.7, - markeredgewidth=0.2, - color='b', + linestyle='-', linewidth=0.5, marker='o', markersize=0.7, markeredgewidth=0.2, color='b' ), ) # Report the reprojection error over all frames, assuming the ideal heliostat. - design_error_n_pts = self.reprojection_error_all_frames_given_heliostat_spec( - self.design_heliostat_spec - ) + design_error_n_pts = self.reprojection_error_all_frames_given_heliostat_spec(self.design_heliostat_spec) msg_line = ( tdt.current_time_string() + ' ' @@ -270,12 +218,8 @@ def __init__( + ', n_points =' + str(design_error_n_pts[1]) ) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. - self.search_log.append( - msg_line - ) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. - print( - '\n' + msg_line - ) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. + self.search_log.append(msg_line) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. + print('\n' + msg_line) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. # # Draw frame images, for reference. # for frame_id in dt.sorted_keys(self.dict_of_frame_dicts): @@ -294,12 +238,8 @@ def __init__( + str(hel_name) + ' adjusting design heliostat to minimize reprojection error...' ) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. - self.search_log.append( - msg_line - ) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. - print( - '\n' + msg_line - ) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. + self.search_log.append(msg_line) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. + print('\n' + msg_line) # ?? SCAFFOLDING RCB -- ENCAPSULATE THESE INTO A "LOG" MEMBER FUNCTION. self.search_heliostat_spec = copy.deepcopy(self.design_heliostat_spec) self.adjust_heliostat_spec_to_minimize_reprojection_error( self.search_heliostat_spec, # Changed as a side effect. 
@@ -310,9 +250,7 @@ def __init__( ) # Save the projected points from the final step of the search. - self.final_projected_points_dict = self.construct_final_projected_points_dict( - self.search_heliostat_spec - ) + self.final_projected_points_dict = self.construct_final_projected_points_dict(self.search_heliostat_spec) # Save and render final result. self.save_and_analyze_final_heliostat_spec( @@ -338,17 +276,10 @@ def select_distortion_model(self): ) def adjust_heliostat_spec_to_minimize_reprojection_error( - self, - search_heliostat_spec, - n_planar_iterations, - n_canting_iterations, - n_steps_one_direction, - variable_steps, + self, search_heliostat_spec, n_planar_iterations, n_canting_iterations, n_steps_one_direction, variable_steps ): # Save figures for initial state. - self.save_and_analyze_heliostat_spec( - 'SearchIter' + str(0), search_heliostat_spec - ) + self.save_and_analyze_heliostat_spec('SearchIter' + str(0), search_heliostat_spec) # Iterative search, allowing planar adjustment of facet positions (x,y,z,rot_z). self.search_overall_error_history = [] @@ -356,11 +287,7 @@ def adjust_heliostat_spec_to_minimize_reprojection_error( n_facets = 25 # ?? SCAFFOLDING RCB -- MAKE GENERAL ACROSS DESIGNS facet_is_converged = [False] * n_facets for iteration in range(1, (n_planar_iterations + 1)): - current_error_n_pts = ( - self.reprojection_error_all_frames_given_heliostat_spec( - search_heliostat_spec - ) - ) + current_error_n_pts = self.reprojection_error_all_frames_given_heliostat_spec(search_heliostat_spec) current_overall_error = current_error_n_pts[0] current_n_points_sum = current_error_n_pts[1] self.search_overall_error_history.append([iteration, current_overall_error]) @@ -369,9 +296,7 @@ def adjust_heliostat_spec_to_minimize_reprojection_error( + ' ' + str(self.hel_name) + ' iteration {iteration:2d} overall error={overall_err:11.8f} n_points={n_points:d}'.format( - iteration=iteration, - overall_err=current_overall_error, - n_points=current_n_points_sum, + iteration=iteration, overall_err=current_overall_error, n_points=current_n_points_sum ) ) self.search_log.append(msg_line) @@ -379,29 +304,19 @@ def adjust_heliostat_spec_to_minimize_reprojection_error( previous_error = current_overall_error for facet_idx in range(0, n_facets): if not facet_is_converged[facet_idx]: - (best_var_name, best_min_error, best_min_error_del_var) = ( - self.find_best_variable_xyz_rot_z( - search_heliostat_spec, - facet_to_adjust_idx=facet_idx, - n_steps_one_direction=n_steps_one_direction, - variable_steps=variable_steps, - ) + (best_var_name, best_min_error, best_min_error_del_var) = self.find_best_variable_xyz_rot_z( + search_heliostat_spec, + facet_to_adjust_idx=facet_idx, + n_steps_one_direction=n_steps_one_direction, + variable_steps=variable_steps, ) if best_min_error_del_var == 0: facet_is_converged[facet_idx] = True - search_heliostat_spec[facet_idx][ - best_var_name - ] += best_min_error_del_var + search_heliostat_spec[facet_idx][best_var_name] += best_min_error_del_var error_reduction = previous_error - best_min_error previous_error = best_min_error self.search_facet_error_history.append( - [ - iteration, - facet_idx, - best_var_name, - best_min_error, - best_min_error_del_var, - ] + [iteration, facet_idx, best_var_name, best_min_error, best_min_error_del_var] ) msg_line = ( tdt.current_time_string() @@ -424,28 +339,18 @@ def adjust_heliostat_spec_to_minimize_reprojection_error( tdt.current_time_string() + ' ' + str(self.hel_name) - + ' iteration {iteration:2d}; all facets are converged 
(x,y,z,rot_z).'.format( - iteration=iteration - ) + + ' iteration {iteration:2d}; all facets are converged (x,y,z,rot_z).'.format(iteration=iteration) ) self.search_log.append(msg_line) print('\n' + msg_line) break # Save figures for last planar iteration. - self.save_and_analyze_heliostat_spec( - 'SearchIter' + str(iteration), search_heliostat_spec - ) + self.save_and_analyze_heliostat_spec('SearchIter' + str(iteration), search_heliostat_spec) # Iterative search, allowing adjustment of canting angles (rot_x,rot_y). facet_is_converged = [False] * n_facets - for iteration in range( - (n_planar_iterations + 1), (n_planar_iterations + n_canting_iterations + 1) - ): - current_error_n_pts = ( - self.reprojection_error_all_frames_given_heliostat_spec( - search_heliostat_spec - ) - ) + for iteration in range((n_planar_iterations + 1), (n_planar_iterations + n_canting_iterations + 1)): + current_error_n_pts = self.reprojection_error_all_frames_given_heliostat_spec(search_heliostat_spec) current_overall_error = current_error_n_pts[0] current_n_points_sum = current_error_n_pts[1] self.search_overall_error_history.append([iteration, current_overall_error]) @@ -454,9 +359,7 @@ def adjust_heliostat_spec_to_minimize_reprojection_error( + ' ' + str(self.hel_name) + ' iteration {iteration:2d} overall error={overall_err:11.8f} n_points={n_points:d}'.format( - iteration=iteration, - overall_err=current_overall_error, - n_points=current_n_points_sum, + iteration=iteration, overall_err=current_overall_error, n_points=current_n_points_sum ) ) self.search_log.append(msg_line) @@ -464,29 +367,19 @@ def adjust_heliostat_spec_to_minimize_reprojection_error( previous_error = current_overall_error for facet_idx in range(0, n_facets): if not facet_is_converged[facet_idx]: - (best_var_name, best_min_error, best_min_error_del_var) = ( - self.find_best_variable_rot_xy( - search_heliostat_spec, - facet_to_adjust_idx=facet_idx, - n_steps_one_direction=n_steps_one_direction, - variable_steps=variable_steps, - ) + (best_var_name, best_min_error, best_min_error_del_var) = self.find_best_variable_rot_xy( + search_heliostat_spec, + facet_to_adjust_idx=facet_idx, + n_steps_one_direction=n_steps_one_direction, + variable_steps=variable_steps, ) if best_min_error_del_var == 0: facet_is_converged[facet_idx] = True - search_heliostat_spec[facet_idx][ - best_var_name - ] += best_min_error_del_var + search_heliostat_spec[facet_idx][best_var_name] += best_min_error_del_var error_reduction = previous_error - best_min_error previous_error = best_min_error self.search_facet_error_history.append( - [ - iteration, - facet_idx, - best_var_name, - best_min_error, - best_min_error_del_var, - ] + [iteration, facet_idx, best_var_name, best_min_error, best_min_error_del_var] ) msg_line = ( tdt.current_time_string() @@ -509,24 +402,16 @@ def adjust_heliostat_spec_to_minimize_reprojection_error( tdt.current_time_string() + ' ' + str(self.hel_name) - + ' iteration {iteration:2d}; all facets are converged (rot_x,rot_y).'.format( - iteration=iteration - ) + + ' iteration {iteration:2d}; all facets are converged (rot_x,rot_y).'.format(iteration=iteration) ) self.search_log.append(msg_line) print('\n' + msg_line) break # Save figures for last canting iteration. 
- self.save_and_analyze_heliostat_spec( - 'SearchIter' + str(iteration), search_heliostat_spec - ) + self.save_and_analyze_heliostat_spec('SearchIter' + str(iteration), search_heliostat_spec) def find_best_variable_xyz_rot_z( - self, - search_heliostat_spec, - facet_to_adjust_idx, - n_steps_one_direction, - variable_steps, + self, search_heliostat_spec, facet_to_adjust_idx, n_steps_one_direction, variable_steps ): # ?? SCAFFOLDING RCB -- EVALUATE WHETHER WE CAN REDUCE THE NUMBER OF DEEPCOPIES, OR REDUCE THE COMPLEXITY OF EACH DEEPCOPY. rot_z_min_err, min_err_del_rot_z = self.find_best_value( copy.deepcopy(search_heliostat_spec), @@ -561,17 +446,11 @@ def find_best_variable_xyz_rot_z( elif min_err == c_y_min_err: return 'center_y', c_y_min_err, min_err_del_c_y else: - print( - 'ERROR: In Heliostats3dInference.find_best_variable(), unexpected situation encountered.' - ) + print('ERROR: In Heliostats3dInference.find_best_variable(), unexpected situation encountered.') assert False def find_best_variable_rot_xy( - self, - search_heliostat_spec, - facet_to_adjust_idx, - n_steps_one_direction, - variable_steps, + self, search_heliostat_spec, facet_to_adjust_idx, n_steps_one_direction, variable_steps ): rot_x_min_err, min_err_del_rot_x = self.find_best_value( copy.deepcopy(search_heliostat_spec), @@ -596,19 +475,11 @@ def find_best_variable_rot_xy( elif min_err == rot_y_min_err: return 'rot_y', rot_y_min_err, min_err_del_rot_y else: - print( - 'ERROR: In Heliostats3dInference.find_best_variable(), unexpected situation encountered.' - ) + print('ERROR: In Heliostats3dInference.find_best_variable(), unexpected situation encountered.') assert False def find_best_value( - self, - search_heliostat_spec, - facet_to_adjust_idx, - var_name, - half_range, - n_steps_one_direction, - variable_steps, + self, search_heliostat_spec, facet_to_adjust_idx, var_name, half_range, n_steps_one_direction, variable_steps ): # Common values. original_var = search_heliostat_spec[facet_to_adjust_idx][var_name] @@ -617,11 +488,7 @@ def find_best_value( # We test this first, because if nothing makes a difference, we want to prescribe zero change. zero_change_del_var = 0.0 zero_change_overall_error, zero_change_n_points_sum = self.find_best_value_aux( - search_heliostat_spec, - facet_to_adjust_idx, - var_name, - original_var, - zero_change_del_var, + search_heliostat_spec, facet_to_adjust_idx, var_name, original_var, zero_change_del_var ) # Initial error to beat. minimum_error = zero_change_overall_error @@ -630,11 +497,7 @@ def find_best_value( # Scan forward check. del_var = step * var_step overall_error, n_points_sum = self.find_best_value_aux( - search_heliostat_spec, - facet_to_adjust_idx, - var_name, - original_var, - del_var, + search_heliostat_spec, facet_to_adjust_idx, var_name, original_var, del_var ) if overall_error < minimum_error: minimum_error = overall_error @@ -642,11 +505,7 @@ def find_best_value( # Scan backward check. del_var = -del_var overall_error, n_points_sum = self.find_best_value_aux( - search_heliostat_spec, - facet_to_adjust_idx, - var_name, - original_var, - del_var, + search_heliostat_spec, facet_to_adjust_idx, var_name, original_var, del_var ) if overall_error < minimum_error: minimum_error = overall_error @@ -654,33 +513,18 @@ def find_best_value( # Return. 
return minimum_error, minimum_error_del_var - def find_best_value_aux( - self, - search_heliostat_spec, - facet_to_adjust_idx, - var_name, - original_var, - del_var, - ): + def find_best_value_aux(self, search_heliostat_spec, facet_to_adjust_idx, var_name, original_var, del_var): var = original_var + del_var search_heliostat_spec[facet_to_adjust_idx][var_name] = var - test_error_n_pts_by_facet = ( - self.reprojection_error_single_facet_all_frames_given_heliostat_spec( - search_heliostat_spec, facet_to_adjust_idx - ) + test_error_n_pts_by_facet = self.reprojection_error_single_facet_all_frames_given_heliostat_spec( + search_heliostat_spec, facet_to_adjust_idx ) overall_error = test_error_n_pts_by_facet[0] n_points_sum = test_error_n_pts_by_facet[1] return overall_error, n_points_sum - def reprojection_error_single_facet_all_frames_given_heliostat_spec( - self, heliostat_spec, facet_to_adjust_idx - ): - heliostat_corner_xyz_list = ( - self.specifications.heliostat_corner_xyz_list_given_heliostat_spec( - heliostat_spec - ) - ) + def reprojection_error_single_facet_all_frames_given_heliostat_spec(self, heliostat_spec, facet_to_adjust_idx): + heliostat_corner_xyz_list = self.specifications.heliostat_corner_xyz_list_given_heliostat_spec(heliostat_spec) return self.reprojection_error_single_facet_all_frames_given_corner_xyz_list( heliostat_corner_xyz_list, facet_to_adjust_idx ) @@ -698,9 +542,7 @@ def reprojection_error_single_facet_all_frames_given_corner_xyz_list( ) facet_n_pts = reprojection_error_n_pts[1] if facet_n_pts >= self.minimum_points_per_facet: - reprojection_error_n_pts_per_frame_list.append( - reprojection_error_n_pts - ) + reprojection_error_n_pts_per_frame_list.append(reprojection_error_n_pts) # Combine the errors. distance_sum = 0 n_points_sum = 0 @@ -734,9 +576,7 @@ def reprojection_error_single_facet_one_frame_given_corner_xyz_list( obs_ll = all_observed_xy_list[(facet_to_adjust_idx * 4) + 3] # Project the 3-d heliostat corner points into the camera plane using the camera pose associated with the frame. - proj_facet_pts_reshaped = self.projected_points_given_facet_xyz_list( - frame_dict, hel_facet_xyz_list - ) + proj_facet_pts_reshaped = self.projected_points_given_facet_xyz_list(frame_dict, hel_facet_xyz_list) # Compare the projected point locations against the observed point locations, accumulating error measures. n_points = 0 @@ -747,25 +587,15 @@ def reprojection_error_single_facet_one_frame_given_corner_xyz_list( proj_lr = proj_facet_pts_reshaped[2] proj_ll = proj_facet_pts_reshaped[3] # Update errors. - n_points, overall_error_sum = self.update_facet_corner_error( - obs_ul, proj_ul, n_points, overall_error_sum - ) - n_points, overall_error_sum = self.update_facet_corner_error( - obs_ur, proj_ur, n_points, overall_error_sum - ) - n_points, overall_error_sum = self.update_facet_corner_error( - obs_lr, proj_lr, n_points, overall_error_sum - ) - n_points, overall_error_sum = self.update_facet_corner_error( - obs_ll, proj_ll, n_points, overall_error_sum - ) + n_points, overall_error_sum = self.update_facet_corner_error(obs_ul, proj_ul, n_points, overall_error_sum) + n_points, overall_error_sum = self.update_facet_corner_error(obs_ur, proj_ur, n_points, overall_error_sum) + n_points, overall_error_sum = self.update_facet_corner_error(obs_lr, proj_lr, n_points, overall_error_sum) + n_points, overall_error_sum = self.update_facet_corner_error(obs_ll, proj_ll, n_points, overall_error_sum) # Normalize overall error. if n_points == 0: error = -999999.0 # ?? 
SCAFFOLDING RCB -- TIE THIS TO A GLOBAL CONSTANT. else: - error = ( - overall_error_sum / n_points - ) # ?? SCAFFOLDING RCB -- ELIMINATE THIS NORMALIZATION? + error = overall_error_sum / n_points # ?? SCAFFOLDING RCB -- ELIMINATE THIS NORMALIZATION? # Return. return [error, n_points] @@ -775,11 +605,7 @@ def projected_points_given_facet_xyz_list(self, frame_dict, facet_xyz_list): camera_tvec = frame_dict['single_frame_camera_tvec'] # Using the camera pose, transform the 3-d points into the camera image space. proj_pts, jacobian = cv.projectPoints( - np.array(facet_xyz_list), - camera_rvec, - camera_tvec, - self.camera_matrix, - self.select_distortion_model(), + np.array(facet_xyz_list), camera_rvec, camera_tvec, self.camera_matrix, self.select_distortion_model() ) # ?? SCAFFOLDING RCB -- CORRECT? self.calls_to_project_points += 1 proj_pts_reshaped = proj_pts.reshape(-1, 2) @@ -794,46 +620,30 @@ def update_facet_corner_error(self, obs_xy, proj_xy, n_points, overall_error_sum return n_points, overall_error_sum def xy_is_missing(self, xy): - return (xy[0] == -1) or ( - xy[1] == -1 - ) # ?? SCAFFOLDING RCB -- DEFINE GLOBAL MISSING FLAG, USE THROUGHOUT + return (xy[0] == -1) or (xy[1] == -1) # ?? SCAFFOLDING RCB -- DEFINE GLOBAL MISSING FLAG, USE THROUGHOUT def xy_error(self, xy1, xy2): return np.sqrt((xy2[0] - xy1[0]) ** 2 + (xy2[1] - xy1[1]) ** 2) def reprojection_error_all_frames_given_heliostat_spec(self, heliostat_spec): - heliostat_corner_xyz_list = ( - self.specifications.heliostat_corner_xyz_list_given_heliostat_spec( - heliostat_spec - ) - ) - return self.reprojection_error_all_frames_given_corner_xyz_list( - heliostat_corner_xyz_list - ) + heliostat_corner_xyz_list = self.specifications.heliostat_corner_xyz_list_given_heliostat_spec(heliostat_spec) + return self.reprojection_error_all_frames_given_corner_xyz_list(heliostat_corner_xyz_list) - def reprojection_error_all_frames_given_corner_xyz_list( - self, heliostat_corner_xyz_list - ): + def reprojection_error_all_frames_given_corner_xyz_list(self, heliostat_corner_xyz_list): # Determine the error for each frame. reprojection_error_n_pts_by_facet_per_frame_list = [] for frame_id in dt.sorted_keys(self.dict_of_frame_dicts): frame_dict = self.dict_of_frame_dicts[frame_id] if frame_dict['use_for_metrology']: - reprojection_error_n_pts_by_facet = ( - self.reprojection_error_one_frame_given_corner_xyz_list( - heliostat_corner_xyz_list, frame_id, frame_dict - ) - ) - reprojection_error_n_pts_by_facet_per_frame_list.append( - reprojection_error_n_pts_by_facet + reprojection_error_n_pts_by_facet = self.reprojection_error_one_frame_given_corner_xyz_list( + heliostat_corner_xyz_list, frame_id, frame_dict ) + reprojection_error_n_pts_by_facet_per_frame_list.append(reprojection_error_n_pts_by_facet) # Combine the errors. distance_sum = 0 n_points_sum = 0 by_facet_sum = np.zeros(25) - for ( - reprojection_error_n_pts_by_facet - ) in reprojection_error_n_pts_by_facet_per_frame_list: + for reprojection_error_n_pts_by_facet in reprojection_error_n_pts_by_facet_per_frame_list: error = reprojection_error_n_pts_by_facet[0] n_points = reprojection_error_n_pts_by_facet[1] by_facet = reprojection_error_n_pts_by_facet[2] @@ -847,16 +657,12 @@ def reprojection_error_all_frames_given_corner_xyz_list( # Return. 
return [overall_error, n_points_sum, by_facet_error] - def reprojection_error_one_frame_given_corner_xyz_list( - self, current_all_heliostat_xyz_list, frame_id, frame_dict - ): + def reprojection_error_one_frame_given_corner_xyz_list(self, current_all_heliostat_xyz_list, frame_id, frame_dict): # Fetch data. all_observed_xy_list = frame_dict['all_observed_xy_list'] # Project the 3-d heliostat corner points into the camera plane using the camera pose associated with the frame. - proj_pts_reshaped = self.projected_points_given_corner_xyz_list( - frame_dict, current_all_heliostat_xyz_list - ) + proj_pts_reshaped = self.projected_points_given_corner_xyz_list(frame_dict, current_all_heliostat_xyz_list) # Compare the projected point locations against the observed point locations, accumulating error measures. n_points = 0 @@ -889,15 +695,11 @@ def reprojection_error_one_frame_given_corner_xyz_list( # Store this facet's error. error_per_facet_list.append(facet_error_sum) # Normalize overall error. - error = ( - overall_error_sum / n_points - ) # ?? SCAFFOLDING RCB -- ELIMINATE THIS NORMALIZATION? + error = overall_error_sum / n_points # ?? SCAFFOLDING RCB -- ELIMINATE THIS NORMALIZATION? # Return. return [error, n_points, error_per_facet_list] - def projected_points_given_corner_xyz_list( - self, frame_dict, correspond_heliostat_xyz_list - ): + def projected_points_given_corner_xyz_list(self, frame_dict, correspond_heliostat_xyz_list): # Fetch data. camera_rvec = frame_dict['single_frame_camera_rvec'] camera_tvec = frame_dict['single_frame_camera_tvec'] @@ -914,16 +716,10 @@ def projected_points_given_corner_xyz_list( # Return. return proj_pts_reshaped - def update_corner_error( - self, obs_xy, proj_xy, n_points, overall_error_sum, facet_error_sum - ): + def update_corner_error(self, obs_xy, proj_xy, n_points, overall_error_sum, facet_error_sum): if not self.xy_is_missing(obs_xy): xy_error = self.xy_error(obs_xy, proj_xy) - return ( - n_points + 1, - overall_error_sum + xy_error, - facet_error_sum + xy_error, - ) + return (n_points + 1, overall_error_sum + xy_error, facet_error_sum + xy_error) else: return n_points, overall_error_sum, facet_error_sum @@ -933,23 +729,14 @@ def compute_camera_pose(self, reference_xyz_list, observed_xy_list): """ # Find camera position. retval, camera_rvec, camera_tvec = cv.solvePnP( - np.array(reference_xyz_list), - np.array(observed_xy_list), - self.camera_matrix, - self.select_distortion_model(), + np.array(reference_xyz_list), np.array(observed_xy_list), self.camera_matrix, self.select_distortion_model() ) self.calls_to_solve_pnp += 1 return camera_rvec, camera_tvec - def construct_frame_camera_pose_dict( - self, list_of_frame_id_observed_corner_xy_lists, heliostat_spec - ): + def construct_frame_camera_pose_dict(self, list_of_frame_id_observed_corner_xy_lists, heliostat_spec): # Construct (x,y,z) corner list. - search_xyz_list = ( - self.specifications.heliostat_corner_xyz_list_given_heliostat_spec( - heliostat_spec - ) - ) + search_xyz_list = self.specifications.heliostat_corner_xyz_list_given_heliostat_spec(heliostat_spec) # Assemble camera pose dict. frame_camera_pose_dict = {} for frame_id_xy_list in list_of_frame_id_observed_corner_xy_lists: @@ -970,18 +757,14 @@ def construct_frame_camera_pose_dict( def construct_final_projected_points_dict(self, final_heliostat_spec): # Construct (x,y,z) list. 
- final_heliostat_corner_xyz_list = ( - self.specifications.heliostat_corner_xyz_list_given_heliostat_spec( - final_heliostat_spec - ) + final_heliostat_corner_xyz_list = self.specifications.heliostat_corner_xyz_list_given_heliostat_spec( + final_heliostat_spec ) # Collect projected points for each frame. final_projected_pts_dict = {} for frame_id in dt.sorted_keys(self.dict_of_frame_dicts): frame_dict = self.dict_of_frame_dicts[frame_id] - proj_pts_reshaped = self.projected_points_given_corner_xyz_list( - frame_dict, final_heliostat_corner_xyz_list - ) + proj_pts_reshaped = self.projected_points_given_corner_xyz_list(frame_dict, final_heliostat_corner_xyz_list) proj_pts_list = proj_pts_reshaped.tolist() final_projected_pts_dict[frame_id] = proj_pts_list # Return. @@ -989,26 +772,14 @@ def construct_final_projected_points_dict(self, final_heliostat_spec): def save_and_analyze_heliostat_spec(self, hel_name_suffix, heliostat_spec): # Construct (x,y,z) list. - heliostat_corner_xyz_list = ( - self.specifications.heliostat_corner_xyz_list_given_heliostat_spec( - heliostat_spec - ) - ) + heliostat_corner_xyz_list = self.specifications.heliostat_corner_xyz_list_given_heliostat_spec(heliostat_spec) # Save. - print( - 'In save_and_analyze_heliostat_spec(), heliostat_spec:' - ) # ?? SCAFFOLDING RCB -- WRITE TO DISK INSTEAD. + print('In save_and_analyze_heliostat_spec(), heliostat_spec:') # ?? SCAFFOLDING RCB -- WRITE TO DISK INSTEAD. print(heliostat_spec) # ?? SCAFFOLDING RCB -- WRITE TO DISK INSTEAD. output_hel_name = self.hel_name + hel_name_suffix - output_hel_dir = os.path.join( - self.output_construct_corners_3d_dir, output_hel_name - ) - output_hel_dir_body_ext = uh3a.save_heliostat_3d( - output_hel_name, heliostat_corner_xyz_list, output_hel_dir - ) - output_hel_spec_dir_body_ext = save_heliostat_spec( - heliostat_spec, output_hel_name, output_hel_dir - ) + output_hel_dir = os.path.join(self.output_construct_corners_3d_dir, output_hel_name) + output_hel_dir_body_ext = uh3a.save_heliostat_3d(output_hel_name, heliostat_corner_xyz_list, output_hel_dir) + output_hel_spec_dir_body_ext = save_heliostat_spec(heliostat_spec, output_hel_name, output_hel_dir) # Analyze. camera_rvec = np.array([-2.68359887, -0.2037837, 0.215282]).reshape( 3, 1 @@ -1030,22 +801,12 @@ def save_and_analyze_heliostat_spec(self, hel_name_suffix, heliostat_spec): def save_and_analyze_design_heliostat(self, design_spec): # Construct corner (x,y,z)) list. - design_xyz_list = ( - self.specifications.heliostat_corner_xyz_list_given_heliostat_spec( - design_spec - ) - ) + design_xyz_list = self.specifications.heliostat_corner_xyz_list_given_heliostat_spec(design_spec) # Save. design_hel_name = self.hel_name + 'design' - output_design_dir = os.path.join( - self.output_data_dir, 'DesignHeliostats', design_hel_name - ) - design_hel_xyz_dir_body_ext = uh3a.save_heliostat_3d( - design_hel_name, design_xyz_list, output_design_dir - ) - design_hel_spec_dir_body_ext = save_heliostat_spec( - design_spec, design_hel_name, output_design_dir - ) + output_design_dir = os.path.join(self.output_data_dir, 'DesignHeliostats', design_hel_name) + design_hel_xyz_dir_body_ext = uh3a.save_heliostat_3d(design_hel_name, design_xyz_list, output_design_dir) + design_hel_spec_dir_body_ext = save_heliostat_spec(design_spec, design_hel_name, output_design_dir) # Analyze. 
camera_rvec = np.array([-2.68359887, -0.2037837, 0.215282]).reshape( 3, 1 @@ -1063,12 +824,7 @@ def save_and_analyze_design_heliostat(self, design_spec): distortion_coefficients=self.zero_distortion_coefficients, ) # Return. - return ( - design_spec, - design_xyz_list, - design_hel_xyz_dir_body_ext, - design_hel_spec_dir_body_ext, - ) + return (design_spec, design_xyz_list, design_hel_xyz_dir_body_ext, design_hel_spec_dir_body_ext) def save_frame_track_image(self, frame_id, observed_corner_xy_list): frame_track_step_suffix = 'frameTrack' @@ -1077,9 +833,7 @@ def save_frame_track_image(self, frame_id, observed_corner_xy_list): note = 'Depicted points are undistorted. Expect misalignments with image content.' else: note = None - output_frame_tracking_image_dir = os.path.join( - self.output_construct_corners_3d_dir, step_hel_name - ) + output_frame_tracking_image_dir = os.path.join(self.output_construct_corners_3d_dir, step_hel_name) ft.create_directories_if_necessary(output_frame_tracking_image_dir) uh3a.draw_annotated_frame_image( observed_corner_xy_list, @@ -1101,26 +855,16 @@ def save_and_analyze_final_heliostat_spec( final_projected_points_dict, ): # Construct (x,y,z) list. - heliostat_corner_xyz_list = ( - self.specifications.heliostat_corner_xyz_list_given_heliostat_spec( - heliostat_spec - ) - ) + heliostat_corner_xyz_list = self.specifications.heliostat_corner_xyz_list_given_heliostat_spec(heliostat_spec) # Save. output_hel_name = self.hel_name output_hel_dir = os.path.join(self.output_data_dir, output_hel_name) - output_hel_xyz_dir_body_ext = uh3a.save_heliostat_3d( - output_hel_name, heliostat_corner_xyz_list, output_hel_dir - ) - output_hel_spec_dir_body_ext = save_heliostat_spec( - heliostat_spec, output_hel_name, output_hel_dir - ) + output_hel_xyz_dir_body_ext = uh3a.save_heliostat_3d(output_hel_name, heliostat_corner_xyz_list, output_hel_dir) + output_hel_spec_dir_body_ext = save_heliostat_spec(heliostat_spec, output_hel_name, output_hel_dir) output_frame_dict_dir_body_ext = save_frame_dict_parameters( output_hel_name, self.dict_of_frame_dicts, output_hel_dir ) - output_log_dir_body_ext = save_search_log( - search_log, output_hel_name, output_hel_dir - ) + output_log_dir_body_ext = save_search_log(search_log, output_hel_name, output_hel_dir) output_overall_history_dir_body_ext = save_search_overall_error_history( search_overall_error_history, output_hel_name, output_hel_dir ) @@ -1128,10 +872,7 @@ def save_and_analyze_final_heliostat_spec( search_facet_error_history, output_hel_name, output_hel_dir ) output_complexity_dir_body_ext = save_computation_complexity( - self.calls_to_solve_pnp, - self.calls_to_project_points, - output_hel_name, - output_hel_dir, + self.calls_to_solve_pnp, self.calls_to_project_points, output_hel_name, output_hel_dir ) output_proj_pts_dir_body_ext = save_projected_points_dict( final_projected_points_dict, output_hel_name, output_hel_dir @@ -1169,9 +910,7 @@ def print_facets_canting_angles( ): # ?? SCAFFOLDING RCB -- MAKE GENERAL ACROSS HELIOSTAT DESIGNS for idx in range(0, 25): start_facet_idx = idx * 4 - facet_corner_xyz_list = heliostat_corner_xyz_list[ - start_facet_idx : (start_facet_idx + 4) - ] + facet_corner_xyz_list = heliostat_corner_xyz_list[start_facet_idx : (start_facet_idx + 4)] self.print_facets_canting_angles_aux(idx, facet_corner_xyz_list) # ?? SCAFFOLDING RCB -- MOVE SOMEWHERE ELSE. @@ -1198,12 +937,8 @@ def print_facets_canting_angles_aux( ] ) # Unit diagonals. - u_ll_to_ur = ll_to_ur / np.linalg.norm( - ll_to_ur - ) # ?? 
SCAFFOLDING RCB -- MAKE THIS A SUBROUTINE - u_lr_to_ul = lr_to_ul / np.linalg.norm( - lr_to_ul - ) # ?? SCAFFOLDING RCB -- MAKE THIS A SUBROUTINE + u_ll_to_ur = ll_to_ur / np.linalg.norm(ll_to_ur) # ?? SCAFFOLDING RCB -- MAKE THIS A SUBROUTINE + u_lr_to_ul = lr_to_ul / np.linalg.norm(lr_to_ul) # ?? SCAFFOLDING RCB -- MAKE THIS A SUBROUTINE cross_product = np.cross(u_ll_to_ur, u_lr_to_ul) norm_of_cross = np.linalg.norm(cross_product) diagonal_angle = np.arcsin(norm_of_cross) @@ -1256,12 +991,8 @@ def print_facet_canting_angles( ] ) # Unit diagonals. - u_ll_to_ur = ll_to_ur / np.linalg.norm( - ll_to_ur - ) # ?? SCAFFOLDING RCB -- MAKE THIS A SUBROUTINE - u_lr_to_ul = lr_to_ul / np.linalg.norm( - lr_to_ul - ) # ?? SCAFFOLDING RCB -- MAKE THIS A SUBROUTINE + u_ll_to_ur = ll_to_ur / np.linalg.norm(ll_to_ur) # ?? SCAFFOLDING RCB -- MAKE THIS A SUBROUTINE + u_lr_to_ul = lr_to_ul / np.linalg.norm(lr_to_ul) # ?? SCAFFOLDING RCB -- MAKE THIS A SUBROUTINE cross_product = np.cross(u_ll_to_ur, u_lr_to_ul) norm_of_cross = np.linalg.norm(cross_product) diagonal_angle = np.arcsin(norm_of_cross) @@ -1278,32 +1009,13 @@ def print_facet_canting_angles( facet_lr_xyz, facet_ll_xyz, ) - print( - 'In print_facet_canting_angles(), ll_to_ur:', - ll_to_ur[0], - ll_to_ur[1], - ll_to_ur[2], - ) - print( - 'In print_facet_canting_angles(), lr_to_ul:', - lr_to_ul[0], - lr_to_ul[1], - lr_to_ul[2], - ) + print('In print_facet_canting_angles(), ll_to_ur:', ll_to_ur[0], ll_to_ur[1], ll_to_ur[2]) + print('In print_facet_canting_angles(), lr_to_ul:', lr_to_ul[0], lr_to_ul[1], lr_to_ul[2]) print('In print_facet_canting_angles(), cross_product:', cross_product) print('In print_facet_canting_angles(), norm_of_cross=', norm_of_cross) - print( - 'In print_facet_canting_angles(), diagonal_angle(deg)=', - np.degrees(diagonal_angle), - ) - print( - 'In print_facet_canting_angles(), canting_angle_in_x_direction =', - canting_angle_in_x_direction, - ) - print( - 'In print_facet_canting_angles(), canting_angle_in_y_direction =', - canting_angle_in_y_direction, - ) + print('In print_facet_canting_angles(), diagonal_angle(deg)=', np.degrees(diagonal_angle)) + print('In print_facet_canting_angles(), canting_angle_in_x_direction =', canting_angle_in_x_direction) + print('In print_facet_canting_angles(), canting_angle_in_y_direction =', canting_angle_in_y_direction) # ACCESS @@ -1362,12 +1074,7 @@ def print_facet_canting_angles( def step_hel_name( self, frame_id, step_suffix ): # String to add to heliostat name to denote this step result. Cannot include "_" or "-" characters. - return ( - self.hel_name - + 'f' - + upf.frame_id_str_given_frame_id(frame_id, self.input_frame_id_format) - + step_suffix - ) + return self.hel_name + 'f' + upf.frame_id_str_given_frame_id(frame_id, self.input_frame_id_format) + step_suffix # RENDER @@ -1407,10 +1114,7 @@ def save_heliostat_spec( # Write to disk. output_file_body = hel_name + ' heliostat_spec' explain = output_file_body # I don't feel like something fancier. - print( - 'In save_heliostat_spec(), saving heliostat_spec to file:', - os.path.join(output_dir, output_file_body), - ) + print('In save_heliostat_spec(), saving heliostat_spec to file:', os.path.join(output_dir, output_file_body)) output_dir_body_ext = ft.write_csv_file( explain, # Explanatory string to include in notification output. None to skip. output_dir, # Directory to write file. See below if not exist. @@ -1430,9 +1134,7 @@ def save_frame_dict_parameters( hel_name, dict_of_frame_dicts, output_dir ): # ?? 
SCAFFOLDING RCB -- FRAME_CAMERA_POSE_DICT SHOULD BE A CLASS, AND THIS SHOULD BE A CLASS MEMBER FUNCTION # Prepare output. - heading_line = ( - 'frame_id,rvec_x,rvec_y,rvec_z,tvec_x,tvec_y,tvec_z,n_missing,use_for_metrology' - ) + heading_line = 'frame_id,rvec_x,rvec_y,rvec_z,tvec_x,tvec_y,tvec_z,n_missing,use_for_metrology' data_lines = [] for frame_id in dt.sorted_keys(dict_of_frame_dicts): # Fetch frame parameters. @@ -1456,10 +1158,7 @@ def save_frame_dict_parameters( # Write to disk. output_file_body = hel_name + '_frame_dict_parameters' explain = output_file_body # I don't feel like something fancier. - print( - 'In save_frame_dict_parameters(), saving camera poses to file:', - os.path.join(output_dir, output_file_body), - ) + print('In save_frame_dict_parameters(), saving camera poses to file:', os.path.join(output_dir, output_file_body)) output_dir_body_ext = ft.write_csv_file( explain, # Explanatory string to include in notification output. None to skip. output_dir, # Directory to write file. See below if not exist. @@ -1487,9 +1186,7 @@ def save_search_log(search_error_log, hel_name, output_dir): return output_dir_body_ext -def save_search_overall_error_history( - search_overall_error_history, hel_name, output_dir -): +def save_search_overall_error_history(search_overall_error_history, hel_name, output_dir): # Prepare output. heading_line = 'iteration,overall_error' data_lines = [] @@ -1514,9 +1211,7 @@ def save_search_overall_error_history( def save_search_facet_error_history(search_facet_error_history, hel_name, output_dir): # Prepare output. - heading_line = ( - 'iteration,facet_idx,best_var_name,best_min_error,best_min_error_del_var' - ) + heading_line = 'iteration,facet_idx,best_var_name,best_min_error,best_min_error_del_var' data_lines = [] for fb in search_facet_error_history: iteration = fb[0] @@ -1526,11 +1221,7 @@ def save_search_facet_error_history(search_facet_error_history, hel_name, output best_min_error_del_var = fb[4] data_lines.append( '{0:d},{1:d},{2:s},{3:.10f},{4:.6f}'.format( - iteration, - facet_idx, - best_var_name, - best_min_error, - best_min_error_del_var, + iteration, facet_idx, best_var_name, best_min_error, best_min_error_del_var ) ) # Write to disk. @@ -1548,9 +1239,7 @@ def save_search_facet_error_history(search_facet_error_history, hel_name, output return output_dir_body_ext -def save_computation_complexity( - calls_to_solve_pnp, calls_to_project_points, hel_name, output_dir -): +def save_computation_complexity(calls_to_solve_pnp, calls_to_project_points, hel_name, output_dir): # Assemble data. data_lines = [] data_lines.append('calls_to_solve_pnp,' + str(calls_to_solve_pnp)) diff --git a/contrib/app/ufacet-s/helio_scan/lib/KeyFrameCornerSearch.py b/contrib/app/ufacet-s/helio_scan/lib/KeyFrameCornerSearch.py index 5e09cce4..061cc719 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/KeyFrameCornerSearch.py +++ b/contrib/app/ufacet-s/helio_scan/lib/KeyFrameCornerSearch.py @@ -17,13 +17,9 @@ from DEPRECATED_utils import * # ?? SCAFFOLDING RCB -- ELIMINATE THIS from DEPRECATED_save_read import * # ?? 
SCAFFOLDING RCB -- ELIMINATE THIS import FrameNameXyList as fnxl -from opencsp.common.lib.render_control.RenderControlKeyCorners import ( - RenderControlKeyCorners, -) +from opencsp.common.lib.render_control.RenderControlKeyCorners import RenderControlKeyCorners -Component = NewType( - "Component", dict[str, Union[str, list[int], list[float], list[list[int]]]] -) +Component = NewType("Component", dict[str, Union[str, list[int], list[float], list[list[int]]]]) class KeyFrameCornerSearch: @@ -39,9 +35,7 @@ def __init__( key_frame_id, # Numerical key frame index. Uniquely determines the frame within the video. key_frame_id_str, # Not the same as str(key_frame_id), because this includes the proper number of leading zeros, etc. key_frame_img: np.ndarray, # The key frame image, already loaded. - list_of_name_polygons: list[ - tuple[str, list[list[int]]] - ], # List of expected [hel_name, polygon] pairs. + list_of_name_polygons: list[tuple[str, list[list[int]]]], # List of expected [hel_name, polygon] pairs. specifications, # Solar field specifications. # ?? SCAFFOLDING RCB -- REPLACE THIS WITH MASTER INFORMATION LOADED FROM DISK FILES. # Input/output sources. output_construction_dir, # Where to save the detailed image processing step-by-step plots. @@ -55,9 +49,7 @@ def __init__( Retrieve results with projected_fnxl() """ - print( - 'In KeyFrameCornerSearch.__init__()...' - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print('In KeyFrameCornerSearch.__init__()...') # ?? SCAFFOLDING RCB -- TEMPORARY # Store input. self.key_frame_id = key_frame_id @@ -89,20 +81,14 @@ def __init__( # Facet boundaries self.frame['boundaries'], self.frame['boundaries_img'] = self.facet_boundaries() # Connected_components - (self.frame['components'], self.frame['components_img']) = ( - self.connected_components() - ) + (self.frame['components'], self.frame['components_img']) = self.connected_components() # Filtered connected_components - (self.frame['filt_components'], self.frame['filt_components_img']) = ( - self.filter_connected_components() - ) + (self.frame['filt_components'], self.frame['filt_components_img']) = self.filter_connected_components() # TODO BGB make sure none of the components bridge the gap between mirrors # Fitted lines connected components self.frame['fitted_lines_components'] = self.fitted_lines_connected_components() # Line inliers - self.frame['fitted_lines_inliers_components'] = ( - self.fitted_lines_inliers_components() - ) + self.frame['fitted_lines_inliers_components'] = self.fitted_lines_inliers_components() # Corners self.frame['corners'] = self.find_corners() # Facets @@ -139,16 +125,12 @@ def successful(self): """ Returns true if the image processing successfully produced final corners. """ - return ('all_projected_corners' in self.frame) and ( - len(self.frame['all_projected_corners']) > 0 - ) + return ('all_projected_corners' in self.frame) and (len(self.frame['all_projected_corners']) > 0) # IMAGE PROCESSING def draw_img_polygons(self): - print( - 'In KeyFrameCornerSearch.draw_img_polygons(), entering routine...' - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print('In KeyFrameCornerSearch.draw_img_polygons(), entering routine...') # ?? SCAFFOLDING RCB -- TEMPORARY if self.render_control.draw_img_box: img = self.frame['key_frame_img'] plt.figure() @@ -156,15 +138,9 @@ def draw_img_polygons(self): for ( name_polygon - ) in ( - self.list_of_name_polygons - ): # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - name = name_polygon[ - 0 - ] # ?? 
SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - polygon = name_polygon[ - 1 - ] # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + ) in self.list_of_name_polygons: # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + name = name_polygon[0] # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + polygon = name_polygon[1] # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. color = 'g' # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. # Draw the polygon. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. closed_xy_list = polygon.copy() @@ -202,11 +178,7 @@ def draw_img_polygons(self): ) # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. plt.savefig( - os.path.join( - self.frame['output_construction_dir'], - self.key_frame_id_str + '_00_img_box.png', - ), - dpi=500, + os.path.join(self.frame['output_construction_dir'], self.key_frame_id_str + '_00_img_box.png'), dpi=500 ) plt.close() @@ -274,9 +246,7 @@ def skyhsv(self): # sky_img = cv.bitwise_and(img_rgb, img_rgb, mask=sky) if self.render_control.draw_skyhsv: save_image( - img=sky, - imgname=self.key_frame_id_str + '_02_skyhsv.png', - path=self.frame['output_construction_dir'], + img=sky, imgname=self.key_frame_id_str + '_02_skyhsv.png', path=self.frame['output_construction_dir'] ) if self.render_control.draw_skyhsv_fig: save_fig( @@ -358,16 +328,13 @@ def facet_boundaries(self): boundaries: a 0 (not a boundary pixel) or 1 (boundary pixel) ndarray that is the same size as self.frame['key_frame_img'] boundaries_img: an ndarray with with boundary pixels colored based on whether they are a top/left/right/bottom edge pixel """ - print( - 'In KeyFrameCornerSearch.facet_boundaries()...' - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print('In KeyFrameCornerSearch.facet_boundaries()...') # ?? SCAFFOLDING RCB -- TEMPORARY img = self.frame['key_frame_img'] edges = self.frame['edges'] row_edges, col_edges = np.nonzero(edges) print( - 'In KeyFrameCornerSearch.facet_boundaries(), number of edge pixels len(row_edges) =', - len(row_edges), + 'In KeyFrameCornerSearch.facet_boundaries(), number of edge pixels len(row_edges) =', len(row_edges) ) # ?? SCAFFOLDING RCB -- TEMPORARY boundaries_img = 0 * img boundaries_rows = [] @@ -433,12 +400,7 @@ def facet_boundaries(self): return boundaries, boundaries_img def is_boundary_pixel( - self, - row: int, - col: int, - btype: str, - required_sky_width: int = None, - ignore_margin: int = None, + self, row: int, col: int, btype: str, required_sky_width: int = None, ignore_margin: int = None ) -> bool: """Checks if the pixel at the given row/col is a mirror edge boundary pixel (it is assumed to be an edge pixel). @@ -503,9 +465,7 @@ def connected_components(self) -> tuple[list[Component], np.ndarray]: components: the dict['original_pixels'] entries contains the list of component pixels. component_img: the image with the components drawn on top of it.""" - def construct_component( - row: int, col: int, btype: str, color: list[int], img: np.ndarray - ) -> Component: + def construct_component(row: int, col: int, btype: str, color: list[int], img: np.ndarray) -> Component: """Builds out a list of adjacent pixels that all have the same color (including diagonals). 
Parameters @@ -534,11 +494,7 @@ def construct_component( and [r - 1, c - 1] not in horizon ): horizon.append([r - 1, c - 1]) - if ( - ((r - 1) >= 0) - and (img[r - 1, c, :] == color).all(axis=-1) - and [r - 1, c] not in horizon - ): + if ((r - 1) >= 0) and (img[r - 1, c, :] == color).all(axis=-1) and [r - 1, c] not in horizon: horizon.append([r - 1, c]) if ( ((r - 1) >= 0) @@ -547,17 +503,9 @@ def construct_component( and [r - 1, c + 1] not in horizon ): horizon.append([r - 1, c + 1]) - if ( - ((c - 1) >= 0) - and (img[r, c - 1, :] == color).all(axis=-1) - and [r, c - 1] not in horizon - ): + if ((c - 1) >= 0) and (img[r, c - 1, :] == color).all(axis=-1) and [r, c - 1] not in horizon: horizon.append([r, c - 1]) - if ( - ((c + 1) < max_col) - and (img[r, c + 1, :] == color).all(axis=-1) - and [r, c + 1] not in horizon - ): + if ((c + 1) < max_col) and (img[r, c + 1, :] == color).all(axis=-1) and [r, c + 1] not in horizon: horizon.append([r, c + 1]) if ( ((r + 1) < max_row) @@ -566,11 +514,7 @@ def construct_component( and [r + 1, c - 1] not in horizon ): horizon.append([r + 1, c - 1]) - if ( - ((r + 1) < max_row) - and (img[r + 1, c, :] == color).all(axis=-1) - and [r + 1, c] not in horizon - ): + if ((r + 1) < max_row) and (img[r + 1, c, :] == color).all(axis=-1) and [r + 1, c] not in horizon: horizon.append([r + 1, c]) if ( ((r + 1) < max_row) @@ -592,9 +536,7 @@ def construct_component_img(components, img): components_img[pixel[0], pixel[1], :] = color return components_img - print( - 'In KeyFrameCornerSearch.connected_components()...' - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print('In KeyFrameCornerSearch.connected_components()...') # ?? SCAFFOLDING RCB -- TEMPORARY img = self.frame['key_frame_img'] boundaries = self.frame['boundaries'] boundaries_img = self.frame['boundaries_img'] @@ -639,9 +581,7 @@ def construct_component_img(components, img): if self.render_control.write_components: save_connected_components( - components=components, - filename='components.csv', - path=self.frame['output_construction_dir'], + components=components, filename='components.csv', path=self.frame['output_construction_dir'] ) return components, components_img @@ -658,9 +598,7 @@ def construct_component_img(components, img): components_img[pixel[0], pixel[1], :] = color return components_img - print( - 'In KeyFrameCornerSearch.filter_connected_components()...' - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print('In KeyFrameCornerSearch.filter_connected_components()...') # ?? 
SCAFFOLDING RCB -- TEMPORARY img = self.frame['key_frame_img'] components = self.frame['components'] @@ -669,10 +607,7 @@ def construct_component_img(components, img): if len(component['original_pixels']) >= COMPONENT_THRESHOLD: filtered_components.append(component) - if ( - self.render_control.draw_filt_components - or self.render_control.draw_filt_components_fig - ): + if self.render_control.draw_filt_components or self.render_control.draw_filt_components_fig: filt_connected_comp_img = construct_component_img(filtered_components, img) if self.render_control.draw_filt_components: save_image( @@ -689,18 +624,14 @@ def construct_component_img(components, img): if self.render_control.write_filt_components: save_connected_components( - filtered_components, - filename='filt_components.csv', - path=self.frame['output_construction_dir'], + filtered_components, filename='filt_components.csv', path=self.frame['output_construction_dir'] ) return filtered_components, filt_connected_comp_img def fitted_lines_connected_components(self, type_fit='regression'): """Does an initial line fit on the pixels in the components.""" - print( - 'In KeyFrameCornerSearch.fitted_lines_connected_components()...' - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print('In KeyFrameCornerSearch.fitted_lines_connected_components()...') # ?? SCAFFOLDING RCB -- TEMPORARY line_components = [] components = self.frame['filt_components'] for component in components: @@ -731,9 +662,7 @@ def fitted_lines_inliers_components(self): def find_inliers_component(component: Component): A, B, C = component['original_line_hom_coef'] # from Ax + By + C = 0 - btype: str = component[ - 'boundary_type' - ] # 'left', 'right', 'top', or 'bottom' + btype: str = component['boundary_type'] # 'left', 'right', 'top', or 'bottom' original_pixels: list[list[int]] = component['original_pixels'] required_inliers = int(round(INLIERS_THRESHOLD * len(original_pixels))) @@ -762,9 +691,7 @@ def find_inliers_component(component: Component): component['inliers_line_residual'] = component['original_line_residual'] component['inliers_line_points'] = component['original_line_points'] else: - row, col = np.array([a[0] for a in inliers]), np.array( - [a[1] for a in inliers] - ) + row, col = np.array([a[0] for a in inliers]), np.array([a[1] for a in inliers]) if btype == 'left' or btype == 'right': # expected horizontal line in terms of row x, y = row, col @@ -791,14 +718,8 @@ def find_inliers_component(component: Component): x1, y1, ] # point at first x pixel [first y for left/right], with the y [x] adjusted to lie on the fit line - A_inl, B_inl, C_inl = set_proper_hom_coef_sign( - start_point, btype, A_inl, B_inl, C_inl - ) - outliers = [ - [pixel[0], pixel[1]] - for pixel in original_pixels - if pixel not in inliers - ] + A_inl, B_inl, C_inl = set_proper_hom_coef_sign(start_point, btype, A_inl, B_inl, C_inl) + outliers = [[pixel[0], pixel[1]] for pixel in original_pixels if pixel not in inliers] component['tolerance'] = tolerance component['outliers_pixels'] = outliers component['inliers_pixels'] = inliers @@ -808,9 +729,7 @@ def find_inliers_component(component: Component): return component - print( - 'In KeyFrameCornerSearch.fitted_lines_inliers_components()...' - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print('In KeyFrameCornerSearch.fitted_lines_inliers_components()...') # ?? 
SCAFFOLDING RCB -- TEMPORARY components = self.frame['fitted_lines_components'] inliers_components: list[Component] = [] for component in components: @@ -837,18 +756,14 @@ def find_corners(self, corners_type=None): ------- corners: a list of corner dict lists [[TL],[TR],[BR],[BL]], where each dict contains the xy 'point' for the corner. """ - print( - 'In KeyFrameCornerSearch.find_corners()...' - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print('In KeyFrameCornerSearch.find_corners()...') # ?? SCAFFOLDING RCB -- TEMPORARY top_left_corners: dict[str, Any] = [] top_right_corners: dict[str, Any] = [] bottom_right_corners: dict[str, Any] = [] bottom_left_corners: dict[str, Any] = [] output_construction_dir = self.frame['output_construction_dir'] corners_types = ( - [corners_type] - if (corners_type != None) - else ['top_left', 'top_right', 'bottom_right', 'bottom_left'] + [corners_type] if (corners_type != None) else ['top_left', 'top_right', 'bottom_right', 'bottom_left'] ) max_row = self.frame['key_frame_img'].shape[0] @@ -916,25 +831,13 @@ def find_corners(self, corners_type=None): # keep the component's representative pixels = component['inliers_pixels'] if corners_type == 'top_left': # left edge - representative = sorted( - pixels, key=lambda pix: pix[0], reverse=False - )[ - 0 - ] # ascending + representative = sorted(pixels, key=lambda pix: pix[0], reverse=False)[0] # ascending elif corners_type == 'top_right': # top edge - representative = sorted( - pixels, key=lambda pix: pix[1], reverse=True - )[ - 0 - ] # descending + representative = sorted(pixels, key=lambda pix: pix[1], reverse=True)[0] # descending elif corners_type == 'bottom_right': # right edge - representative = sorted( - pixels, key=lambda pix: pix[0], reverse=True - )[0] + representative = sorted(pixels, key=lambda pix: pix[0], reverse=True)[0] elif corners_type == 'bottom_left': # bottom edge - representative = sorted( - pixels, key=lambda pix: pix[1], reverse=False - )[0] + representative = sorted(pixels, key=lambda pix: pix[1], reverse=False)[0] representative_tomatched = representative # keep the closest ones @@ -942,46 +845,28 @@ def find_corners(self, corners_type=None): for candidate in components_candidates: pixels = candidate['inliers_pixels'] if corners_type == 'top_left': # top edge - representative = sorted( - pixels, key=lambda pix: pix[1], reverse=False - )[0] + representative = sorted(pixels, key=lambda pix: pix[1], reverse=False)[0] elif corners_type == 'top_right': # right edge - representative = sorted( - pixels, key=lambda pix: pix[0], reverse=False - )[0] + representative = sorted(pixels, key=lambda pix: pix[0], reverse=False)[0] elif corners_type == 'bottom_right': # bottom edge - representative = sorted( - pixels, key=lambda pix: pix[1], reverse=True - )[0] + representative = sorted(pixels, key=lambda pix: pix[1], reverse=True)[0] elif corners_type == 'bottom_left': # left edge - representative = sorted( - pixels, key=lambda pix: pix[0], reverse=True - )[0] + representative = sorted(pixels, key=lambda pix: pix[0], reverse=True)[0] representatives_candidates.append(representative) distances = [ - euclidean_distance(representative_tomatched, candidate) - for candidate in representatives_candidates + euclidean_distance(representative_tomatched, candidate) for candidate in representatives_candidates ] found_candidate_indx = np.argsort(np.array(distances))[0] found_candidate = components_candidates[found_candidate_indx] component_points = component['inliers_line_points'] - component_start_point = [ - 
component_points[0], - component_points[1], - ] # col, row + component_start_point = [component_points[0], component_points[1]] # col, row component_end_point = [component_points[2], component_points[3]] found_candidate_points = found_candidate['inliers_line_points'] - found_candidate_start_point = [ - found_candidate_points[0], - found_candidate_points[1], - ] - found_candidate_end_point = [ - found_candidate_points[2], - found_candidate_points[3], - ] + found_candidate_start_point = [found_candidate_points[0], found_candidate_points[1]] + found_candidate_end_point = [found_candidate_points[2], found_candidate_points[3]] corner = intersection_point( component_start_point[0], @@ -1000,16 +885,12 @@ def find_corners(self, corners_type=None): avg_row = sum([p[0] for p in pixels]) / len(pixels) avg_col = sum([p[1] for p in pixels]) / len(pixels) - distance1 = euclidean_distance( - [corner[1], corner[0]], [avg_row, avg_col] - ) + distance1 = euclidean_distance([corner[1], corner[0]], [avg_row, avg_col]) pixels = found_candidate['inliers_pixels'] avg_row = sum([p[0] for p in pixels]) / len(pixels) avg_col = sum([p[1] for p in pixels]) / len(pixels) - distance2 = euclidean_distance( - [corner[1], corner[0]], [avg_row, avg_col] - ) + distance2 = euclidean_distance([corner[1], corner[0]], [avg_row, avg_col]) distance = max([distance1, distance2]) @@ -1022,12 +903,7 @@ def find_corners(self, corners_type=None): # < euclidean_distance(representative_tomatched, [avg_row, avg_col]))): # continue - if ( - corner[1] >= 0 - and corner[1] < max_row - and corner[0] >= 0 - and corner[0] < max_col - ): + if corner[1] >= 0 and corner[1] < max_row and corner[0] >= 0 and corner[0] < max_col: corner_structure = {} key1 = 'edge_coeff' key2 = 'edge_pixels' @@ -1092,18 +968,10 @@ def find_corners(self, corners_type=None): plt.figure() plt.imshow(self.frame['edges_img']) plt.scatter( - [x[0] for x in top_left], - [x[1] for x in top_left], - marker='o', - facecolor=PLT_TOP_LEFT_COLOR, - s=5, + [x[0] for x in top_left], [x[1] for x in top_left], marker='o', facecolor=PLT_TOP_LEFT_COLOR, s=5 ) plt.scatter( - [x[0] for x in top_right], - [x[1] for x in top_right], - marker='o', - facecolor=PLT_TOP_RIGHT_COLOR, - s=5, + [x[0] for x in top_right], [x[1] for x in top_right], marker='o', facecolor=PLT_TOP_RIGHT_COLOR, s=5 ) plt.scatter( [x[0] for x in bottom_right], @@ -1120,11 +988,7 @@ def find_corners(self, corners_type=None): s=5, ) plt.savefig( - os.path.join( - self.frame['output_construction_dir'], - self.key_frame_id_str + '_08_corners.png', - ), - dpi=200, + os.path.join(self.frame['output_construction_dir'], self.key_frame_id_str + '_08_corners.png'), dpi=200 ) plt.close() @@ -1157,21 +1021,11 @@ def find_corners(self, corners_type=None): corners_type='bottom_left', ) - return [ - top_left_corners, - top_right_corners, - bottom_right_corners, - bottom_left_corners, - ] + return [top_left_corners, top_right_corners, bottom_right_corners, bottom_left_corners] def facets(self): print('In KeyFrameCornerSearch.facets()...') # ?? 
SCAFFOLDING RCB -- TEMPORARY - ( - top_left_corners, - top_right_corners, - bottom_right_corners, - bottom_left_corners, - ) = self.frame['corners'] + (top_left_corners, top_right_corners, bottom_right_corners, bottom_left_corners) = self.frame['corners'] already_matched_corners = [] facets = [] # For each Top Left corner @@ -1182,8 +1036,7 @@ def facets(self): A_left, B_left, C_left = top_left_corner['left_edge_coeff'] # distances with top right corners top_right_distances = [ - euclidean_distance(top_left_point, top_right_corner['point']) - for top_right_corner in top_right_corners + euclidean_distance(top_left_point, top_right_corner['point']) for top_right_corner in top_right_corners ] indices = np.argsort(np.array(top_right_distances)) flag = True @@ -1204,20 +1057,13 @@ def facets(self): ) distance = euclidean_distance(top_left_point, interpoint) - if ( - A_left * top_right_point[0] + B_left * top_right_point[1] + C_left - < 0 - ) and ( # on the right side + if (A_left * top_right_point[0] + B_left * top_right_point[1] + C_left < 0) and ( # on the right side distance <= INTER_POINT_DISTANCE ): # ?? MAGIC NUMBER flag = False break - if ( - flag - or top_left_corner in already_matched_corners - or top_right_corner in already_matched_corners - ): + if flag or top_left_corner in already_matched_corners or top_right_corner in already_matched_corners: continue # Finding Bottom Right corner @@ -1249,17 +1095,9 @@ def facets(self): distance = euclidean_distance(top_right_point, interpoint) if ( - ( - A_top * bottom_right_point[0] - + B_top * bottom_right_point[1] - + C_top - < 0 - ) # on the right side + (A_top * bottom_right_point[0] + B_top * bottom_right_point[1] + C_top < 0) # on the right side and ( - A_left * bottom_right_point[0] - + B_left * bottom_right_point[1] - + C_left - < 0 + A_left * bottom_right_point[0] + B_left * bottom_right_point[1] + C_left < 0 ) # on the right side and (distance <= INTER_POINT_DISTANCE) ): # ?? MAGIC NUMBER @@ -1311,18 +1149,8 @@ def facets(self): distance = euclidean_distance(bottom_right_point, interpoint) distance_alt = euclidean_distance(interpoint_alt, bottom_left_point) if ( - ( - A_right * bottom_left_point[0] - + B_right * bottom_left_point[1] - + C_right - < 0 - ) # on the right side - and ( - A_top * bottom_left_point[0] - + B_top * bottom_left_point[1] - + C_top - < 0 - ) # on the below side + (A_right * bottom_left_point[0] + B_right * bottom_left_point[1] + C_right < 0) # on the right side + and (A_top * bottom_left_point[0] + B_top * bottom_left_point[1] + C_top < 0) # on the below side and (distance <= INTER_POINT_DISTANCE) # ?? MAGIC NUMBER and (distance_alt <= INTER_POINT_DISTANCE) ): # ?? 
MAGIC NUMBER @@ -1364,18 +1192,10 @@ def facets(self): required_sky_width = int(REQUIRED_SKY_WIDTH / 4) if ( - self.is_boundary_pixel( - r, c, 'left', required_sky_width=required_sky_width - ) - and self.is_boundary_pixel( - r, c, 'top', required_sky_width=required_sky_width - ) - and self.is_boundary_pixel( - r, c, 'right', required_sky_width=required_sky_width - ) - and self.is_boundary_pixel( - r, c, 'bottom', required_sky_width=required_sky_width - ) + self.is_boundary_pixel(r, c, 'left', required_sky_width=required_sky_width) + and self.is_boundary_pixel(r, c, 'top', required_sky_width=required_sky_width) + and self.is_boundary_pixel(r, c, 'right', required_sky_width=required_sky_width) + and self.is_boundary_pixel(r, c, 'bottom', required_sky_width=required_sky_width) ): flag = True if not flag: @@ -1402,53 +1222,23 @@ def facets(self): bottom_right_corner = facet['bottom_right']['point'] bottom_left_corner = facet['bottom_left']['point'] center = facet['center'] - plt.scatter( - top_left_corner[0], - top_left_corner[1], - facecolor=PLT_TOP_LEFT_COLOR, - s=1, - ) - plt.scatter( - top_right_corner[0], - top_right_corner[1], - facecolor=PLT_TOP_RIGHT_COLOR, - s=1, - ) - plt.scatter( - bottom_right_corner[0], - bottom_right_corner[1], - facecolor=PLT_BOTTOM_RIGHT_COLOR, - s=1, - ) - plt.scatter( - bottom_left_corner[0], - bottom_left_corner[1], - facecolor=PLT_BOTTOM_LEFT_COLOR, - s=1, - ) + plt.scatter(top_left_corner[0], top_left_corner[1], facecolor=PLT_TOP_LEFT_COLOR, s=1) + plt.scatter(top_right_corner[0], top_right_corner[1], facecolor=PLT_TOP_RIGHT_COLOR, s=1) + plt.scatter(bottom_right_corner[0], bottom_right_corner[1], facecolor=PLT_BOTTOM_RIGHT_COLOR, s=1) + plt.scatter(bottom_left_corner[0], bottom_left_corner[1], facecolor=PLT_BOTTOM_LEFT_COLOR, s=1) plt.scatter(center[0], center[1], facecolor=PLT_CENTER_COLOR, s=1) plt.savefig( - os.path.join( - self.frame['output_construction_dir'], - self.key_frame_id_str + '_09_facets.png', - ), - dpi=200, + os.path.join(self.frame['output_construction_dir'], self.key_frame_id_str + '_09_facets.png'), dpi=200 ) plt.close() if self.render_control.write_facets: - save_corners_facets( - facets=facets, - filename='facets.csv', - path=self.frame['output_construction_dir'], - ) + save_corners_facets(facets=facets, filename='facets.csv', path=self.frame['output_construction_dir']) return facets def filter_facets_polygons(self): - print( - 'In KeyFrameCornerSearch.filter_facets_polygons()...' - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print('In KeyFrameCornerSearch.filter_facets_polygons()...') # ?? 
SCAFFOLDING RCB -- TEMPORARY all_facets = self.frame['facets'] # filter the facets @@ -1465,12 +1255,7 @@ def filter_facets_polygons(self): x_max = max(x_list) y_min = min(y_list) y_max = max(y_list) - if ( - (x_min < center[0]) - and (center[0] < x_max) - and (y_min < center[1]) - and (center[1] < y_max) - ): + if (x_min < center[0]) and (center[0] < x_max) and (y_min < center[1]) and (center[1] < y_max): in_polygon = True break if in_polygon: @@ -1506,10 +1291,7 @@ def filter_facets_polygons(self): for facet in filtered_facets: center = facet['center'] if (center not in assigned_centers) and ( - (x_min < center[0]) - and (center[0] < x_max) - and (y_min < center[1]) - and (center[1] < y_max) + (x_min < center[0]) and (center[0] < x_max) and (y_min < center[1]) and (center[1] < y_max) ): assigned_centers.append(center) heliostat['facets'].append(facet) @@ -1524,36 +1306,13 @@ def filter_facets_polygons(self): bottom_right_corner = facet['bottom_right']['point'] bottom_left_corner = facet['bottom_left']['point'] center = facet['center'] - plt.scatter( - top_left_corner[0], - top_left_corner[1], - facecolor=PLT_TOP_LEFT_COLOR, - s=1, - ) - plt.scatter( - top_right_corner[0], - top_right_corner[1], - facecolor=PLT_TOP_RIGHT_COLOR, - s=1, - ) - plt.scatter( - bottom_right_corner[0], - bottom_right_corner[1], - facecolor=PLT_BOTTOM_RIGHT_COLOR, - s=1, - ) - plt.scatter( - bottom_left_corner[0], - bottom_left_corner[1], - facecolor=PLT_BOTTOM_LEFT_COLOR, - s=1, - ) + plt.scatter(top_left_corner[0], top_left_corner[1], facecolor=PLT_TOP_LEFT_COLOR, s=1) + plt.scatter(top_right_corner[0], top_right_corner[1], facecolor=PLT_TOP_RIGHT_COLOR, s=1) + plt.scatter(bottom_right_corner[0], bottom_right_corner[1], facecolor=PLT_BOTTOM_RIGHT_COLOR, s=1) + plt.scatter(bottom_left_corner[0], bottom_left_corner[1], facecolor=PLT_BOTTOM_LEFT_COLOR, s=1) plt.scatter(center[0], center[1], facecolor=PLT_CENTER_COLOR, s=1) plt.savefig( - os.path.join( - self.frame['output_construction_dir'], - self.key_frame_id_str + '_10_filtered_facets.png', - ), + os.path.join(self.frame['output_construction_dir'], self.key_frame_id_str + '_10_filtered_facets.png'), dpi=200, ) plt.close() @@ -1570,8 +1329,7 @@ def filter_facets_polygons(self): plt.scatter(center[0], center[1], facecolor=color, s=1) plt.savefig( os.path.join( - self.frame['output_construction_dir'], - self.key_frame_id_str + '_11_filtered_heliostats.png', + self.frame['output_construction_dir'], self.key_frame_id_str + '_11_filtered_heliostats.png' ), dpi=200, ) @@ -1583,22 +1341,13 @@ def top_row_facets(self): """ Assumption: We trust first row in terms of correct found centers """ - print( - 'In KeyFrameCornerSearch.top_row_facets()...' - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print('In KeyFrameCornerSearch.top_row_facets()...') # ?? 
SCAFFOLDING RCB -- TEMPORARY for heliostat in self.frame['heliostats']: facets = heliostat['facets'] # this is a list - facets = sorted( - facets, key=lambda f: f['center'][1] - ) # sort in terms of rows - top_row_facets = facets[ - : self.specifications.facets_per_row - ] # top row facets + facets = sorted(facets, key=lambda f: f['center'][1]) # sort in terms of rows + top_row_facets = facets[: self.specifications.facets_per_row] # top row facets keys = [ - [ - ['bottom_right', 'bottom_edge_coeff'], - ['bottom_left', 'bottom_edge_coeff'], - ], + [['bottom_right', 'bottom_edge_coeff'], ['bottom_left', 'bottom_edge_coeff']], [['top_left', 'top_edge_coeff'], ['top_right', 'top_edge_coeff']], ] for type_of_keys in keys: @@ -1632,14 +1381,9 @@ def top_row_facets(self): for heliostat in self.frame['heliostats']: top_row_facets = heliostat['top_row_facets'] for facet in top_row_facets: - plt.scatter( - facet['center'][0], facet['center'][1], s=1, facecolor='m' - ) + plt.scatter(facet['center'][0], facet['center'][1], s=1, facecolor='m') plt.savefig( - os.path.join( - self.frame['output_construction_dir'], - self.key_frame_id_str + '_14_top_row_facets.png', - ), + os.path.join(self.frame['output_construction_dir'], self.key_frame_id_str + '_14_top_row_facets.png'), dpi=200, ) plt.close() @@ -1658,18 +1402,12 @@ def find_combinations(inp, out): out.append(inp[0]) find_combinations(inp[1:], out[:]) - print( - 'In KeyFrameCornerSearch.classify_top_row_facets()...' - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print('In KeyFrameCornerSearch.classify_top_row_facets()...') # ?? SCAFFOLDING RCB -- TEMPORARY for heliostat in self.frame['heliostats']: top_row_facets = heliostat['top_row_facets'] - top_row_facets = sorted( - top_row_facets, key=lambda f: f['center'][0] - ) # sort in terms of column + top_row_facets = sorted(top_row_facets, key=lambda f: f['center'][0]) # sort in terms of column ids = [i for i in range(0, self.specifications.facets_per_row)] - if ( - len(top_row_facets) == self.specifications.facets_per_row - ): # all facets have been identified + if len(top_row_facets) == self.specifications.facets_per_row: # all facets have been identified for facet_indx in range(0, len(top_row_facets)): facet = top_row_facets[facet_indx] facet['id'] = facet_indx @@ -1678,9 +1416,7 @@ def find_combinations(inp, out): # all diferent combinations, brute-force - Complexity O(self.specifications.facets_per_row!) 
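# --- Illustrative sketch, separate from the patched code -------------------
# classify_top_row_facets() in the surrounding hunk labels the top-row facets
# by brute force: it enumerates candidate assignments of facet ids, solves a
# PnP problem for each, and keeps the assignment with the smallest
# reprojection error.  A minimal version of that idea with plain OpenCV calls
# is sketched below; camera_matrix, dist_coeffs, corners3d_by_id, and the
# "4 corners per facet, corners only" layout are assumptions for the sketch,
# not the project's own solvePNP() wrapper (which also returns mtx and dist).
import itertools
import numpy as np
import cv2 as cv

def best_facet_labeling(img_corners2d, corners3d_by_id, candidate_ids, camera_matrix, dist_coeffs):
    """Return (best id permutation, its RMS reprojection error in pixels)."""
    pts2d = np.asarray(img_corners2d, dtype=np.float32)  # 4 observed corners per facet (assumed)
    n_facets = len(pts2d) // 4
    best_perm, best_err = None, np.inf
    for perm in itertools.permutations(candidate_ids, r=n_facets):
        # 3D corners of the hypothesized facets, in the same order as pts2d.
        pts3d = np.concatenate([corners3d_by_id[i] for i in perm]).astype(np.float32)
        ok, rvec, tvec = cv.solvePnP(pts3d, pts2d, camera_matrix, dist_coeffs)
        if not ok:
            continue
        proj, _ = cv.projectPoints(pts3d, rvec, tvec, camera_matrix, dist_coeffs)
        err = float(np.sqrt(np.mean(np.sum((proj.reshape(-1, 2) - pts2d) ** 2, axis=1))))
        if err < best_err:
            best_perm, best_err = perm, err
    return best_perm, best_err
# ---------------------------------------------------------------------------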
all_combinations = [] find_combinations(ids, []) - combinations = [ - x for x in all_combinations if len(x) == len(top_row_facets) - ] + combinations = [x for x in all_combinations if len(x) == len(top_row_facets)] # image points img_centers2d = [] @@ -1704,25 +1440,18 @@ def find_combinations(inp, out): for i in combination: obj_centers3d.append(centers3d[i]) corners_indx = i * self.specifications.corners_per_facet - for indx in range( - corners_indx, - corners_indx + self.specifications.corners_per_facet, - ): + for indx in range(corners_indx, corners_indx + self.specifications.corners_per_facet): obj_corners3d.append(corners3d[indx]) ## Projection points3d = np.array(obj_corners3d + obj_centers3d).astype('float32') h, w = self.frame['key_frame_img'].shape[:2] - _, _, _, _, error = solvePNP( - points3d, points2d, h, w, pnptype=self.solvePnPtype - ) + _, _, _, _, error = solvePNP(points3d, points2d, h, w, pnptype=self.solvePnPtype) proj_errors.append(error) # select based on projected error best_indx = np.argsort(np.array(proj_errors))[0] selected_combination = combinations[best_indx] - for facet_indx, i in zip( - range(0, len(top_row_facets)), selected_combination - ): + for facet_indx, i in zip(range(0, len(top_row_facets)), selected_combination): facet = top_row_facets[facet_indx] facet['id'] = i top_row_facets[facet_indx] = facet @@ -1737,25 +1466,18 @@ def find_combinations(inp, out): center = facet['center'] label = facet['id'] plt.scatter(center[0], center[1], s=1, facecolor='m') - plt.annotate( - str(label), (center[0], center[1]), color='c', fontsize=5 - ) + plt.annotate(str(label), (center[0], center[1]), color='c', fontsize=5) plt.savefig( os.path.join( - self.frame['output_construction_dir'], - self.key_frame_id_str + '_15_top_row_facets_labels.png', + self.frame['output_construction_dir'], self.key_frame_id_str + '_15_top_row_facets_labels.png' ), dpi=200, ) plt.close() - def project_and_confirm( - self, canny_levels=['tight', 'normal', 'light'], iterations=5 - ): - print( - 'In KeyFrameCornerSearch.project_and_confirm()...' - ) # ?? SCAFFOLDING RCB -- TEMPORARY + def project_and_confirm(self, canny_levels=['tight', 'normal', 'light'], iterations=5): + print('In KeyFrameCornerSearch.project_and_confirm()...') # ?? SCAFFOLDING RCB -- TEMPORARY edge_img = self.frame['edges_img'] # demonstration h, w = self.frame['key_frame_img'].shape[:2] # facet_centoids_csv = self.facet_centroids_dir_body_ext + 'csv_files/' + 'Facets_Centroids.csv' @@ -1782,9 +1504,7 @@ def project_and_confirm( for label in labels: objcenters.append(centers3d[label]) corner_indx = label * self.specifications.corners_per_facet - for indx in range( - corner_indx, corner_indx + self.specifications.corners_per_facet - ): + for indx in range(corner_indx, corner_indx + self.specifications.corners_per_facet): objcorners.append(corners3d[indx]) points3d = np.array(objcorners + objcenters).astype('float32') @@ -1811,21 +1531,14 @@ def project_and_confirm( '; len(points2d) =', len(points2d), ) # ?? 
SCAFFOLDING RCB -- TEMPORARY - mtx, dist, rvec, tvec, pnp_error = solvePNP( - points3d, points2d, h, w, pnptype=self.solvePnPtype - ) + mtx, dist, rvec, tvec, pnp_error = solvePNP(points3d, points2d, h, w, pnptype=self.solvePnPtype) - proj_corners, _ = cv.projectPoints( - np.array(corners3d).astype('float32'), rvec, tvec, mtx, dist - ) + proj_corners, _ = cv.projectPoints(np.array(corners3d).astype('float32'), rvec, tvec, mtx, dist) proj_corners = proj_corners.reshape(-1, 2) proj_corners = proj_corners.tolist() confirmed_corners, projected_corners = self.confirm( - proj_corners, - corners3d, - canny_levels=canny_levels, - iterations=iterations, + proj_corners, corners3d, canny_levels=canny_levels, iterations=iterations ) if ( @@ -1855,21 +1568,13 @@ def project_and_confirm( all_projected_corners += heliostat[ 'projected_corners' ] # ?? SCAFFOLDING RCB -- AFTER THIS REFACTOR IS COMPLETE, MAYBE THESE ARE NO LONGER NEEDED. - list_of_name_confirmed_corners.append( - [heliostat['name'], heliostat['confirmed_corners']] - ) - list_of_name_projected_corners.append( - [heliostat['name'], heliostat['projected_corners']] - ) + list_of_name_confirmed_corners.append([heliostat['name'], heliostat['confirmed_corners']]) + list_of_name_projected_corners.append([heliostat['name'], heliostat['projected_corners']]) # FrameNameXyList objects. confirmed_fnxl = fnxl.FrameNameXyList() projected_fnxl = fnxl.FrameNameXyList() - confirmed_fnxl.add_list_of_name_xy_lists( - self.key_frame_id, list_of_name_confirmed_corners - ) - projected_fnxl.add_list_of_name_xy_lists( - self.key_frame_id, list_of_name_projected_corners - ) + confirmed_fnxl.add_list_of_name_xy_lists(self.key_frame_id, list_of_name_confirmed_corners) + projected_fnxl.add_list_of_name_xy_lists(self.key_frame_id, list_of_name_projected_corners) # Store in this class object. self.frame['all_confirmed_corners'] = all_confirmed_corners self.frame['all_projected_corners'] = all_projected_corners @@ -1891,17 +1596,13 @@ def project_and_confirm( if self.render_control.write_confirmed_fnxl: confirmed_fnxl.save( os.path.join( - self.frame['output_construction_dir'], - 'csv_files', - (self.key_frame_id_str + '_confirmed_fnxl.csv'), + self.frame['output_construction_dir'], 'csv_files', (self.key_frame_id_str + '_confirmed_fnxl.csv') ) ) # ?? SCAFFOLDING RCB -- INSTEAD OF ADDING "CSV_FILES" HERE, SHOULD BE PASSED IN THAT WAY. SAVE TO ANSWER DIRECTORY? -- PROBABLY NOT; INSEAD SAVE FROM CALLER? if self.render_control.write_projected_fnxl: projected_fnxl.save( os.path.join( - self.frame['output_construction_dir'], - 'csv_files', - (self.key_frame_id_str + '_projected_fnxl.csv'), + self.frame['output_construction_dir'], 'csv_files', (self.key_frame_id_str + '_projected_fnxl.csv') ) ) # ?? SCAFFOLDING RCB -- INSTEAD OF ADDING "CSV_FILES" HERE, SHOULD BE PASSED IN THAT WAY. SAVE TO ANSWER DIRECTORY? -- PROBABLY NOT; INSEAD SAVE FROM CALLER? @@ -1911,9 +1612,7 @@ def project_and_confirm( plt.figure() plt.imshow(edge_img) for final_heliostat in final_heliostats: - found_confirmed_corners = self.filter_not_found_corners( - final_heliostat['confirmed_corners'] - ) + found_confirmed_corners = self.filter_not_found_corners(final_heliostat['confirmed_corners']) # Draw the heliostat name. if ( len(found_confirmed_corners) > 0 @@ -1945,8 +1644,7 @@ def project_and_confirm( # Save the figure. 
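# --- Illustrative sketch, separate from the patched code -------------------
# The confirm() step invoked in this hunk accepts a projected corner only when
# enough Canny edge pixels lie close to the line joining two projected
# corners.  Lines are kept in homogeneous form Ax + By + C = 0, so the same
# coefficients give both the unsigned point-to-line distance (used for
# confirmation and for the inlier fitting earlier in this file) and the signed
# value A*x + B*y + C used elsewhere to test which side of an edge a candidate
# corner falls on.  Helper names and thresholds below are assumptions for the
# sketch.
import numpy as np

def hom_line_through(p1, p2):
    """(A, B, C) of the line through p1 and p2, from the homogeneous cross product."""
    (x1, y1), (x2, y2) = p1, p2
    a, b, c = y1 - y2, x2 - x1, x1 * y2 - x2 * y1
    return (a, b, c) if (a, b) != (0, 0) else None  # None for coincident points

def point_line_distance(point, line):
    a, b, c = line
    x, y = point
    return abs(a * x + b * y + c) / np.hypot(a, b)

def edge_confirmed(corner1, corner2, edge_pixels_xy, tolerance=3, required_hits=100):
    """True when at least required_hits edge pixels lie within tolerance px of the edge line."""
    line = hom_line_through(corner1, corner2)
    if line is None:
        return False
    hits = sum(point_line_distance(p, line) <= tolerance for p in edge_pixels_xy)
    return hits >= required_hits
# ---------------------------------------------------------------------------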
plt.savefig( os.path.join( - self.frame['output_construction_dir'], - self.key_frame_id_str + '_16_confirmed_corners.png', + self.frame['output_construction_dir'], self.key_frame_id_str + '_16_confirmed_corners.png' ), dpi=200, ) @@ -1993,8 +1691,7 @@ def project_and_confirm( # Save the figure. plt.savefig( os.path.join( - self.frame['output_construction_dir'], - self.key_frame_id_str + '_17_projected_corners.png', + self.frame['output_construction_dir'], self.key_frame_id_str + '_17_projected_corners.png' ), dpi=200, ) @@ -2007,9 +1704,7 @@ def project_and_confirm( plt.imshow(edge_img) for final_heliostat in final_heliostats: projected_corners = final_heliostat['projected_corners'] - found_confirmed_corners = self.filter_not_found_corners( - final_heliostat['confirmed_corners'] - ) + found_confirmed_corners = self.filter_not_found_corners(final_heliostat['confirmed_corners']) # Draw the heliostat name. if ( len(projected_corners) > 0 @@ -2057,8 +1752,7 @@ def project_and_confirm( # Sort heliostats left to right # ?? SCAFFOLDING RCB -- MAY NOT DO ANYTHING, IN WHICH CASE WE SHOULD DELETE. WILL BE INCORRECT IF BOXES ARE NOT SORTED LEFT TO RIGHT? final_heliostats = sorted( - final_heliostats, - key=lambda x: np.mean(np.array(x['projected_corners']), axis=0)[0], + final_heliostats, key=lambda x: np.mean(np.array(x['projected_corners']), axis=0)[0] ) # ?? SCAFFOLDING RCB -- MAY NOT DO ANYTHING, IN WHICH CASE WE SHOULD DELETE. WILL BE INCORRECT IF BOXES ARE NOT SORTED LEFT TO RIGHT? def confirm( @@ -2076,27 +1770,15 @@ def confirm( def confirm_facets(expected_corners, edges, tolerance, pixels): confirmed_facets = {} - for indx in range( - 0, len(expected_corners), self.specifications.corners_per_facet - ): + for indx in range(0, len(expected_corners), self.specifications.corners_per_facet): facet_id = indx // self.specifications.corners_per_facet - corners = [ - expected_corners[indx + i] - for i in range(0, self.specifications.corners_per_facet) - ] + corners = [expected_corners[indx + i] for i in range(0, self.specifications.corners_per_facet)] for corner_indx in range(0, len(corners)): corner = corners[corner_indx] - if ( - corner[0] >= max_col - or corner[0] < 0 - or corner[1] >= max_row - or corner[1] < 0 - ): + if corner[0] >= max_col or corner[0] < 0 or corner[1] >= max_row or corner[1] < 0: corners[corner_indx] = None - confirmed_facets[facet_id] = { - 'edges': confirm_facet_edges(corners, edges, tolerance, pixels) - } + confirmed_facets[facet_id] = {'edges': confirm_facet_edges(corners, edges, tolerance, pixels)} return confirmed_facets def confirm_facet_edges(corners, edges, tolerance, pixels): @@ -2112,9 +1794,7 @@ def confirm_facet_edges(corners, edges, tolerance, pixels): A, B, C = find_hom_line_2points(corner1, corner2) if A is None: continue - min_col, max_col, min_row, max_row = min_max_col_row( - edges, corner1, corner2 - ) + min_col, max_col, min_row, max_row = min_max_col_row(edges, corner1, corner2) edge_pixels = [] # confirming if indx % 2 == 0: @@ -2143,9 +1823,7 @@ def confirm_facet_edges(corners, edges, tolerance, pixels): return confirmed_edges def find_corners(confirmed_facets): - hel_corners = [ - None for _ in range(0, self.specifications.corners_per_heliostat) - ] + hel_corners = [None for _ in range(0, self.specifications.corners_per_heliostat)] for facet_indx, facet in confirmed_facets.items(): corners = [] edges = facet['edges'] @@ -2193,9 +1871,7 @@ def construct_points(confirmed_corners, corners3d): img, canny_type=canny_type ) # ?? 
SCAFFOLDING RCB -- SEE CANNY() AND CONFIRM() ROUTINES FOR DUPLICATE PLACES WHERE THIS CODE IS PLACED. MUST KEEP CONSISTENT. # ?? SCAFFOLDING RCB -- REDUNDANT IMAGE LOAD, BLUR, AND EDGE FINDING. COMPUTE ONCE, CACHE AND COMMUNICATE. # edges = CannyImg(self.frame['sky'], canny_type=canny_type) # ?? SCAFFOLDING RCB -- ORIGINAL CODE, MULTIPLE FAILURE IMPLICATIONS: (1) USING SKY, WHEN SKY WAS NOT USERED PREVIOUSLY. (2) CAUSES OPENCV TO CRASH. (THANKFULLY; OTHERWISE I WOULDN'T HAVE FOUND THE OTHER BUG.) - confirmed_facets = confirm_facets( - expected_corners, edges, tolerance, pixels - ) + confirmed_facets = confirm_facets(expected_corners, edges, tolerance, pixels) confirmed_corners = find_corners(confirmed_facets) flag_break = True for corner in confirmed_corners: @@ -2217,12 +1893,8 @@ def construct_points(confirmed_corners, corners3d): if len(points3d) < 4: # Four points needed for solvePNP(). expected_corners = [] break - mtx, dist, rvec, tvec, pnp_error = solvePNP( - points3d, points2d, h, w, pnptype=self.solvePnPtype - ) - expected_corners, _ = cv.projectPoints( - np.array(corners3d).astype('float32'), rvec, tvec, mtx, dist - ) + mtx, dist, rvec, tvec, pnp_error = solvePNP(points3d, points2d, h, w, pnptype=self.solvePnPtype) + expected_corners, _ = cv.projectPoints(np.array(corners3d).astype('float32'), rvec, tvec, mtx, dist) expected_corners = expected_corners.reshape(-1, 2) expected_corners = expected_corners.tolist() if flag_break: diff --git a/contrib/app/ufacet-s/helio_scan/lib/KeyFrameTrackSearch.py b/contrib/app/ufacet-s/helio_scan/lib/KeyFrameTrackSearch.py index 2a7a1ca2..b2b6a178 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/KeyFrameTrackSearch.py +++ b/contrib/app/ufacet-s/helio_scan/lib/KeyFrameTrackSearch.py @@ -41,9 +41,7 @@ def __init__( specifications: any, # heliostat design specifications input_frame_dir: str, # directory containing input frame images input_frame_id_format: str, # How to turn frame id numbers into strings. Ex: "06d" - all_frame_body_ext_list: list[ - str - ], # Full path of all the frames to be processed + all_frame_body_ext_list: list[str], # Full path of all the frames to be processed output_construction_dir: str, # Output directory to store results into # Render control. draw_track_images: bool, # Also output the human-consumable images @@ -67,51 +65,37 @@ def __init__( self.draw_track_images = draw_track_images # Tracking exit control. - self.minimum_fraction_of_confirmed_corners = ( - MINIMUM_FRACTION_OF_CONFIRMED_CORNERS - ) + self.minimum_fraction_of_confirmed_corners = MINIMUM_FRACTION_OF_CONFIRMED_CORNERS self.minimum_corners_required_inside_frame = math.ceil( - MINIMUM_CORNERS_REQUIRED_INSIDE_FRAME - * self.specifications.corners_per_heliostat + MINIMUM_CORNERS_REQUIRED_INSIDE_FRAME * self.specifications.corners_per_heliostat ) # Heliostat shape. self.corners3d = self.specifications.facets_corners # Extact key frame information. 
- self.sorted_key_frame_ids = ( - key_frame_projected_corners_fnxl.sorted_frame_id_list() - ) + self.sorted_key_frame_ids = key_frame_projected_corners_fnxl.sorted_frame_id_list() self.key_frame_id_1 = self.sorted_key_frame_ids[0] self.key_frame_id_2 = self.sorted_key_frame_ids[1] - self.key_frame_id_str_1 = upf.frame_id_str_given_frame_id( - self.key_frame_id_1, self.input_frame_id_format - ) - self.key_frame_id_str_2 = upf.frame_id_str_given_frame_id( - self.key_frame_id_2, self.input_frame_id_format - ) - self.key_frame_1_projected_list_of_name_xy_lists = ( - key_frame_projected_corners_fnxl.list_of_name_xy_lists(self.key_frame_id_1) + self.key_frame_id_str_1 = upf.frame_id_str_given_frame_id(self.key_frame_id_1, self.input_frame_id_format) + self.key_frame_id_str_2 = upf.frame_id_str_given_frame_id(self.key_frame_id_2, self.input_frame_id_format) + self.key_frame_1_projected_list_of_name_xy_lists = key_frame_projected_corners_fnxl.list_of_name_xy_lists( + self.key_frame_id_1 ) - self.key_frame_2_projected_list_of_name_xy_lists = ( - key_frame_projected_corners_fnxl.list_of_name_xy_lists(self.key_frame_id_2) + self.key_frame_2_projected_list_of_name_xy_lists = key_frame_projected_corners_fnxl.list_of_name_xy_lists( + self.key_frame_id_2 ) - self.key_frame_1_confirmed_list_of_name_xy_lists = ( - key_frame_confirmed_corners_fnxl.list_of_name_xy_lists(self.key_frame_id_1) + self.key_frame_1_confirmed_list_of_name_xy_lists = key_frame_confirmed_corners_fnxl.list_of_name_xy_lists( + self.key_frame_id_1 ) - self.key_frame_2_confirmed_list_of_name_xy_lists = ( - key_frame_confirmed_corners_fnxl.list_of_name_xy_lists(self.key_frame_id_2) + self.key_frame_2_confirmed_list_of_name_xy_lists = key_frame_confirmed_corners_fnxl.list_of_name_xy_lists( + self.key_frame_id_2 ) - self.heliostat_names = [ - name_xy_list[0] - for name_xy_list in self.key_frame_1_projected_list_of_name_xy_lists - ] + self.heliostat_names = [name_xy_list[0] for name_xy_list in self.key_frame_1_projected_list_of_name_xy_lists] # Create output directory for frame figures. # Projected. - self.output_projected_corners_dir = os.path.join( - self.output_construction_dir, 'projected' - ) + self.output_projected_corners_dir = os.path.join(self.output_construction_dir, 'projected') self.output_frame_projected_corners_dir = os.path.join( self.output_projected_corners_dir, self.key_frame_id_str_1 ) @@ -122,9 +106,7 @@ def __init__( if self.draw_track_images: ft.create_directories_if_necessary(self.output_frame_projected_corners_dir) # Confirmed. - self.output_confirmed_corners_dir = os.path.join( - self.output_construction_dir, 'confirmed' - ) + self.output_confirmed_corners_dir = os.path.join(self.output_construction_dir, 'confirmed') self.output_frame_confirmed_corners_dir = os.path.join( self.output_confirmed_corners_dir, self.key_frame_id_str_1 ) @@ -144,43 +126,30 @@ def __init__( self.PredictConfirm(tracking_direction='backward') def PredictConfirm(self, tracking_direction): - print( - 'In KeyFrameTrackSearch.PredictConfirm(tracking_direction="' - + tracking_direction - + '")...' - ) + print('In KeyFrameTrackSearch.PredictConfirm(tracking_direction="' + tracking_direction + '")...') # Build image name list from image file list. all_frame_id_str_list = [ - upf.frame_id_str_given_frame_file_body_ext(body_ext) - for body_ext in self.all_frame_body_ext_list + upf.frame_id_str_given_frame_file_body_ext(body_ext) for body_ext in self.all_frame_body_ext_list ] if tracking_direction == 'forward': # Velocity is from 1 --> 2. 
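# --- Illustrative sketch, separate from the patched code -------------------
# The tracker seeds a per-corner velocity from the two key frames (motion
# 1 --> 2) and then advances each heliostat's corners by that velocity to
# predict where they land in the next frame; confirmed detections later pull
# the velocity back toward the observed motion.  A minimal standalone version
# of the seed-and-predict step, with names assumed for the sketch:
def corner_velocity(xy_list_a, xy_list_b):
    """Per-corner [dx, dy] from frame A to frame B (same corner ordering assumed)."""
    return [[xb - xa, yb - ya] for (xa, ya), (xb, yb) in zip(xy_list_a, xy_list_b)]

def predict_corners(latest_corners, velocity):
    """Constant-velocity prediction of each corner's [col, row] in the next frame."""
    return [[x + dx, y + dy] for (x, y), (dx, dy) in zip(latest_corners, velocity)]
# ---------------------------------------------------------------------------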
velocity, latest_projected_corners = self.velocity_and_initial_corners( - self.key_frame_1_projected_list_of_name_xy_lists, - self.key_frame_2_projected_list_of_name_xy_lists, + self.key_frame_1_projected_list_of_name_xy_lists, self.key_frame_2_projected_list_of_name_xy_lists ) # The image name list proceeds forward from key_frame 1. - key_frame_id_str_1_idx = all_frame_id_str_list.index( - self.key_frame_id_str_1 - ) + key_frame_id_str_1_idx = all_frame_id_str_list.index(self.key_frame_id_str_1) frame_id_str_sequence = all_frame_id_str_list[key_frame_id_str_1_idx:] elif tracking_direction == 'backward': # Velocity is from 2 --> 1. velocity, latest_projected_corners = self.velocity_and_initial_corners( - self.key_frame_2_projected_list_of_name_xy_lists, - self.key_frame_1_projected_list_of_name_xy_lists, + self.key_frame_2_projected_list_of_name_xy_lists, self.key_frame_1_projected_list_of_name_xy_lists ) # The image name list proceeds backward from key_frame 2. all_frame_id_str_list_reverse = copy.copy(all_frame_id_str_list) all_frame_id_str_list_reverse.reverse() - key_frame_id_str_2_idx = all_frame_id_str_list_reverse.index( - self.key_frame_id_str_2 - ) - frame_id_str_sequence = all_frame_id_str_list_reverse[ - key_frame_id_str_2_idx: - ] + key_frame_id_str_2_idx = all_frame_id_str_list_reverse.index(self.key_frame_id_str_2) + frame_id_str_sequence = all_frame_id_str_list_reverse[key_frame_id_str_2_idx:] else: raise ValueError( 'ERROR: In KeyFrameTrackSearch.PredictConfirm(), unexpected tracking_direction="' @@ -203,16 +172,10 @@ def PredictConfirm(self, tracking_direction): self.key_frame_id_1, self.key_frame_1_confirmed_list_of_name_xy_lists ) self.draw_frame_with_points_if_desired( - self.key_frame_id_str_1, - self.key_frame_1_projected_list_of_name_xy_lists, - 'Projected', - point_color='g', + self.key_frame_id_str_1, self.key_frame_1_projected_list_of_name_xy_lists, 'Projected', point_color='g' ) self.draw_frame_with_points_if_desired( - self.key_frame_id_str_1, - self.key_frame_1_confirmed_list_of_name_xy_lists, - 'Confirmed', - point_color='b', + self.key_frame_id_str_1, self.key_frame_1_confirmed_list_of_name_xy_lists, 'Confirmed', point_color='b' ) # Key frame 2. self.key_frame_projected_track_fnxl.add_list_of_name_xy_lists( @@ -238,9 +201,7 @@ def PredictConfirm(self, tracking_direction): for frame_id_str in frame_id_str_sequence[2:]: # print('In KeyFrameTrackSearch.PredictConfirm(), for key_frame_id='+self.key_frame_id_str_1+', '+str(tracking_direction)+' tracking corners into image: '+frame_id_str) img = None - frame_body_ext = upf.frame_file_body_ext_given_frame_id_str( - self.input_video_body, frame_id_str - ) + frame_body_ext = upf.frame_file_body_ext_given_frame_id_str(self.input_video_body, frame_id_str) frame_dir_body_ext = os.path.join(self.input_frame_dir, frame_body_ext) if os.path.exists(frame_dir_body_ext): img = cv.imread(frame_dir_body_ext) @@ -248,7 +209,9 @@ def PredictConfirm(self, tracking_direction): print( 'In KeyFrameTrackSearch.PredictConfirm(), Unexpected null image encountered.' ) # ?? SCAFFOLDING RCB -- WE DON'T KNOW WHY THIS IS HERE. CAN THIS HAPPEN? WHY IS IT ALLOWED? - assert False # ?? SCAFFOLDING RCB -- WE DON'T KNOW WHY THIS IS HERE. CAN THIS HAPPEN? WHY IS IT ALLOWED? + assert ( + False # ?? SCAFFOLDING RCB -- WE DON'T KNOW WHY THIS IS HERE. CAN THIS HAPPEN? WHY IS IT ALLOWED? 
+ ) skip_flag.append(True) continue else: @@ -262,43 +225,28 @@ def PredictConfirm(self, tracking_direction): break for hel_indx in range(0, num_hel): for vel_indx in range(0, len(velocity[hel_indx])): - new_vel = [ - velocity[hel_indx][vel_indx][0] * cnt, - velocity[hel_indx][vel_indx][1] * cnt, - ] + new_vel = [velocity[hel_indx][vel_indx][0] * cnt, velocity[hel_indx][vel_indx][1] * cnt] velocity[hel_indx][vel_indx] = new_vel """Edge Detection based on Image""" img = cv.GaussianBlur(img, (5, 5), 0) cnt = 0 - projected_list_of_name_xy_lists = ( - [] - ) # For adding to the FrameNameXyList object. - confirmed_list_of_name_xy_lists = ( - [] - ) # For adding to the FrameNameXyList object. + projected_list_of_name_xy_lists = [] # For adding to the FrameNameXyList object. + confirmed_list_of_name_xy_lists = [] # For adding to the FrameNameXyList object. for hel_indx in range(0, num_hel): if stop_track_flags[hel_indx]: continue """Predict Corners""" - predicted_corners = self.predict_corners( - velocity, latest_projected_corners, hel_indx - ) - n_inside = self.number_of_predicted_corners_inside_frame( - img, predicted_corners - ) + predicted_corners = self.predict_corners(velocity, latest_projected_corners, hel_indx) + n_inside = self.number_of_predicted_corners_inside_frame(img, predicted_corners) # print('In KeyFrameTrackSearch.PredictConfirm(), n_inside =', n_inside, '; self.minimum_corners_required_inside_frame =', self.minimum_corners_required_inside_frame) if n_inside < self.minimum_corners_required_inside_frame: """ending criterion""" stop_track_flags[hel_indx] = True continue """Confirm Corners""" - ( - projected_corners, - confirmed_corners, - num_non_None_confirmed_corners, - ) = self.confirm_corners( + (projected_corners, confirmed_corners, num_non_None_confirmed_corners) = self.confirm_corners( img, predicted_corners, iterations=self.iterations, @@ -332,32 +280,20 @@ def PredictConfirm(self, tracking_direction): # Add to FrameNameXyList objects. self.key_frame_projected_track_fnxl.add_list_of_name_xy_lists( - upf.frame_id_given_frame_id_str(frame_id_str), - projected_list_of_name_xy_lists, + upf.frame_id_given_frame_id_str(frame_id_str), projected_list_of_name_xy_lists ) self.key_frame_confirmed_track_fnxl.add_list_of_name_xy_lists( - upf.frame_id_given_frame_id_str(frame_id_str), - confirmed_list_of_name_xy_lists, + upf.frame_id_given_frame_id_str(frame_id_str), confirmed_list_of_name_xy_lists ) # Draw. self.draw_frame_with_points_if_desired_aux( - img, - frame_id_str, - projected_list_of_name_xy_lists, - 'Projected', - point_color='m', + img, frame_id_str, projected_list_of_name_xy_lists, 'Projected', point_color='m' ) self.draw_frame_with_points_if_desired_aux( - img, - frame_id_str, - confirmed_list_of_name_xy_lists, - 'Confirmed', - point_color='c', + img, frame_id_str, confirmed_list_of_name_xy_lists, 'Confirmed', point_color='c' ) - def velocity_and_initial_corners( - self, key_frame_A_list_of_name_xy_lists, key_frame_B_list_of_name_xy_lists - ): + def velocity_and_initial_corners(self, key_frame_A_list_of_name_xy_lists, key_frame_B_list_of_name_xy_lists): """ Motion is from A --> B. 
Reurns point-by-point velocity vector in image coordinates, and also the set of points @@ -365,9 +301,7 @@ def velocity_and_initial_corners( """ velocity = [] latest_projected_corners = [] - for name_xy_list_A, name_xy_list_B in zip( - key_frame_A_list_of_name_xy_lists, key_frame_B_list_of_name_xy_lists - ): + for name_xy_list_A, name_xy_list_B in zip(key_frame_A_list_of_name_xy_lists, key_frame_B_list_of_name_xy_lists): name_A = name_xy_list_A[0] xy_list_A = name_xy_list_A[1] name_B = name_xy_list_B[0] @@ -392,32 +326,17 @@ def velocity_and_initial_corners( return velocity, latest_projected_corners def draw_frame_with_points_if_desired( - self, - frame_id_str, - list_of_name_xy_lists, - Confirmed_or_Projected_str, - point_color, + self, frame_id_str, list_of_name_xy_lists, Confirmed_or_Projected_str, point_color ): if self.draw_track_images: - frame_body_ext = upf.frame_file_body_ext_given_frame_id_str( - self.input_video_body, frame_id_str - ) + frame_body_ext = upf.frame_file_body_ext_given_frame_id_str(self.input_video_body, frame_id_str) img = cv.imread(os.path.join(self.input_frame_dir, frame_body_ext)) self.draw_frame_with_points_if_desired_aux( - img, - frame_id_str, - list_of_name_xy_lists, - Confirmed_or_Projected_str, - point_color, + img, frame_id_str, list_of_name_xy_lists, Confirmed_or_Projected_str, point_color ) def draw_frame_with_points_if_desired_aux( - self, - img, - frame_id_str, - list_of_name_xy_lists, - Confirmed_or_Projected_str, - point_color, + self, img, frame_id_str, list_of_name_xy_lists, Confirmed_or_Projected_str, point_color ): if self.draw_track_images: plt.figure() @@ -434,12 +353,7 @@ def draw_frame_with_points_if_desired_aux( + frame_id_str ) fig_file_body_ext = ( - self.input_video_body - + '_' - + frame_id_str - + '_' - + Confirmed_or_Projected_str.lower() - + '.png' + self.input_video_body + '_' + frame_id_str + '_' + Confirmed_or_Projected_str.lower() + '.png' ) if Confirmed_or_Projected_str == 'Projected': figure_output_dir = self.output_frame_projected_corners_dir @@ -462,18 +376,12 @@ def predict_corners(self, velocity, latest_projected_corners, hel_indx): velocity = velocity[hel_indx] latest_projected_corners = latest_projected_corners[hel_indx] for corner_indx in range(0, len(latest_projected_corners)): - pred_col = ( - latest_projected_corners[corner_indx][0] + velocity[corner_indx][0] - ) - pred_row = ( - latest_projected_corners[corner_indx][1] + velocity[corner_indx][1] - ) + pred_col = latest_projected_corners[corner_indx][0] + velocity[corner_indx][0] + pred_row = latest_projected_corners[corner_indx][1] + velocity[corner_indx][1] predicted_corners.append([pred_col, pred_row]) return predicted_corners - def update_velocity( - self, velocity, predicted_corners, confirmed_corners, hel_indx, skip_flag - ): + def update_velocity(self, velocity, predicted_corners, confirmed_corners, hel_indx, skip_flag): previous_velocity = velocity[hel_indx] deltas_adjust = [] for conf_corner, pred_corner in zip(confirmed_corners, predicted_corners): @@ -498,10 +406,7 @@ def update_velocity( else: break for vel_indx in range(0, len(new_velocity)): - new_velocity[vel_indx] = [ - new_velocity[vel_indx][0] / cnt, - new_velocity[vel_indx][1] / cnt, - ] + new_velocity[vel_indx] = [new_velocity[vel_indx][0] / cnt, new_velocity[vel_indx][1] / cnt] return new_velocity @@ -516,37 +421,18 @@ def number_of_predicted_corners_inside_frame(self, img, predicted_corners): return cnt def confirm_corners( - self, - img, - predicted_corners, - canny_levels, - iterations, - 
confirm_type='', - tolerance=3, - pixels=100, + self, img, predicted_corners, canny_levels, iterations, confirm_type='', tolerance=3, pixels=100 ): def confirm_facets(expected_corners, edges, tolerance, pixels): confirmed_facets = {} - for indx in range( - 0, len(expected_corners), self.specifications.corners_per_facet - ): + for indx in range(0, len(expected_corners), self.specifications.corners_per_facet): facet_id = indx // self.specifications.corners_per_facet - corners = [ - expected_corners[indx + i] - for i in range(0, self.specifications.corners_per_facet) - ] + corners = [expected_corners[indx + i] for i in range(0, self.specifications.corners_per_facet)] for corner_indx in range(0, len(corners)): corner = corners[corner_indx] - if ( - corner[0] >= max_col - or corner[0] < 0 - or corner[1] >= max_row - or corner[1] < 0 - ): + if corner[0] >= max_col or corner[0] < 0 or corner[1] >= max_row or corner[1] < 0: corners[corner_indx] = None - confirmed_facets[facet_id] = { - 'edges': confirm_facet_edges(corners, edges, tolerance, pixels) - } + confirmed_facets[facet_id] = {'edges': confirm_facet_edges(corners, edges, tolerance, pixels)} return confirmed_facets def confirm_facet_edges(corners, edges, tolerance, pixels): @@ -562,9 +448,7 @@ def confirm_facet_edges(corners, edges, tolerance, pixels): A, B, C = find_hom_line_2points(corner1, corner2) if A is None: continue - min_col, max_col, min_row, max_row = min_max_col_row( - edges, corner1, corner2 - ) + min_col, max_col, min_row, max_row = min_max_col_row(edges, corner1, corner2) edge_pixels = [] # confirming if indx % 2 == 0: @@ -590,9 +474,7 @@ def confirm_facet_edges(corners, edges, tolerance, pixels): return confirmed_edges def find_corners(confirmed_facets): - hel_corners = [ - None for _ in range(0, self.specifications.corners_per_heliostat) - ] + hel_corners = [None for _ in range(0, self.specifications.corners_per_heliostat)] for facet_indx, facet in confirmed_facets.items(): corners = [] edges = facet['edges'] @@ -661,9 +543,7 @@ def construct_points(confirmed_corners, corners3d): img, canny_type=canny_types[i] ) # ?? 
SCAFFOLDING RCB -- CHANGE THIS VARIABLE NAME TO "edge_image" - confirmed_facets = confirm_facets( - expected_corners, edges, tolerance, pixels - ) + confirmed_facets = confirm_facets(expected_corners, edges, tolerance, pixels) confirmed_corners = find_corners(confirmed_facets) flag_break = True flag_less_than_6 = False @@ -696,9 +576,7 @@ def construct_points(confirmed_corners, corners3d): dist_coeff=self.dist_coeff, ) - expected_corners, _ = cv.projectPoints( - np.array(corners3d).astype('float32'), rvec, tvec, mtx, dist - ) + expected_corners, _ = cv.projectPoints(np.array(corners3d).astype('float32'), rvec, tvec, mtx, dist) expected_corners = expected_corners.reshape(-1, 2) expected_corners = expected_corners.tolist() previous_expected_corners = expected_corners.copy() diff --git a/contrib/app/ufacet-s/helio_scan/lib/NameFrameXyList.py b/contrib/app/ufacet-s/helio_scan/lib/NameFrameXyList.py index e41e61b3..a1f2449a 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/NameFrameXyList.py +++ b/contrib/app/ufacet-s/helio_scan/lib/NameFrameXyList.py @@ -162,9 +162,7 @@ def merge_list_of_frame_xy_lists( self.dictionary[hel_name] = copy.deepcopy(input_list_of_frame_xy_lists) else: existing_list_of_frame_xy_lists = self.dictionary[hel_name] - existing_frame_list = [ - frame_xy_list[0] for frame_xy_list in existing_list_of_frame_xy_lists - ] + existing_frame_list = [frame_xy_list[0] for frame_xy_list in existing_list_of_frame_xy_lists] for input_frame_xy_list in input_list_of_frame_xy_lists: input_frame_xy_list_copy = copy.deepcopy(input_frame_xy_list) input_frame_copy = input_frame_xy_list_copy[0] @@ -229,9 +227,7 @@ def merge_frame_xy_list( self.dictionary[hel_name] = [copy.deepcopy(input_frame_xy_list)] else: existing_list_of_frame_xy_lists = self.dictionary[hel_name] - existing_frame_list = [ - frame_xy_list[0] for frame_xy_list in existing_list_of_frame_xy_lists - ] + existing_frame_list = [frame_xy_list[0] for frame_xy_list in existing_list_of_frame_xy_lists] input_frame_xy_list_copy = copy.deepcopy(input_frame_xy_list) input_frame_copy = input_frame_xy_list_copy[0] input_xy_list_copy = input_frame_xy_list_copy[1] @@ -277,11 +273,7 @@ def merge_frame_xy_list( existing_xy_list += input_xy_list_copy def add_FrameNameXyList( - self, - input_fnxl, - warn_if_common_frame=False, - skip_if_common_frame=False, - error_if_common_frame=True, + self, input_fnxl, warn_if_common_frame=False, skip_if_common_frame=False, error_if_common_frame=True ): # Walk the input FranemNameXyList, adding data to the current NameFrameXyList. for frame_id in input_fnxl.sorted_frame_id_list(): @@ -291,11 +283,7 @@ def add_FrameNameXyList( xy_list = name_xy_list[1] new_frame_xy_list = [frame_id, xy_list] self.merge_frame_xy_list( - hel_name, - new_frame_xy_list, - warn_if_common_frame, - skip_if_common_frame, - error_if_common_frame, + hel_name, new_frame_xy_list, warn_if_common_frame, skip_if_common_frame, error_if_common_frame ) # READ @@ -321,10 +309,7 @@ def load(self, input_dir_body_ext): # "nfxl" abbreviates "NameFrameXyList" # print('In NameFrameXyList.load(), loading input file: ', input_dir_body_ext) # Check if the input file exists. if not ft.file_exists(input_dir_body_ext): - raise OSError( - 'In NameFrameXyList.load(), file does not exist: ' - + str(input_dir_body_ext) - ) + raise OSError('In NameFrameXyList.load(), file does not exist: ' + str(input_dir_body_ext)) # Open and read the file. 
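# --- Illustrative sketch, separate from the patched code -------------------
# The NameFrameXyList rows parsed below encode one heliostat's data as a flat
# list: a count of frames, then for every frame its frame_id, a point count,
# and that many x,y pairs.  A compact reader for that layout (the leading
# heliostat-name column and the exact recursion are assumptions for the
# sketch):
def parse_frame_xy_lists(row_remainder):
    """[[frame_id, [[x, y], ...]], ...] from a flat CSV row remainder."""
    n_frames = int(row_remainder[0])
    fields = row_remainder[1:]
    result = []
    for _ in range(n_frames):
        frame_id, n_points = int(fields[0]), int(fields[1])
        xy = [[float(fields[2 + 2 * i]), float(fields[3 + 2 * i])] for i in range(n_points)]
        result.append([frame_id, xy])
        fields = fields[2 + 2 * n_points:]  # advance past this frame's header and points
    return result
# ---------------------------------------------------------------------------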
with open(input_dir_body_ext, newline='') as input_stream: reader = csv.reader(input_stream, delimiter=',') @@ -342,13 +327,9 @@ def parse_row_list_of_frame_xylists(self, input_row_remainder): if n_frames == 0: return [] else: - return self.parse_row_list_of_frame_xylists_aux( - input_row_remainder[1:], n_frames - ) + return self.parse_row_list_of_frame_xylists_aux(input_row_remainder[1:], n_frames) - def parse_row_list_of_frame_xylists_aux( - self, input_row_list_of_frame_xylists, n_frames - ): + def parse_row_list_of_frame_xylists_aux(self, input_row_list_of_frame_xylists, n_frames): # Fetch this frame_xylist's frame_id and the number of points in its xylist. frame_id = int(input_row_list_of_frame_xylists[0]) n_points = int(input_row_list_of_frame_xylists[1]) @@ -357,10 +338,7 @@ def parse_row_list_of_frame_xylists_aux( for idx in range(0, n_points): idx_x = 2 + (2 * idx) idx_y = idx_x + 1 - vertex_str = [ - input_row_list_of_frame_xylists[idx_x], - input_row_list_of_frame_xylists[idx_y], - ] + vertex_str = [input_row_list_of_frame_xylists[idx_x], input_row_list_of_frame_xylists[idx_y]] x_str = vertex_str[0] y_str = vertex_str[1] x = float(x_str) @@ -419,12 +397,7 @@ def print( indent=None, ): # Number of blankss to print at the beginning of each line. # Print. - dt.print_dict( - self.dictionary, - max_keys=max_keys, - max_value_length=max_value_length, - indent=indent, - ) + dt.print_dict(self.dictionary, max_keys=max_keys, max_value_length=max_value_length, indent=indent) # HELPER FUNCTIONS diff --git a/contrib/app/ufacet-s/helio_scan/lib/NoTest_Heliostat3dInfer_ORIGINAL_SCRATCH.py b/contrib/app/ufacet-s/helio_scan/lib/NoTest_Heliostat3dInfer_ORIGINAL_SCRATCH.py index 67ee1890..335d0298 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/NoTest_Heliostat3dInfer_ORIGINAL_SCRATCH.py +++ b/contrib/app/ufacet-s/helio_scan/lib/NoTest_Heliostat3dInfer_ORIGINAL_SCRATCH.py @@ -60,51 +60,37 @@ def __init__( self.draw_track_images = draw_track_images # Tracking exit control. - self.minimum_fraction_of_confirmed_corners = ( - MINIMUM_FRACTION_OF_CONFIRMED_CORNERS - ) + self.minimum_fraction_of_confirmed_corners = MINIMUM_FRACTION_OF_CONFIRMED_CORNERS self.minimum_corners_required_inside_frame = math.ceil( - MINIMUM_CORNERS_REQUIRED_INSIDE_FRAME - * self.specifications.corners_per_heliostat + MINIMUM_CORNERS_REQUIRED_INSIDE_FRAME * self.specifications.corners_per_heliostat ) # Heliostat shape. self.corners3d = self.specifications.facets_corners # Extact key frame information. 
- self.sorted_key_frame_ids = ( - key_frame_projected_corners_fnxl.sorted_frame_id_list() - ) + self.sorted_key_frame_ids = key_frame_projected_corners_fnxl.sorted_frame_id_list() self.key_frame_id_1 = self.sorted_key_frame_ids[0] self.key_frame_id_2 = self.sorted_key_frame_ids[1] - self.key_frame_id_str_1 = upf.frame_id_str_given_frame_id( - self.key_frame_id_1, self.input_frame_id_format - ) - self.key_frame_id_str_2 = upf.frame_id_str_given_frame_id( - self.key_frame_id_2, self.input_frame_id_format + self.key_frame_id_str_1 = upf.frame_id_str_given_frame_id(self.key_frame_id_1, self.input_frame_id_format) + self.key_frame_id_str_2 = upf.frame_id_str_given_frame_id(self.key_frame_id_2, self.input_frame_id_format) + self.key_frame_1_projected_list_of_name_xy_lists = key_frame_projected_corners_fnxl.list_of_name_xy_lists( + self.key_frame_id_1 ) - self.key_frame_1_projected_list_of_name_xy_lists = ( - key_frame_projected_corners_fnxl.list_of_name_xy_lists(self.key_frame_id_1) + self.key_frame_2_projected_list_of_name_xy_lists = key_frame_projected_corners_fnxl.list_of_name_xy_lists( + self.key_frame_id_2 ) - self.key_frame_2_projected_list_of_name_xy_lists = ( - key_frame_projected_corners_fnxl.list_of_name_xy_lists(self.key_frame_id_2) + self.key_frame_1_confirmed_list_of_name_xy_lists = key_frame_confirmed_corners_fnxl.list_of_name_xy_lists( + self.key_frame_id_1 ) - self.key_frame_1_confirmed_list_of_name_xy_lists = ( - key_frame_confirmed_corners_fnxl.list_of_name_xy_lists(self.key_frame_id_1) + self.key_frame_2_confirmed_list_of_name_xy_lists = key_frame_confirmed_corners_fnxl.list_of_name_xy_lists( + self.key_frame_id_2 ) - self.key_frame_2_confirmed_list_of_name_xy_lists = ( - key_frame_confirmed_corners_fnxl.list_of_name_xy_lists(self.key_frame_id_2) - ) - self.heliostat_names = [ - name_xy_list[0] - for name_xy_list in self.key_frame_1_projected_list_of_name_xy_lists - ] + self.heliostat_names = [name_xy_list[0] for name_xy_list in self.key_frame_1_projected_list_of_name_xy_lists] # Create output directory for frame figures. # Projected. - self.output_projected_corners_dir = os.path.join( - self.output_construction_dir, 'projected' - ) + self.output_projected_corners_dir = os.path.join(self.output_construction_dir, 'projected') self.output_frame_projected_corners_dir = os.path.join( self.output_projected_corners_dir, self.key_frame_id_str_1 ) @@ -115,9 +101,7 @@ def __init__( if self.draw_track_images: ft.create_directories_if_necessary(self.output_frame_projected_corners_dir) # Confirmed. - self.output_confirmed_corners_dir = os.path.join( - self.output_construction_dir, 'confirmed' - ) + self.output_confirmed_corners_dir = os.path.join(self.output_construction_dir, 'confirmed') self.output_frame_confirmed_corners_dir = os.path.join( self.output_confirmed_corners_dir, self.key_frame_id_str_1 ) @@ -137,43 +121,30 @@ def __init__( self.PredictConfirm(tracking_direction='backward') def PredictConfirm(self, tracking_direction): - print( - 'In Heliostat3dInfer.PredictConfirm(tracking_direction="' - + tracking_direction - + '")...' - ) + print('In Heliostat3dInfer.PredictConfirm(tracking_direction="' + tracking_direction + '")...') # Build image name list from image file list. all_frame_id_str_list = [ - upf.frame_id_str_given_frame_file_body_ext(body_ext) - for body_ext in self.all_frame_body_ext_list + upf.frame_id_str_given_frame_file_body_ext(body_ext) for body_ext in self.all_frame_body_ext_list ] if tracking_direction == 'forward': # Velocity is from 1 --> 2. 
velocity, latest_projected_corners = self.velocity_and_initial_corners( - self.key_frame_1_projected_list_of_name_xy_lists, - self.key_frame_2_projected_list_of_name_xy_lists, + self.key_frame_1_projected_list_of_name_xy_lists, self.key_frame_2_projected_list_of_name_xy_lists ) # The image name list proceeds forward from key_frame 1. - key_frame_id_str_1_idx = all_frame_id_str_list.index( - self.key_frame_id_str_1 - ) + key_frame_id_str_1_idx = all_frame_id_str_list.index(self.key_frame_id_str_1) frame_id_str_sequence = all_frame_id_str_list[key_frame_id_str_1_idx:] elif tracking_direction == 'backward': # Velocity is from 2 --> 1. velocity, latest_projected_corners = self.velocity_and_initial_corners( - self.key_frame_2_projected_list_of_name_xy_lists, - self.key_frame_1_projected_list_of_name_xy_lists, + self.key_frame_2_projected_list_of_name_xy_lists, self.key_frame_1_projected_list_of_name_xy_lists ) # The image name list proceeds backward from key_frame 2. all_frame_id_str_list_reverse = copy.copy(all_frame_id_str_list) all_frame_id_str_list_reverse.reverse() - key_frame_id_str_2_idx = all_frame_id_str_list_reverse.index( - self.key_frame_id_str_2 - ) - frame_id_str_sequence = all_frame_id_str_list_reverse[ - key_frame_id_str_2_idx: - ] + key_frame_id_str_2_idx = all_frame_id_str_list_reverse.index(self.key_frame_id_str_2) + frame_id_str_sequence = all_frame_id_str_list_reverse[key_frame_id_str_2_idx:] else: raise ValueError( 'ERROR: In Heliostat3dInfer.PredictConfirm(), unexpected tracking_direction="' @@ -196,16 +167,10 @@ def PredictConfirm(self, tracking_direction): self.key_frame_id_1, self.key_frame_1_confirmed_list_of_name_xy_lists ) self.draw_frame_with_points_if_desired( - self.key_frame_id_str_1, - self.key_frame_1_projected_list_of_name_xy_lists, - 'Projected', - point_color='g', + self.key_frame_id_str_1, self.key_frame_1_projected_list_of_name_xy_lists, 'Projected', point_color='g' ) self.draw_frame_with_points_if_desired( - self.key_frame_id_str_1, - self.key_frame_1_confirmed_list_of_name_xy_lists, - 'Confirmed', - point_color='b', + self.key_frame_id_str_1, self.key_frame_1_confirmed_list_of_name_xy_lists, 'Confirmed', point_color='b' ) # Key frame 2. self.key_frame_projected_track_fnxl.add_list_of_name_xy_lists( @@ -231,9 +196,7 @@ def PredictConfirm(self, tracking_direction): for frame_id_str in frame_id_str_sequence[2:]: # print('In Heliostat3dInfer.PredictConfirm(), for key_frame_id='+self.key_frame_id_str_1+', '+str(tracking_direction)+' tracking corners into image: '+frame_id_str) img = None - frame_body_ext = upf.frame_file_body_ext_given_frame_id_str( - self.input_video_body, frame_id_str - ) + frame_body_ext = upf.frame_file_body_ext_given_frame_id_str(self.input_video_body, frame_id_str) frame_dir_body_ext = os.path.join(self.input_frame_dir, frame_body_ext) if os.path.exists(frame_dir_body_ext): img = cv.imread(frame_dir_body_ext) @@ -251,43 +214,28 @@ def PredictConfirm(self, tracking_direction): break for hel_indx in range(0, num_hel): for vel_indx in range(0, len(velocity[hel_indx])): - new_vel = [ - velocity[hel_indx][vel_indx][0] * cnt, - velocity[hel_indx][vel_indx][1] * cnt, - ] + new_vel = [velocity[hel_indx][vel_indx][0] * cnt, velocity[hel_indx][vel_indx][1] * cnt] velocity[hel_indx][vel_indx] = new_vel """Edge Detection based on Image""" img = cv.GaussianBlur(img, (5, 5), 0) cnt = 0 - projected_list_of_name_xy_lists = ( - [] - ) # For adding to the FrameNameXyList object. 
- confirmed_list_of_name_xy_lists = ( - [] - ) # For adding to the FrameNameXyList object. + projected_list_of_name_xy_lists = [] # For adding to the FrameNameXyList object. + confirmed_list_of_name_xy_lists = [] # For adding to the FrameNameXyList object. for hel_indx in range(0, num_hel): if stop_track_flags[hel_indx]: continue """Predict Corners""" - predicted_corners = self.predict_corners( - velocity, latest_projected_corners, hel_indx - ) - n_inside = self.number_of_predicted_corners_inside_frame( - img, predicted_corners - ) + predicted_corners = self.predict_corners(velocity, latest_projected_corners, hel_indx) + n_inside = self.number_of_predicted_corners_inside_frame(img, predicted_corners) # print('In Heliostat3dInfer.PredictConfirm(), n_inside =', n_inside, '; self.minimum_corners_required_inside_frame =', self.minimum_corners_required_inside_frame) if n_inside < self.minimum_corners_required_inside_frame: """ending criterion""" stop_track_flags[hel_indx] = True continue """Confirm Corners""" - ( - projected_corners, - confirmed_corners, - num_non_None_confirmed_corners, - ) = self.confirm_corners( + (projected_corners, confirmed_corners, num_non_None_confirmed_corners) = self.confirm_corners( img, predicted_corners, iterations=self.iterations, @@ -321,32 +269,20 @@ def PredictConfirm(self, tracking_direction): # Add to FrameNameXyList objects. self.key_frame_projected_track_fnxl.add_list_of_name_xy_lists( - upf.frame_id_given_frame_id_str(frame_id_str), - projected_list_of_name_xy_lists, + upf.frame_id_given_frame_id_str(frame_id_str), projected_list_of_name_xy_lists ) self.key_frame_confirmed_track_fnxl.add_list_of_name_xy_lists( - upf.frame_id_given_frame_id_str(frame_id_str), - confirmed_list_of_name_xy_lists, + upf.frame_id_given_frame_id_str(frame_id_str), confirmed_list_of_name_xy_lists ) # Draw. self.draw_frame_with_points_if_desired_aux( - img, - frame_id_str, - projected_list_of_name_xy_lists, - 'Projected', - point_color='m', + img, frame_id_str, projected_list_of_name_xy_lists, 'Projected', point_color='m' ) self.draw_frame_with_points_if_desired_aux( - img, - frame_id_str, - confirmed_list_of_name_xy_lists, - 'Confirmed', - point_color='c', + img, frame_id_str, confirmed_list_of_name_xy_lists, 'Confirmed', point_color='c' ) - def velocity_and_initial_corners( - self, key_frame_A_list_of_name_xy_lists, key_frame_B_list_of_name_xy_lists - ): + def velocity_and_initial_corners(self, key_frame_A_list_of_name_xy_lists, key_frame_B_list_of_name_xy_lists): """ Motion is from A --> B. 
Reurns point-by-point velocity vector in image coordinates, and also the set of points @@ -354,9 +290,7 @@ def velocity_and_initial_corners( """ velocity = [] latest_projected_corners = [] - for name_xy_list_A, name_xy_list_B in zip( - key_frame_A_list_of_name_xy_lists, key_frame_B_list_of_name_xy_lists - ): + for name_xy_list_A, name_xy_list_B in zip(key_frame_A_list_of_name_xy_lists, key_frame_B_list_of_name_xy_lists): name_A = name_xy_list_A[0] xy_list_A = name_xy_list_A[1] name_B = name_xy_list_B[0] @@ -381,32 +315,17 @@ def velocity_and_initial_corners( return velocity, latest_projected_corners def draw_frame_with_points_if_desired( - self, - frame_id_str, - list_of_name_xy_lists, - Confirmed_or_Projected_str, - point_color, + self, frame_id_str, list_of_name_xy_lists, Confirmed_or_Projected_str, point_color ): if self.draw_track_images: - frame_body_ext = upf.frame_file_body_ext_given_frame_id_str( - self.input_video_body, frame_id_str - ) + frame_body_ext = upf.frame_file_body_ext_given_frame_id_str(self.input_video_body, frame_id_str) img = cv.imread(os.path.join(self.input_frame_dir, frame_body_ext)) self.draw_frame_with_points_if_desired_aux( - img, - frame_id_str, - list_of_name_xy_lists, - Confirmed_or_Projected_str, - point_color, + img, frame_id_str, list_of_name_xy_lists, Confirmed_or_Projected_str, point_color ) def draw_frame_with_points_if_desired_aux( - self, - img, - frame_id_str, - list_of_name_xy_lists, - Confirmed_or_Projected_str, - point_color, + self, img, frame_id_str, list_of_name_xy_lists, Confirmed_or_Projected_str, point_color ): if self.draw_track_images: plt.figure() @@ -423,12 +342,7 @@ def draw_frame_with_points_if_desired_aux( + frame_id_str ) fig_file_body_ext = ( - self.input_video_body - + '_' - + frame_id_str - + '_' - + Confirmed_or_Projected_str.lower() - + '.png' + self.input_video_body + '_' + frame_id_str + '_' + Confirmed_or_Projected_str.lower() + '.png' ) if Confirmed_or_Projected_str == 'Projected': figure_output_dir = self.output_frame_projected_corners_dir @@ -451,18 +365,12 @@ def predict_corners(self, velocity, latest_projected_corners, hel_indx): velocity = velocity[hel_indx] latest_projected_corners = latest_projected_corners[hel_indx] for corner_indx in range(0, len(latest_projected_corners)): - pred_col = ( - latest_projected_corners[corner_indx][0] + velocity[corner_indx][0] - ) - pred_row = ( - latest_projected_corners[corner_indx][1] + velocity[corner_indx][1] - ) + pred_col = latest_projected_corners[corner_indx][0] + velocity[corner_indx][0] + pred_row = latest_projected_corners[corner_indx][1] + velocity[corner_indx][1] predicted_corners.append([pred_col, pred_row]) return predicted_corners - def update_velocity( - self, velocity, predicted_corners, confirmed_corners, hel_indx, skip_flag - ): + def update_velocity(self, velocity, predicted_corners, confirmed_corners, hel_indx, skip_flag): previous_velocity = velocity[hel_indx] deltas_adjust = [] for conf_corner, pred_corner in zip(confirmed_corners, predicted_corners): @@ -487,10 +395,7 @@ def update_velocity( else: break for vel_indx in range(0, len(new_velocity)): - new_velocity[vel_indx] = [ - new_velocity[vel_indx][0] / cnt, - new_velocity[vel_indx][1] / cnt, - ] + new_velocity[vel_indx] = [new_velocity[vel_indx][0] / cnt, new_velocity[vel_indx][1] / cnt] return new_velocity @@ -505,37 +410,18 @@ def number_of_predicted_corners_inside_frame(self, img, predicted_corners): return cnt def confirm_corners( - self, - img, - predicted_corners, - canny_levels, - iterations, - 
confirm_type='', - tolerance=3, - pixels=100, + self, img, predicted_corners, canny_levels, iterations, confirm_type='', tolerance=3, pixels=100 ): def confirm_facets(expected_corners, edges, tolerance, pixels): confirmed_facets = {} - for indx in range( - 0, len(expected_corners), self.specifications.corners_per_facet - ): + for indx in range(0, len(expected_corners), self.specifications.corners_per_facet): facet_id = indx // self.specifications.corners_per_facet - corners = [ - expected_corners[indx + i] - for i in range(0, self.specifications.corners_per_facet) - ] + corners = [expected_corners[indx + i] for i in range(0, self.specifications.corners_per_facet)] for corner_indx in range(0, len(corners)): corner = corners[corner_indx] - if ( - corner[0] >= max_col - or corner[0] < 0 - or corner[1] >= max_row - or corner[1] < 0 - ): + if corner[0] >= max_col or corner[0] < 0 or corner[1] >= max_row or corner[1] < 0: corners[corner_indx] = None - confirmed_facets[facet_id] = { - 'edges': confirm_facet_edges(corners, edges, tolerance, pixels) - } + confirmed_facets[facet_id] = {'edges': confirm_facet_edges(corners, edges, tolerance, pixels)} return confirmed_facets def confirm_facet_edges(corners, edges, tolerance, pixels): @@ -551,9 +437,7 @@ def confirm_facet_edges(corners, edges, tolerance, pixels): A, B, C = find_hom_line_2points(corner1, corner2) if A is None: continue - min_col, max_col, min_row, max_row = min_max_col_row( - edges, corner1, corner2 - ) + min_col, max_col, min_row, max_row = min_max_col_row(edges, corner1, corner2) edge_pixels = [] # confirming if indx % 2 == 0: @@ -579,9 +463,7 @@ def confirm_facet_edges(corners, edges, tolerance, pixels): return confirmed_edges def find_corners(confirmed_facets): - hel_corners = [ - None for _ in range(0, self.specifications.corners_per_heliostat) - ] + hel_corners = [None for _ in range(0, self.specifications.corners_per_heliostat)] for facet_indx, facet in confirmed_facets.items(): corners = [] edges = facet['edges'] @@ -648,9 +530,7 @@ def construct_points(confirmed_corners, corners3d): else: edges = CannyImg(img, canny_type=canny_types[i]) - confirmed_facets = confirm_facets( - expected_corners, edges, tolerance, pixels - ) + confirmed_facets = confirm_facets(expected_corners, edges, tolerance, pixels) confirmed_corners = find_corners(confirmed_facets) flag_break = True flag_less_than_6 = False @@ -683,9 +563,7 @@ def construct_points(confirmed_corners, corners3d): dist_coeff=self.dist_coeff, ) - expected_corners, _ = cv.projectPoints( - np.array(corners3d).astype('float32'), rvec, tvec, mtx, dist - ) + expected_corners, _ = cv.projectPoints(np.array(corners3d).astype('float32'), rvec, tvec, mtx, dist) expected_corners = expected_corners.reshape(-1, 2) expected_corners = expected_corners.tolist() previous_expected_corners = expected_corners.copy() diff --git a/contrib/app/ufacet-s/helio_scan/lib/ufacet_heliostat_3d_analysis.py b/contrib/app/ufacet-s/helio_scan/lib/ufacet_heliostat_3d_analysis.py index cbd49d25..0d09df99 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/ufacet_heliostat_3d_analysis.py +++ b/contrib/app/ufacet-s/helio_scan/lib/ufacet_heliostat_3d_analysis.py @@ -107,9 +107,7 @@ def surface_normal_heliostat(heliostat, option, specifications): def facet_center(top_left, bottom_right, top_right, bottom_left): e = np.array(bottom_right) - np.array(top_left) # direction vector of diagonal α f = np.array(bottom_left) - np.array(top_right) # direction vector of diagonal β - cdv = np.array(top_right) - np.array( - top_left - 
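# A simplified stand-in for the edge-based confirmation step above: blur the frame,
# compute a Canny edge map, and accept a predicted corner only if enough edge pixels
# fall within a small tolerance window around it.  The Canny thresholds (50, 150) and
# the window test are illustrative assumptions; the pipeline's CannyImg()/tolerance
# logic is more involved and checks edge support along the predicted facet edges.
import cv2 as cv
import numpy as np

def corner_supported_by_edges(img_bgr, corner_xy, tolerance=3, min_edge_pixels=5):
    gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
    gray = cv.GaussianBlur(gray, (5, 5), 0)
    edges = cv.Canny(gray, 50, 150)  # assumed thresholds
    col, row = int(round(corner_xy[0])), int(round(corner_xy[1]))
    max_row, max_col = edges.shape[:2]
    r0, r1 = max(0, row - tolerance), min(max_row, row + tolerance + 1)
    c0, c1 = max(0, col - tolerance), min(max_col, col + tolerance + 1)
    return int(np.count_nonzero(edges[r0:r1, c0:c1])) >= min_edge_pixels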
) # vector from two points in the two lines respectively + cdv = np.array(top_right) - np.array(top_left) # vector from two points in the two lines respectively fcd_cross = np.cross(f, cdv) fe_cross = np.cross(f, e) @@ -181,9 +179,7 @@ def translation(heliostat, specifications, confirm=False): for key, corners in sorted(heliostat.items()): new_corners = [] for corner in corners: - new_corners.append( - [corner[0] - center[0], corner[1] - center[1], corner[2] - center[2]] - ) + new_corners.append([corner[0] - center[0], corner[1] - center[1], corner[2] - center[2]]) translated_heliostat[key] = new_corners @@ -239,9 +235,7 @@ def find_best_translation(heliostat, heliostat_theoretical_dict=None): bottom_right_corner = heliostat_theoretical_dict[key][BOTTOM_RIGHT_CORNER_INDX] bottom_left_corner = heliostat_theoretical_dict[key][BOTTOM_LEFT_CORNER_INDX] - flat_center = facet_center( - top_left_corner, bottom_right_corner, top_right_corner, bottom_left_corner - ) + flat_center = facet_center(top_left_corner, bottom_right_corner, top_right_corner, bottom_left_corner) flat_centers_x_sum += flat_center[0] flat_centers_y_sum += flat_center[1] flat_centers_z_sum += flat_center[2] @@ -273,9 +267,9 @@ def rotation(heliostat, surface_normal, confirm=False): def rotation_around_surface_normal(heliostat, heliostat_theoretical_dict=None): surface_normal = np.array([0, 0, 1]) - a = np.array( - heliostat[BOTTOM_RIGHT_FACET_INDX][BOTTOM_RIGHT_CORNER_INDX] - ) - np.array(heliostat[TOP_LEFT_FACET_INDX][TOP_LEFT_CORNER_INDX]) + a = np.array(heliostat[BOTTOM_RIGHT_FACET_INDX][BOTTOM_RIGHT_CORNER_INDX]) - np.array( + heliostat[TOP_LEFT_FACET_INDX][TOP_LEFT_CORNER_INDX] + ) a = a / np.linalg.norm(a) b = np.array(heliostat[BOTTOM_LEFT_FACET_INDX][BOTTOM_LEFT_CORNER_INDX]) - np.array( @@ -283,14 +277,12 @@ def rotation_around_surface_normal(heliostat, heliostat_theoretical_dict=None): ) b = b / np.linalg.norm(b) - a_th = np.array( - heliostat_theoretical_dict[BOTTOM_RIGHT_FACET_INDX][BOTTOM_RIGHT_CORNER_INDX] - ) - np.array(heliostat_theoretical_dict[TOP_LEFT_FACET_INDX][TOP_LEFT_CORNER_INDX]) + a_th = np.array(heliostat_theoretical_dict[BOTTOM_RIGHT_FACET_INDX][BOTTOM_RIGHT_CORNER_INDX]) - np.array( + heliostat_theoretical_dict[TOP_LEFT_FACET_INDX][TOP_LEFT_CORNER_INDX] + ) a_th = a_th / np.linalg.norm(a_th) - b_th = np.array( - heliostat_theoretical_dict[BOTTOM_LEFT_FACET_INDX][BOTTOM_LEFT_CORNER_INDX] - ) - np.array( + b_th = np.array(heliostat_theoretical_dict[BOTTOM_LEFT_FACET_INDX][BOTTOM_LEFT_CORNER_INDX]) - np.array( heliostat_theoretical_dict[TOP_RIGHT_FACET_INDX][TOP_RIGHT_CORNER_INDX] ) b_th = b_th / np.linalg.norm(b_th) @@ -375,13 +367,7 @@ def find_best_scaling(heliostat, specifications, single_factor=False): bottom_right_corner = corners[BOTTOM_RIGHT_CORNER_INDX] bottom_left_corner = corners[BOTTOM_LEFT_CORNER_INDX] - corners = [ - top_left_corner, - top_right_corner, - bottom_right_corner, - bottom_left_corner, - top_left_corner, - ] + corners = [top_left_corner, top_right_corner, bottom_right_corner, bottom_left_corner, top_left_corner] for corner_indx in range(0, len(corners) - 1): corner = corners[corner_indx] next_corner = corners[corner_indx + 1] @@ -416,27 +402,13 @@ def find_best_scaling(heliostat, specifications, single_factor=False): def translate_rotate_scale( - input_heliostat, - option, - specifications=None, - heliostat_theoretical_dict=None, - confirm=False, + input_heliostat, option, specifications=None, heliostat_theoretical_dict=None, confirm=False ): - heliostat = translation( - 
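# A minimal sketch of the centering (translation) step above, assuming a heliostat is a
# dict mapping facet id -> four [x, y, z] corners.  Each facet center is approximated
# here as the mean of its four corners; facet_center() above instead intersects the two
# facet diagonals, which coincides with the corner mean for an ideally rectangular facet.
import numpy as np

def center_heliostat(heliostat):
    facet_centers = [np.mean(np.asarray(corners, dtype=float), axis=0)
                     for corners in heliostat.values()]
    center = np.mean(facet_centers, axis=0)
    return {key: [(np.asarray(c, dtype=float) - center).tolist() for c in corners]
            for key, corners in heliostat.items()}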
input_heliostat, confirm=confirm, specifications=specifications - ) - surface_normal = surface_normal_heliostat( - heliostat, option, specifications=specifications - ) + heliostat = translation(input_heliostat, confirm=confirm, specifications=specifications) + surface_normal = surface_normal_heliostat(heliostat, option, specifications=specifications) heliostat = rotation(heliostat, surface_normal, confirm=confirm) - heliostat = rotation_around_surface_normal( - heliostat, heliostat_theoretical_dict=heliostat_theoretical_dict - ) - heliostat = scaling( - heliostat, - specifications=specifications, - heliostat_theoretical_dict=heliostat_theoretical_dict, - ) + heliostat = rotation_around_surface_normal(heliostat, heliostat_theoretical_dict=heliostat_theoretical_dict) + heliostat = scaling(heliostat, specifications=specifications, heliostat_theoretical_dict=heliostat_theoretical_dict) return input_heliostat @@ -484,9 +456,7 @@ def plot_heliostat_3d( corners = [ heliostat_theoretical_dict[TOP_LEFT_FACET_INDX][TOP_LEFT_CORNER_INDX], heliostat_theoretical_dict[TOP_RIGHT_FACET_INDX][TOP_RIGHT_CORNER_INDX], - heliostat_theoretical_dict[BOTTOM_RIGHT_FACET_INDX][ - BOTTOM_RIGHT_CORNER_INDX - ], + heliostat_theoretical_dict[BOTTOM_RIGHT_FACET_INDX][BOTTOM_RIGHT_CORNER_INDX], heliostat_theoretical_dict[BOTTOM_LEFT_FACET_INDX][BOTTOM_LEFT_CORNER_INDX], ] xdata = [corner[0] for corner in corners] @@ -498,18 +468,10 @@ def plot_heliostat_3d( ax.scatter3D(xdata[3], ydata[3], zdata[3], facecolor='c') # draw center center = facet_center( - heliostat_theoretical_dict[specifications.centered_facet][ - TOP_LEFT_CORNER_INDX - ], - heliostat_theoretical_dict[specifications.centered_facet][ - BOTTOM_RIGHT_CORNER_INDX - ], - heliostat_theoretical_dict[specifications.centered_facet][ - TOP_RIGHT_CORNER_INDX - ], - heliostat_theoretical_dict[specifications.centered_facet][ - BOTTOM_LEFT_CORNER_INDX - ], + heliostat_theoretical_dict[specifications.centered_facet][TOP_LEFT_CORNER_INDX], + heliostat_theoretical_dict[specifications.centered_facet][BOTTOM_RIGHT_CORNER_INDX], + heliostat_theoretical_dict[specifications.centered_facet][TOP_RIGHT_CORNER_INDX], + heliostat_theoretical_dict[specifications.centered_facet][BOTTOM_LEFT_CORNER_INDX], ) ax.scatter3D(center[0], center[1], center[2], facecolor='tab:green') # draw heliostat @@ -567,10 +529,7 @@ def plot_heliostat_3d( # draw cameras if frame_camera_pose_dict != None: print('In plot_heliostat_3d(), figure_name = ', figure_name) - print( - 'In plot_heliostat_3d(), number of camera frames = ', - len(frame_camera_pose_dict.keys()), - ) + print('In plot_heliostat_3d(), number of camera frames = ', len(frame_camera_pose_dict.keys())) idx = 0 for rvec_tvec in dt.list_of_values_in_sorted_key_order( frame_camera_pose_dict @@ -604,9 +563,7 @@ def plot_heliostat_3d( # plt.show() path_figure_base = os.path.join(saving_path, figure_name) path_figure = path_figure_base + '.png' - print( - 'In plot_heliostat_3d(), saving figure:', path_figure - ) # ?? SCAFFOLDING RCB -- TEMPORARY ? + print('In plot_heliostat_3d(), saving figure:', path_figure) # ?? SCAFFOLDING RCB -- TEMPORARY ? plt.savefig(path_figure, dpi=1200) if frame_camera_pose_dict != None: # ?? SCAFFOLDING RCB -- CALL plot_and_save_plane_views(path_figure_base, ax) HERE INSTEAD. @@ -654,25 +611,19 @@ def plot_heliostat_3d( # XY ax.view_init(90, -90) # x-y plane view path_figure = path_figure_base + '_xy' + '.png' - print( - 'In plot_heliostat_3d(), saving figure:', path_figure - ) # ?? SCAFFOLDING RCB -- TEMPORARY ? 
+ print('In plot_heliostat_3d(), saving figure:', path_figure) # ?? SCAFFOLDING RCB -- TEMPORARY ? plt.savefig(path_figure, dpi=1200) # XZ ax.view_init(0, -90) # x-z plane view path_figure = path_figure_base + '_xz' + '.png' - print( - 'In plot_heliostat_3d(), saving figure:', path_figure - ) # ?? SCAFFOLDING RCB -- TEMPORARY ? + print('In plot_heliostat_3d(), saving figure:', path_figure) # ?? SCAFFOLDING RCB -- TEMPORARY ? plt.savefig(path_figure, dpi=1200) # YZ ax.view_init(0, 0) # y-z plane view path_figure = path_figure_base + '_yz' + '.png' - print( - 'In plot_heliostat_3d(), saving figure:', path_figure - ) # ?? SCAFFOLDING RCB -- TEMPORARY ? + print('In plot_heliostat_3d(), saving figure:', path_figure) # ?? SCAFFOLDING RCB -- TEMPORARY ? plt.savefig(path_figure, dpi=1200) plt.close() @@ -707,9 +658,7 @@ def plot_heliostat_2d( corners = [ heliostat_theoretical_dict[TOP_LEFT_FACET_INDX][TOP_LEFT_CORNER_INDX], heliostat_theoretical_dict[TOP_RIGHT_FACET_INDX][TOP_RIGHT_CORNER_INDX], - heliostat_theoretical_dict[BOTTOM_RIGHT_FACET_INDX][ - BOTTOM_RIGHT_CORNER_INDX - ], + heliostat_theoretical_dict[BOTTOM_RIGHT_FACET_INDX][BOTTOM_RIGHT_CORNER_INDX], heliostat_theoretical_dict[BOTTOM_LEFT_FACET_INDX][BOTTOM_LEFT_CORNER_INDX], ] xdata = [corner[0] for corner in corners] @@ -720,18 +669,10 @@ def plot_heliostat_2d( plt.scatter(xdata[3], ydata[3], facecolor='c') # draw center center = facet_center( - heliostat_theoretical_dict[specifications.centered_facet][ - TOP_LEFT_CORNER_INDX - ], - heliostat_theoretical_dict[specifications.centered_facet][ - BOTTOM_RIGHT_CORNER_INDX - ], - heliostat_theoretical_dict[specifications.centered_facet][ - TOP_RIGHT_CORNER_INDX - ], - heliostat_theoretical_dict[specifications.centered_facet][ - BOTTOM_LEFT_CORNER_INDX - ], + heliostat_theoretical_dict[specifications.centered_facet][TOP_LEFT_CORNER_INDX], + heliostat_theoretical_dict[specifications.centered_facet][BOTTOM_RIGHT_CORNER_INDX], + heliostat_theoretical_dict[specifications.centered_facet][TOP_RIGHT_CORNER_INDX], + heliostat_theoretical_dict[specifications.centered_facet][BOTTOM_LEFT_CORNER_INDX], ) plt.scatter(center[0], center[1], facecolor='tab:green') for _, corners in heliostat_theoretical_dict.items(): @@ -910,10 +851,7 @@ def plot_canting_angles( canting_angles = offset_canting_angles if plot: - df = pd.DataFrame( - {'Nx': anglesx}, - index=[i + 1 for i in range(0, specifications.facets_per_heliostat)], - ) + df = pd.DataFrame({'Nx': anglesx}, index=[i + 1 for i in range(0, specifications.facets_per_heliostat)]) ax = df.plot.bar(rot=0, color={"Nx": "tab:blue"}, figsize=(15, 10)) title = title_prefix + ': X-Component of Surface Normal' if normal_wrt_average: @@ -931,10 +869,7 @@ def plot_canting_angles( plt.savefig(saving_path + '/' + figure_name) plt.close() - df = pd.DataFrame( - {'Ny': anglesy}, - index=[i + 1 for i in range(0, specifications.facets_per_heliostat)], - ) + df = pd.DataFrame({'Ny': anglesy}, index=[i + 1 for i in range(0, specifications.facets_per_heliostat)]) ax = df.plot.bar(rot=0, color={"Ny": "tab:orange"}, figsize=(15, 10)) title = title_prefix + ': Y-Component of Surface Normal' if normal_wrt_average: @@ -951,10 +886,7 @@ def plot_canting_angles( plt.savefig(saving_path + '/' + figure_name) plt.close() - df = pd.DataFrame( - {'Nz': anglesz}, - index=[i + 1 for i in range(0, specifications.facets_per_heliostat)], - ) + df = pd.DataFrame({'Nz': anglesz}, index=[i + 1 for i in range(0, specifications.facets_per_heliostat)]) ax = df.plot.bar(rot=0, color={"Nz": "magenta"}, 
figsize=(15, 10)) title = title_prefix + ': Z-Component of Surface Normal' if normal_wrt_average: @@ -1062,9 +994,7 @@ def plot_pose_estimation( figure_name += '.png' T = [0, 0, 0] if best_translation: - T = find_best_translation( - heliostat, heliostat_theoretical_dict=heliostat_theoretical_dict - ) + T = find_best_translation(heliostat, heliostat_theoretical_dict=heliostat_theoretical_dict) xdiff = [] ydiff = [] zdiff = [] @@ -1074,9 +1004,7 @@ def plot_pose_estimation( top_right_corner = corners[TOP_RIGHT_CORNER_INDX] bottom_right_corner = corners[BOTTOM_RIGHT_CORNER_INDX] bottom_left_corner = corners[BOTTOM_LEFT_CORNER_INDX] - center = facet_center( - top_left_corner, bottom_right_corner, top_right_corner, bottom_left_corner - ) + center = facet_center(top_left_corner, bottom_right_corner, top_right_corner, bottom_left_corner) x, y, z = center x -= T[0] y -= T[1] @@ -1085,9 +1013,7 @@ def plot_pose_estimation( top_right_corner = heliostat_theoretical_dict[key][TOP_RIGHT_CORNER_INDX] bottom_right_corner = heliostat_theoretical_dict[key][BOTTOM_RIGHT_CORNER_INDX] bottom_left_corner = heliostat_theoretical_dict[key][BOTTOM_LEFT_CORNER_INDX] - center_th = facet_center( - top_left_corner, bottom_right_corner, top_right_corner, bottom_left_corner - ) + center_th = facet_center(top_left_corner, bottom_right_corner, top_right_corner, bottom_left_corner) xth, yth, zth = center_th xdiff.append(x - xth) ydiff.append(y - yth) @@ -1099,13 +1025,7 @@ def plot_pose_estimation( index=[i + 1 for i in range(0, specifications.facets_per_heliostat)], ) ax = df.plot.bar( - rot=0, - color={ - "X offset": "tab:blue", - "Y offset": "tab:orange", - "Z offset": "cyan", - }, - figsize=(15, 10), + rot=0, color={"X offset": "tab:blue", "Y offset": "tab:orange", "Z offset": "cyan"}, figsize=(15, 10) ) plt.grid(axis='y') plt.title(title_prefix + ': Centroid Difference') @@ -1154,12 +1074,9 @@ def plot_pose_rotation_estimation( pose_rotation_estimations[key] = [rot_z] if plot: df = pd.DataFrame( - {'Rotation About Z': rot_z_deg_list}, - index=[i + 1 for i in range(0, specifications.facets_per_heliostat)], - ) - ax = df.plot.bar( - rot=0, color={'Rotation About Z': 'tab:blue'}, figsize=(15, 10) + {'Rotation About Z': rot_z_deg_list}, index=[i + 1 for i in range(0, specifications.facets_per_heliostat)] ) + ax = df.plot.bar(rot=0, color={'Rotation About Z': 'tab:blue'}, figsize=(15, 10)) plt.grid(axis='y') plt.title(title_prefix + ': Facet Z Rotation') plt.xlabel('Facet id') @@ -1183,9 +1100,7 @@ def plot_square_sides_quality( ): scale = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) if best_scaling: - scale = find_best_scaling( - heliostat, specifications=specifications, single_factor=False - ) + scale = find_best_scaling(heliostat, specifications=specifications, single_factor=False) if isinstance(scale, list): scale_x, scale_y, scale_z = scale scale = np.array([[scale_x, 0, 0], [0, scale_y, 0], [0, 0, scale_z]]) @@ -1205,12 +1120,7 @@ def plot_square_sides_quality( bottom_right_corner = corners[BOTTOM_RIGHT_CORNER_INDX] bottom_left_corner = corners[BOTTOM_LEFT_CORNER_INDX] - corners = [ - top_left_corner, - top_right_corner, - bottom_right_corner, - bottom_left_corner, - ] + corners = [top_left_corner, top_right_corner, bottom_right_corner, bottom_left_corner] corners_arr = np.array(corners).reshape(-1, 3) corners_arr = corners_arr.T new_corners = np.matmul(scale, corners_arr) @@ -1306,12 +1216,8 @@ def plot_square_sides_quality( plt.title(title_prefix) plt.xlabel('Facet id') plt.ylabel('Meters') - bottom = min( - 
[min(top_error), min(right_error), min(bottom_error), min(left_error)] - ) - top = max( - [max(top_error), max(right_error), max(bottom_error), max(left_error)] - ) + bottom = min([min(top_error), min(right_error), min(bottom_error), min(left_error)]) + top = max([max(top_error), max(right_error), max(bottom_error), max(left_error)]) x = [indx for indx in range(-1, specifications.facets_per_heliostat + 1)] y = [rmse for _ in range(-1, specifications.facets_per_heliostat + 1)] plt.plot(x, y, linewidth=1.5, color='red', linestyle='dashed') @@ -1344,9 +1250,7 @@ def plot_square_diagonals_quality( ): scale = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) if best_scaling: - scale = find_best_scaling( - heliostat, specifications=specifications, single_factor=False - ) + scale = find_best_scaling(heliostat, specifications=specifications, single_factor=False) if isinstance(scale, list): scale_x, scale_y, scale_z = scale scale = np.array([[scale_x, 0, 0], [0, scale_y, 0], [0, 0, scale_z]]) @@ -1365,12 +1269,7 @@ def plot_square_diagonals_quality( bottom_left_corner = corners[BOTTOM_LEFT_CORNER_INDX] # ?? SCAFFOLDING RCB -- WHAT IS THIS? IS IT NECESSARY? IF NOT, THEN RIP OUT, HERE AND IN SQUARE SIDE QUALITY FXN. - corners = [ - top_left_corner, - top_right_corner, - bottom_right_corner, - bottom_left_corner, - ] + corners = [top_left_corner, top_right_corner, bottom_right_corner, bottom_left_corner] corners_arr = np.array(corners).reshape(-1, 3) corners_arr = corners_arr.T new_corners = np.matmul(scale, corners_arr) @@ -1382,9 +1281,7 @@ def plot_square_diagonals_quality( bottom_right_corner = new_corners[BOTTOM_RIGHT_CORNER_INDX] bottom_left_corner = new_corners[BOTTOM_LEFT_CORNER_INDX] - std_diagonal_dist = np.sqrt( - specifications.facet_width**2 + specifications.facet_height**2 - ) + std_diagonal_dist = np.sqrt(specifications.facet_width**2 + specifications.facet_height**2) bltr_dist = distance3d(bottom_left_corner, top_right_corner) bltr_error = bltr_dist - std_diagonal_dist @@ -1426,18 +1323,12 @@ def plot_square_diagonals_quality( # plt.close() df = pd.DataFrame( - { - 'BottomLeft-TopRight Error': bltr_error_list, - 'BottomRight-TopLeft Error': brtl_error_list, - }, + {'BottomLeft-TopRight Error': bltr_error_list, 'BottomRight-TopLeft Error': brtl_error_list}, index=[i + 1 for i in range(0, specifications.facets_per_heliostat)], ) ax = df.plot.bar( rot=0, - color={ - "BottomLeft-TopRight Error": "tab:blue", - "BottomRight-TopLeft Error": "tab:orange", - }, + color={"BottomLeft-TopRight Error": "tab:blue", "BottomRight-TopLeft Error": "tab:orange"}, figsize=(15, 10), ) plt.title(title_prefix) @@ -1560,27 +1451,16 @@ def plot_square_diagonal_offset_quality( def draw_annotated_frame_figure( - corner_xy_list, - input_video_body, - input_frame_dir, - input_frame_id_format, - frame_id, - hel_name, - explain, - output_dir, + corner_xy_list, input_video_body, input_frame_dir, input_frame_id_format, frame_id, hel_name, explain, output_dir ): # Construct annotations. annotation_list = [] # Data: Observed corners in image space. - add_analysis_annotations( - corner_xy_list, hel_name, False, True, False, 'c', 0.2, 0.6, 5, annotation_list - ) + add_analysis_annotations(corner_xy_list, hel_name, False, True, False, 'c', 0.2, 0.6, 5, annotation_list) # Construct figure. plt.figure() # Fetch and draw image file. 
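# Worked sketch of the facet-diagonal quality check above: the nominal diagonal length
# follows from the facet width and height, and each diagonal's error is the measured
# corner-to-corner distance minus that nominal length.  facet_width and facet_height are
# assumed to be in meters, matching the 'Meters' axis label used in these plots.
import numpy as np

def diagonal_errors(top_left, top_right, bottom_right, bottom_left, facet_width, facet_height):
    nominal = np.sqrt(facet_width**2 + facet_height**2)
    bltr = np.linalg.norm(np.asarray(top_right, dtype=float) - np.asarray(bottom_left, dtype=float))
    brtl = np.linalg.norm(np.asarray(top_left, dtype=float) - np.asarray(bottom_right, dtype=float))
    return bltr - nominal, brtl - nominal

# Example: a 1.22 m x 1.22 m facet with both measured diagonals at 1.74 m gives
# errors of roughly +0.015 m each.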
- frame_body_ext = upf.frame_file_body_ext_given_frame_id( - input_video_body, frame_id, input_frame_id_format - ) + frame_body_ext = upf.frame_file_body_ext_given_frame_id(input_video_body, frame_id, input_frame_id_format) frame_dir_body_ext = os.path.join(input_frame_dir, frame_body_ext) # print('In draw_annotated_frame_figure(), reading frame image file: ', frame_dir_body_ext) frame_img = cv.imread(frame_dir_body_ext) @@ -1600,33 +1480,20 @@ def draw_annotated_frame_figure( context_str='Heliostats3dInference.save_annotated_frame_figure()', save=True, output_dir=output_dir, - output_body=( - hel_name + '_image_analysis_fig' - ), # plot_image_figure() will add ".png" + output_body=(hel_name + '_image_analysis_fig'), # plot_image_figure() will add ".png" dpi=1000, # 250, include_figure_idx_in_filename=False, ) def draw_annotated_frame_image( - corner_xy_list, - input_video_body, - input_frame_dir, - input_frame_id_format, - frame_id, - hel_name, - note, - output_dir, + corner_xy_list, input_video_body, input_frame_dir, input_frame_id_format, frame_id, hel_name, note, output_dir ): # Construct annotations. annotation_list = [] # Data: Observed corners in image space. - add_analysis_annotations( - corner_xy_list, hel_name, True, False, False, 'r', 0.2, 0.6, 5, annotation_list - ) - add_analysis_annotations( - corner_xy_list, hel_name, False, True, False, 'c', 0.2, 0.6, 5, annotation_list - ) + add_analysis_annotations(corner_xy_list, hel_name, True, False, False, 'r', 0.2, 0.6, 5, annotation_list) + add_analysis_annotations(corner_xy_list, hel_name, False, True, False, 'c', 0.2, 0.6, 5, annotation_list) if (note is not None) and len(note) > 0: note_xy = [150, 150] # Upper left corner of the image. annotation_list.append( @@ -1634,18 +1501,11 @@ def draw_annotated_frame_image( 'text', [note_xy], note, - rctxt.RenderControlText( - fontsize=4, - color='r', - horizontalalignment='left', - verticalalignment='top', - ), + rctxt.RenderControlText(fontsize=4, color='r', horizontalalignment='left', verticalalignment='top'), ) ) # Fetch image file. 
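# A generic matplotlib sketch of the frame-annotation flow above: read the frame, overlay
# the tracked corner points and a heliostat label, and save the figure.  The pipeline's
# plot_image_figure()/PlotAnnotation helpers carry more styling options; this stand-in
# uses cv2 and matplotlib directly.
import cv2 as cv
import matplotlib.pyplot as plt

def save_annotated_frame(frame_path, corner_xy_list, hel_name, output_path):
    img = cv.cvtColor(cv.imread(frame_path), cv.COLOR_BGR2RGB)
    plt.figure()
    plt.imshow(img)
    cols = [xy[0] for xy in corner_xy_list]
    rows = [xy[1] for xy in corner_xy_list]
    plt.scatter(cols, rows, s=4, c='c', marker='.')
    plt.text(cols[0], rows[0], hel_name, color='c', fontsize=5)
    plt.axis('off')
    plt.savefig(output_path, dpi=1000)
    plt.close()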
- frame_body_ext = upf.frame_file_body_ext_given_frame_id( - input_video_body, frame_id, input_frame_id_format - ) + frame_body_ext = upf.frame_file_body_ext_given_frame_id(input_video_body, frame_id, input_frame_id_format) frame_dir_body_ext = os.path.join(input_frame_dir, frame_body_ext) # print('In draw_annotated_frame_image(), reading frame image file: ', frame_dir_body_ext) frame_img = cv.imread(frame_dir_body_ext) @@ -1676,19 +1536,13 @@ def add_analysis_annotations( facet_boundary_list = construct_facet_boundaries(corner_xy_list) for facet_boundary in facet_boundary_list: boundary_style = rcps.outline(color=color, linewidth=linewidth) - annotation_list.append( - pa.PlotAnnotation('point_seq', facet_boundary, None, boundary_style) - ) + annotation_list.append(pa.PlotAnnotation('point_seq', facet_boundary, None, boundary_style)) if draw_markers: point_style = rcps.marker(color=color, marker='.', markersize=markersize) - annotation_list.append( - pa.PlotAnnotation('point_seq', corner_xy_list, None, point_style) - ) + annotation_list.append(pa.PlotAnnotation('point_seq', corner_xy_list, None, point_style)) if draw_label: label_xy = g2d.label_point(corner_xy_list) - annotation_list.append( - pa.PlotAnnotation('text', [label_xy], text, rctxt.bold(fontsize, color)) - ) + annotation_list.append(pa.PlotAnnotation('text', [label_xy], text, rctxt.bold(fontsize, color))) def construct_facet_boundaries(corner_xy_list): @@ -1719,17 +1573,12 @@ def analyze_and_render_heliostat_3d( tracked_frame_camera_pose_dict=None, processed_frame_camera_pose_dict=None, ): - flat_hel_name = heliostat_name_given_heliostat_3d_dir_body_ext( - flat_hel_dir_body_ext - ) + flat_hel_name = heliostat_name_given_heliostat_3d_dir_body_ext(flat_hel_dir_body_ext) hel_name = heliostat_name_given_heliostat_3d_dir_body_ext(hel_dir_body_ext) # Generate plots. flat_heliostat = read_txt_file_to_heliostat(flat_hel_dir_body_ext, specifications) - msg = ( - 'In analyze_and_render_3d_heliostat_model(), calling generate_plots() for heliostat ' - + hel_name - ) + msg = 'In analyze_and_render_3d_heliostat_model(), calling generate_plots() for heliostat ' + hel_name if explain is None: explain_2 = None else: @@ -1750,23 +1599,13 @@ def analyze_and_render_heliostat_3d( tracked_frame_camera_pose_dict=tracked_frame_camera_pose_dict, processed_frame_camera_pose_dict=processed_frame_camera_pose_dict, ) - print( - 'In analyze_and_render_3d_heliostat_model(), generate_plots() for heliostat ' - + hel_name - + ' finished.' - ) + print('In analyze_and_render_3d_heliostat_model(), generate_plots() for heliostat ' + hel_name + ' finished.') # Generate csv files. msg.replace('generate_plots()', 'generate_csv()') print(msg) - generate_csv( - hel_dir_body_ext, output_dir, specifications, flat_heliostat, option='noRotate' - ) - print( - 'In analyze_and_render_3d_heliostat_model(), generate_csv() for heliostat ' - + hel_name - + ' finished.' 
- ) + generate_csv(hel_dir_body_ext, output_dir, specifications, flat_heliostat, option='noRotate') + print('In analyze_and_render_3d_heliostat_model(), generate_csv() for heliostat ' + hel_name + ' finished.') def generate_plots( @@ -1782,14 +1621,10 @@ def generate_plots( tracked_frame_camera_pose_dict=None, processed_frame_camera_pose_dict=None, ): - heliostat = read_txt_file_to_heliostat( - filename=filename, specifications=specifications - ) + heliostat = read_txt_file_to_heliostat(filename=filename, specifications=specifications) if heliostat is None: return None - hel_name = heliostat_name_given_heliostat_3d_dir_body_ext( - filename - ) # ?? SCAFFOLDING RCB -- CRUFTY. FIX THIS. + hel_name = heliostat_name_given_heliostat_3d_dir_body_ext(filename) # ?? SCAFFOLDING RCB -- CRUFTY. FIX THIS. heliostat_path = output_path # ?? SCAFFOLDING RCB -- CLEAN THIS UP ft.create_directories_if_necessary(heliostat_path) @@ -2014,9 +1849,7 @@ def generate_plots( # camera_rvec=camera_rvec, camera_tvec=camera_tvec, camera_matrix=camera_matrix, distortion_coefficients=distortion_coefficients) -def generate_csv( - hel_dir_body_ext, output_path, specifications, heliostat_theoretical_dict, option -): +def generate_csv(hel_dir_body_ext, output_path, specifications, heliostat_theoretical_dict, option): def save_csv(dict, output_path=' ', name=' '): header = ['Heliostat'] for facet_id in range(0, specifications.facets_per_heliostat): @@ -2055,9 +1888,7 @@ def save_csv(dict, output_path=' ', name=' '): square_diagonal_offsets = {} # ?? SCAFFOLDING RCB -- USED TO BE FOR LOOP HERE OVER HELIOSTAT NAMES, AND CSV FILES WERE COMPILATION ACROSS HELIOSTATS. RETURN TO THIS? # try: - heliostat = read_txt_file_to_heliostat( - filename=hel_dir_body_ext, specifications=specifications - ) + heliostat = read_txt_file_to_heliostat(filename=hel_dir_body_ext, specifications=specifications) hel_name = heliostat_name_given_heliostat_3d_dir_body_ext(hel_dir_body_ext) # if heliostat is None: # continue @@ -2073,11 +1904,7 @@ def save_csv(dict, output_path=' ', name=' '): plot=False, option=option, ) - save_csv( - canting_angles, - output_path=output_path, - name=hel_name + '_canglesXYZ_' + option + '.csv', - ) + save_csv(canting_angles, output_path=output_path, name=hel_name + '_canglesXYZ_' + option + '.csv') wrt_avg_option = 'wrtAvg' canting_angles[hel_name] = plot_canting_angles( heliostat, @@ -2088,11 +1915,7 @@ def save_csv(dict, output_path=' ', name=' '): option=wrt_avg_option, normal_wrt_average=True, ) - save_csv( - canting_angles, - output_path=output_path, - name=hel_name + '_canglesXYZ_' + wrt_avg_option + '.csv', - ) + save_csv(canting_angles, output_path=output_path, name=hel_name + '_canglesXYZ_' + wrt_avg_option + '.csv') """ Pose Estimation """ @@ -2104,11 +1927,7 @@ def save_csv(dict, output_path=' ', name=' '): plot=False, option=option, ) - save_csv( - pose_estimations, - output_path=output_path, - name=hel_name + '_poseXYZ_' + option + '.csv', - ) + save_csv(pose_estimations, output_path=output_path, name=hel_name + '_poseXYZ_' + option + '.csv') """ Pose Rotation Estimation """ @@ -2120,11 +1939,7 @@ def save_csv(dict, output_path=' ', name=' '): plot=False, option=option, ) - save_csv( - pose_rotation_estimations, - output_path=output_path, - name=hel_name + '_poseRotZ_' + option + '.csv', - ) + save_csv(pose_rotation_estimations, output_path=output_path, name=hel_name + '_poseRotZ_' + option + '.csv') """ Square Sides Quality Error """ @@ -2136,11 +1951,7 @@ def save_csv(dict, output_path=' ', name=' '): 
plot=False, option=option, ) - save_csv( - square_sides_errors, - output_path=output_path, - name=hel_name + '_serrorTRBL_' + option + '.csv', - ) + save_csv(square_sides_errors, output_path=output_path, name=hel_name + '_serrorTRBL_' + option + '.csv') """ Square Diagonals Quality Error """ @@ -2152,11 +1963,7 @@ def save_csv(dict, output_path=' ', name=' '): plot=False, option=option, ) - save_csv( - square_diagonals_errors, - output_path=output_path, - name=hel_name + '_diagerrors_' + option + '.csv', - ) + save_csv(square_diagonals_errors, output_path=output_path, name=hel_name + '_diagerrors_' + option + '.csv') """ Square Diagonal Offsets Quality """ @@ -2168,11 +1975,7 @@ def save_csv(dict, output_path=' ', name=' '): plot=False, option=option, ) - save_csv( - square_diagonal_offsets, - output_path=output_path, - name=hel_name + '_diagoffsets_' + option + '.csv', - ) + save_csv(square_diagonal_offsets, output_path=output_path, name=hel_name + '_diagoffsets_' + option + '.csv') # except: @@ -2192,9 +1995,7 @@ def save_csv(dict, output_path=' ', name=' '): def heliostat_name_given_heliostat_3d_dir_body_ext( heliostat_3d_dir_body_ext, ): # ?? SCAFFOLDING RCB -- MAKE THIS GENERAL, CORRECT, ERROR-CHECKING. (IT'S LATE, AND I'M OUT OF TIME.) - heliostat_3d_dir, heliostat_3d_body, heliostat_3d_ext = ft.path_components( - heliostat_3d_dir_body_ext - ) + heliostat_3d_dir, heliostat_3d_body, heliostat_3d_ext = ft.path_components(heliostat_3d_dir_body_ext) if heliostat_3d_body.find('distorted') == -1: # Case 1: No projected/distorted substrings. tokens = heliostat_3d_body.split('_') @@ -2214,23 +2015,15 @@ def heliostat_name_given_heliostat_3d_dir_body_ext( def corners_3d_dir_body_ext( - input_video_body, - hel_name, - projected_or_confirmed_str, - distorted_or_undistorted_str, - corners_3d_dir, + input_video_body, hel_name, projected_or_confirmed_str, distorted_or_undistorted_str, corners_3d_dir ): # Assemble filename body. c_3d_body = hel_name if (input_video_body is not None) and (len(input_video_body) > 0): c_3d_body = input_video_body + '_' + c_3d_body - if (projected_or_confirmed_str is not None) and ( - len(projected_or_confirmed_str) > 0 - ): + if (projected_or_confirmed_str is not None) and (len(projected_or_confirmed_str) > 0): c_3d_body += '_' + projected_or_confirmed_str - if (distorted_or_undistorted_str is not None) and ( - len(distorted_or_undistorted_str) > 0 - ): + if (distorted_or_undistorted_str is not None) and (len(distorted_or_undistorted_str) > 0): c_3d_body += '_' + distorted_or_undistorted_str c_3d_body += '_' + 'corners_3d' # Add extension and directory. @@ -2263,16 +2056,9 @@ def save_heliostat_3d( # Write the 3-d corner file. 
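# Sketch of the 3-d corner file naming and writing convention above.  The name parts are
# optional: the video body prefix, the 'projected'/'confirmed' tag, and the
# 'distorted'/'undistorted' tag are joined with underscores before the fixed
# 'corners_3d' suffix.  The '.csv' extension here is an assumption for illustration.
import csv
import os

def write_corners_3d(output_dir, hel_name, corner_xyz_list,
                     input_video_body=None, projected_or_confirmed='confirmed',
                     distorted_or_undistorted='undistorted'):
    body = hel_name
    if input_video_body:
        body = input_video_body + '_' + body
    if projected_or_confirmed:
        body += '_' + projected_or_confirmed
    if distorted_or_undistorted:
        body += '_' + distorted_or_undistorted
    body += '_corners_3d'
    path = os.path.join(output_dir, body + '.csv')  # assumed extension
    with open(path, 'w', newline='') as f:
        csv.writer(f).writerows(corner_xyz_list)  # one [x, y, z] row per corner
    return path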
ft.create_directories_if_necessary(output_dir) output_heliostat_3d_dir_body_ext = corners_3d_dir_body_ext( - input_video_body, - hel_name, - projected_or_confirmed_str, - distorted_or_undistorted_str, - output_dir, - ) - print( - 'In Heliostats3dInference.save_heliostat_3d(), writing file: ', - output_heliostat_3d_dir_body_ext, + input_video_body, hel_name, projected_or_confirmed_str, distorted_or_undistorted_str, output_dir ) + print('In Heliostats3dInference.save_heliostat_3d(), writing file: ', output_heliostat_3d_dir_body_ext) with open(output_heliostat_3d_dir_body_ext, "w") as output_stream: wr = csv.writer(output_stream) wr.writerows(corner_xyz_list) @@ -2323,25 +2109,19 @@ def plot_and_save_plane_views(path_figure_base, ax): # XY ax.view_init(90, -90) # x-y plane view path_figure = path_figure_base + '_xy' + '.png' - print( - 'In plot_heliostat_3d(), saving figure:', path_figure - ) # ?? SCAFFOLDING RCB -- TEMPORARY ? + print('In plot_heliostat_3d(), saving figure:', path_figure) # ?? SCAFFOLDING RCB -- TEMPORARY ? plt.savefig(path_figure, dpi=1200) # XZ ax.view_init(0, -90) # x-z plane view path_figure = path_figure_base + '_xz' + '.png' - print( - 'In plot_heliostat_3d(), saving figure:', path_figure - ) # ?? SCAFFOLDING RCB -- TEMPORARY ? + print('In plot_heliostat_3d(), saving figure:', path_figure) # ?? SCAFFOLDING RCB -- TEMPORARY ? plt.savefig(path_figure, dpi=1200) # YZ ax.view_init(0, 0) # y-z plane view path_figure = path_figure_base + '_yz' + '.png' - print( - 'In plot_heliostat_3d(), saving figure:', path_figure - ) # ?? SCAFFOLDING RCB -- TEMPORARY ? + print('In plot_heliostat_3d(), saving figure:', path_figure) # ?? SCAFFOLDING RCB -- TEMPORARY ? plt.savefig(path_figure, dpi=1200) @@ -2429,12 +2209,8 @@ def plot_heliostat_with_camera_poses( # draw cameras if dict_of_frame_dicts != None: - plot_heliostat_with_camera_poses_aux( - ax, dict_of_frame_dicts, False, style=tracked_style - ) - plot_heliostat_with_camera_poses_aux( - ax, dict_of_frame_dicts, True, style=processed_style - ) + plot_heliostat_with_camera_poses_aux(ax, dict_of_frame_dicts, False, style=tracked_style) + plot_heliostat_with_camera_poses_aux(ax, dict_of_frame_dicts, True, style=processed_style) # plt.legend() ax3d.set_3d_axes_equal(ax) @@ -2446,18 +2222,14 @@ def plot_heliostat_with_camera_poses( ft.create_directories_if_necessary(saving_path) path_figure_base = os.path.join(saving_path, figure_name) path_figure = path_figure_base + '.png' - print( - 'In plot_heliostat_3d(), saving figure:', path_figure - ) # ?? SCAFFOLDING RCB -- TEMPORARY ? + print('In plot_heliostat_3d(), saving figure:', path_figure) # ?? SCAFFOLDING RCB -- TEMPORARY ? plt.savefig(path_figure, dpi=1200) plot_and_save_plane_views(path_figure_base, ax) plt.close() -def plot_heliostat_with_camera_poses_aux( - ax, dict_of_frame_dicts, only_use_for_metrology, style, label=None -): +def plot_heliostat_with_camera_poses_aux(ax, dict_of_frame_dicts, only_use_for_metrology, style, label=None): # Collect individual coordinate lists. x_list = [] y_list = [] diff --git a/contrib/app/ufacet-s/helio_scan/lib/ufacet_pipeline_clear.py b/contrib/app/ufacet-s/helio_scan/lib/ufacet_pipeline_clear.py index 398fdac2..43bf5a78 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/ufacet_pipeline_clear.py +++ b/contrib/app/ufacet-s/helio_scan/lib/ufacet_pipeline_clear.py @@ -30,11 +30,6 @@ def prepare_render_directory(output_render_dir, delete_suffix, render_control): # For cases 1 and 2, we want to clear out previous rendering results. 
# For case 3, we want to leave previous results in place, and selectively generate the new material using the # render control flags. - if (not ft.directory_is_empty(output_render_dir)) and ( - render_control.clear_previous == True - ): - print( - 'In prepare_render_directory(), deleting previous render files from:', - output_render_dir, - ) + if (not ft.directory_is_empty(output_render_dir)) and (render_control.clear_previous == True): + print('In prepare_render_directory(), deleting previous render files from:', output_render_dir) ft.delete_files_in_directory(output_render_dir, ('*' + delete_suffix)) diff --git a/contrib/app/ufacet-s/helio_scan/lib/ufacet_pipeline_frame.py b/contrib/app/ufacet-s/helio_scan/lib/ufacet_pipeline_frame.py index 8cd051cb..f2b5c6e9 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/ufacet_pipeline_frame.py +++ b/contrib/app/ufacet-s/helio_scan/lib/ufacet_pipeline_frame.py @@ -42,9 +42,7 @@ def frame_id_given_frame_id_str(frame_id_str): return int(frame_id_str) -def frame_file_body_ext_given_frame_id( - input_video_body, frame_id, input_frame_id_format -): +def frame_file_body_ext_given_frame_id(input_video_body, frame_id, input_frame_id_format): """Our extracted video file names are of the form: VideoFileBody.nnnnnn.JPG where the "nnnnnn" is a zero-padded numerical string such as "001258" and the @@ -88,9 +86,7 @@ def frame_file_body_ext_given_frame_id_str(input_video_body: str, frame_id_str: return input_video_body + '.' + frame_id_str + '.JPG' -def frame_id_str_given_prefix_number_KeyWord_body_ext( - prefix_number_keyword_body_ext, keyword -): +def frame_id_str_given_prefix_number_KeyWord_body_ext(prefix_number_keyword_body_ext, keyword): # For multiple items (key corners, key frame tracks, ...), our canonical file names are of the form: # VideoFileBody_nnnnnn__fxnl.csv # where the "nnnnnn" is a zero-padded numerical string such as "001258" and @@ -252,18 +248,14 @@ def frame_id_str_given_key_corners_body_ext(key_corners_body_ext): # VideoFileBody_nnnnnn_corners_fxnl.csv # where the "nnnnnn" is a zero-padded numerical string such as "001258" # - return frame_id_str_given_prefix_number_KeyWord_body_ext( - key_corners_body_ext, 'corners' - ) + return frame_id_str_given_prefix_number_KeyWord_body_ext(key_corners_body_ext, 'corners') def frame_id_given_key_corners_body_ext(key_corners_body_ext): # Our key corners FrameNameXyList file names are of the form: # VideoFileBody_nnnnnn_corners_fxnl.csv # where the "nnnnnn" is a zero-padded numerical string such as "001258" - return frame_id_given_frame_id_str( - frame_id_str_given_key_corners_body_ext(key_corners_body_ext) - ) + return frame_id_given_frame_id_str(frame_id_str_given_key_corners_body_ext(key_corners_body_ext)) def frame_id_str_given_key_projected_tracks_body_ext(key_track_body_ext): @@ -271,18 +263,14 @@ def frame_id_str_given_key_projected_tracks_body_ext(key_track_body_ext): # VideoFileBody_nnnnnn_track_fxnl.csv # where the "nnnnnn" is a zero-padded numerical string such as "001258" # - return frame_id_str_given_prefix_number_Adjective_KeyWord_body_ext( - key_track_body_ext, 'projected', 'tracks' - ) + return frame_id_str_given_prefix_number_Adjective_KeyWord_body_ext(key_track_body_ext, 'projected', 'tracks') def frame_id_given_key_projected_tracks_body_ext(key_track_body_ext): # Our key frame track FrameNameXyList file names are of the form: # VideoFileBody_nnnnnn_track_fxnl.csv # where the "nnnnnn" is a zero-padded numerical string such as "001258" - return frame_id_given_frame_id_str( - 
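# Sketch of the extracted-frame naming convention documented above
# (VideoFileBody.nnnnnn.JPG, with a zero-padded frame number).  The '%06d' format string
# is an assumed example of input_frame_id_format.
def frame_file_name(input_video_body, frame_id, input_frame_id_format='%06d'):
    frame_id_str = input_frame_id_format % frame_id
    return input_video_body + '.' + frame_id_str + '.JPG'

def frame_id_from_frame_file_name(frame_file_body_ext):
    # 'VideoFileBody.001258.JPG' -> 1258
    return int(frame_file_body_ext.split('.')[-2])

# Example: frame_file_name('VideoFileBody', 1258) -> 'VideoFileBody.001258.JPG'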
frame_id_str_given_key_projected_tracks_body_ext(key_track_body_ext) - ) + return frame_id_given_frame_id_str(frame_id_str_given_key_projected_tracks_body_ext(key_track_body_ext)) def frame_id_str_given_key_confirmed_tracks_body_ext(key_track_body_ext): @@ -290,18 +278,14 @@ def frame_id_str_given_key_confirmed_tracks_body_ext(key_track_body_ext): # VideoFileBody_nnnnnn_track_fxnl.csv # where the "nnnnnn" is a zero-padded numerical string such as "001258" # - return frame_id_str_given_prefix_number_Adjective_KeyWord_body_ext( - key_track_body_ext, 'confirmed', 'tracks' - ) + return frame_id_str_given_prefix_number_Adjective_KeyWord_body_ext(key_track_body_ext, 'confirmed', 'tracks') def frame_id_given_key_confirmed_tracks_body_ext(key_track_body_ext): # Our key frame track FrameNameXyList file names are of the form: # VideoFileBody_nnnnnn_track_fxnl.csv # where the "nnnnnn" is a zero-padded numerical string such as "001258" - return frame_id_given_frame_id_str( - frame_id_str_given_key_confirmed_tracks_body_ext(key_track_body_ext) - ) + return frame_id_given_frame_id_str(frame_id_str_given_key_confirmed_tracks_body_ext(key_track_body_ext)) def frame_id_str_given_frame_file_body_ext(frame_file_body_ext): @@ -340,17 +324,11 @@ def frame_id_given_frame_file_body_ext(frame_file_body_ext): # Our extracted video file names are of the form: # VideoFileBody.nnnnnn.JPG # where the "nnnnnn" is a zero-padded numerical string such as "001258" - return frame_id_given_frame_id_str( - frame_id_str_given_frame_file_body_ext(frame_file_body_ext) - ) + return frame_id_given_frame_id_str(frame_id_str_given_frame_file_body_ext(frame_file_body_ext)) def draw_example_frame( - input_full_frame_dir, - input_frame_file, - output_render_dir, - render_control, - include_figure_idx_in_filename=False, + input_full_frame_dir, input_frame_file, output_render_dir, render_control, include_figure_idx_in_filename=False ): # Load frame. input_dir_body_ext = os.path.join( @@ -406,9 +384,7 @@ def draw_example_frames( # Create sample frame figures. if len(sample_idx_list) > 0: # Prepare directory. - upc.prepare_render_directory( - output_render_dir, delete_suffix, render_control - ) + upc.prepare_render_directory(output_render_dir, delete_suffix, render_control) # Draw example frames. for sample_idx in sample_idx_list: diff --git a/contrib/app/ufacet-s/helio_scan/test/NoTest_Heliostat3dInfer.py b/contrib/app/ufacet-s/helio_scan/test/NoTest_Heliostat3dInfer.py index 5243423b..d7306c91 100644 --- a/contrib/app/ufacet-s/helio_scan/test/NoTest_Heliostat3dInfer.py +++ b/contrib/app/ufacet-s/helio_scan/test/NoTest_Heliostat3dInfer.py @@ -55,24 +55,18 @@ def __init__( # Supporting information. # ?? SCAFFOLDING RCB -- SHOULD BE READ FROM ELSEWHERE. # Solar field parameters. - self.specifications = ( - Dspec.nsttf_specifications() - ) # ?? SCAFFOLDING RCB -- MAKE THIS GENERAL + self.specifications = Dspec.nsttf_specifications() # ?? 
SCAFFOLDING RCB -- MAKE THIS GENERAL self.heliostat_theoretical = uh3a.read_txt_file_to_heliostat( self.theoretical_heliostat_dir_body_ext, self.specifications ) def execute_infer_3d(self, input_corner_2d_trajectories_file): - hel_name = ( - self.heliostat_name_given_heliostat_2d_corner_trajectories_dir_body_ext( - input_corner_2d_trajectories_file - ) + hel_name = self.heliostat_name_given_heliostat_2d_corner_trajectories_dir_body_ext( + input_corner_2d_trajectories_file ) executable_output_body_ext = hel_name + '_reconstructed.txt' - executable_output_dir_body_ext = os.path.join( - self.output_heliostat_3d_dir, executable_output_body_ext - ) + executable_output_dir_body_ext = os.path.join(self.output_heliostat_3d_dir, executable_output_body_ext) print( 'In Test_Heliostat3dInfer.execute_infer_3d(), input_corner_2d_trajectories_file = ' + input_corner_2d_trajectories_file @@ -82,59 +76,33 @@ def execute_infer_3d(self, input_corner_2d_trajectories_file): 'In Test_Heliostat3dInfer.execute_infer_3d(), executable_output_dir_body_ext = ' + executable_output_dir_body_ext ) - print( - '\nIn Test_Heliostat3dInfer.execute_infer_3d(), self.heliostat_theoretical:' - ) + print('\nIn Test_Heliostat3dInfer.execute_infer_3d(), self.heliostat_theoretical:') dt.print_dict(self.heliostat_theoretical) # hel_name = file.split('/')[-1].split('_')[0] # Perform the 3-d inference. # print('In Test_Heliostat3dInfer.execute_infer_3d(), calling call_executable() for heliostat ' + hel_name + '...') # self.call_executable(file) - print( - 'In In Test_Heliostat3dInfer.execute_infer_3d(), REPLACING CALL_EXECUTABLE()...' - ) + print('In In Test_Heliostat3dInfer.execute_infer_3d(), REPLACING CALL_EXECUTABLE()...') assert False - print( - 'In Test_Heliostat3dInfer.execute_infer_3d(), call_executable() for heliostat ' - + hel_name - + ' finished.' - ) + print('In Test_Heliostat3dInfer.execute_infer_3d(), call_executable() for heliostat ' + hel_name + ' finished.') # We plan to recompile the C++ executable to have more fine-grain control over its output filename, but not today. # So rename the output file to match our naming standard. - print( - 'In Test_Heliostat3dInfer.execute_infer_3d(), renaming output file for heliostat ' - + hel_name - + '...' - ) + print('In Test_Heliostat3dInfer.execute_infer_3d(), renaming output file for heliostat ' + hel_name + '...') executable_output_body_ext = hel_name + '_reconstructed.txt' - executable_output_dir_body_ext = os.path.join( - self.output_heliostat_3d_dir, executable_output_body_ext - ) + executable_output_dir_body_ext = os.path.join(self.output_heliostat_3d_dir, executable_output_body_ext) heliostat_3d_dir_body_ext = os.path.join( - self.output_heliostat_3d_dir, - hel_name + '_' + self.confirm_distort_str + '_corners_3d.txt', + self.output_heliostat_3d_dir, hel_name + '_' + self.confirm_distort_str + '_corners_3d.txt' ) ft.rename_file(executable_output_dir_body_ext, heliostat_3d_dir_body_ext) - print( - 'In Test_Heliostat3dInfer.execute_infer_3d(), calling generate_plots() for heliostat ' - + hel_name - + '...' - ) + print('In Test_Heliostat3dInfer.execute_infer_3d(), calling generate_plots() for heliostat ' + hel_name + '...') uh3a.generate_plots( - heliostat_3d_dir_body_ext, - output_evaluation_plot_dir, - self.specifications, - self.heliostat_theoretical, - ) - print( - 'In Test_Heliostat3dInfer.execute_infer_3d(), generate_plots() for heliostat ' - + hel_name - + ' finished.' 
+ heliostat_3d_dir_body_ext, output_evaluation_plot_dir, self.specifications, self.heliostat_theoretical ) + print('In Test_Heliostat3dInfer.execute_infer_3d(), generate_plots() for heliostat ' + hel_name + ' finished.') return heliostat_3d_dir_body_ext @@ -162,43 +130,28 @@ def execute_infer_3d(self, input_corner_2d_trajectories_file): # hel_name, self.output_heliostat_3d_dir]) # proc.wait() - print( - 'In Test_Heliostat3dInfer.call_executable(), executable for heliostat ' - + hel_name - + ' finished.' - ) + print('In Test_Heliostat3dInfer.call_executable(), executable for heliostat ' + hel_name + ' finished.') def perform_3d_inference(self, single_execution=True): print('self.files =', self.files) - print( - 'In Test_Heliostat3dInfer.perform_3d_inference(), starting reconstruction...' - ) + print('In Test_Heliostat3dInfer.perform_3d_inference(), starting reconstruction...') if single_execution: heliostat_3d_dir_body_ext_list = [] for file in self.files: heliostat_3d_dir_body_ext_list.append(self.execute_infer_3d(file)) else: with Pool(36) as pool: - heliostat_3d_dir_body_ext_list = pool.map( - self.execute_infer_3d, self.files - ) - print( - 'In Test_Heliostat3dInfer.perform_3d_inference() reconstruction finished.' - ) + heliostat_3d_dir_body_ext_list = pool.map(self.execute_infer_3d, self.files) + print('In Test_Heliostat3dInfer.perform_3d_inference() reconstruction finished.') print('heliostat_3d_dir_body_ext_list = ', heliostat_3d_dir_body_ext_list) - print( - 'In Test_Heliostat3dInfer.perform_3d_inference(), starting csv file generation...' - ) + print('In Test_Heliostat3dInfer.perform_3d_inference(), starting csv file generation...') output_evaluation_csv_dir_2 = ( self.output_evaluation_csv_dir + '/' ) # ?? SCAFFOLDING RCB -- MAKE THIS PLATFORM INDEPENDENT. uh3a.generate_csv( - heliostat_3d_dir_body_ext_list, - output_evaluation_csv_dir_2, - self.specifications, - self.heliostat_theoretical, + heliostat_3d_dir_body_ext_list, output_evaluation_csv_dir_2, self.specifications, self.heliostat_theoretical ) print('In Test_Heliostat3dInfer.perform_3d_inference() csv files finished.') diff --git a/contrib/app/ufacet-s/helio_scan/test/NoTest_Reconstruct.py b/contrib/app/ufacet-s/helio_scan/test/NoTest_Reconstruct.py index 2427c997..47b45046 100644 --- a/contrib/app/ufacet-s/helio_scan/test/NoTest_Reconstruct.py +++ b/contrib/app/ufacet-s/helio_scan/test/NoTest_Reconstruct.py @@ -74,77 +74,41 @@ def __init__( # Supporting information. # ?? SCAFFOLDING RCB -- SHOULD BE READ FROM ELSEWHERE. # Solar field parameters. - self.specifications = ( - Dspec.nsttf_specifications() - ) # ?? SCAFFOLDING RCB -- MAKE THIS GENERAL + self.specifications = Dspec.nsttf_specifications() # ?? SCAFFOLDING RCB -- MAKE THIS GENERAL self.heliostat_theoretical = uh3a.read_txt_file_to_heliostat( self.theoretical_heliostat_dir_body_ext, self.specifications ) def execute_reconstruction(self, file): - hel_name = ( - self.heliostat_name_given_heliostat_2d_corner_trajectories_dir_body_ext( - file - ) - ) + hel_name = self.heliostat_name_given_heliostat_2d_corner_trajectories_dir_body_ext(file) # hel_name = file.split('/')[-1].split('_')[0] # Perform the reconstruction. - print( - 'In execute_reconstruction(), calling call_executable() for heliostat ' - + hel_name - + '...' - ) + print('In execute_reconstruction(), calling call_executable() for heliostat ' + hel_name + '...') self.call_executable(file) - print( - 'In execute_reconstruction(), call_executable() for heliostat ' - + hel_name - + ' finished.' 
- ) + print('In execute_reconstruction(), call_executable() for heliostat ' + hel_name + ' finished.') # We plan to recompile the C++ executable to have more fine-grain control over its output filename, but not today. # So rename the output file to match our naming standard. - print( - 'In execute_reconstruction(), renaming output file for heliostat ' - + hel_name - + '...' - ) + print('In execute_reconstruction(), renaming output file for heliostat ' + hel_name + '...') executable_output_body_ext = hel_name + '_reconstructed.txt' - executable_output_dir_body_ext = os.path.join( - self.output_heliostat_3d_dir, executable_output_body_ext - ) + executable_output_dir_body_ext = os.path.join(self.output_heliostat_3d_dir, executable_output_body_ext) heliostat_3d_dir_body_ext = os.path.join( - self.output_heliostat_3d_dir, - hel_name + '_' + self.confirm_distort_str + '_corners_3d.txt', + self.output_heliostat_3d_dir, hel_name + '_' + self.confirm_distort_str + '_corners_3d.txt' ) ft.rename_file(executable_output_dir_body_ext, heliostat_3d_dir_body_ext) - print( - 'In execute_reconstruction(), calling generate_plots() for heliostat ' - + hel_name - + '...' - ) + print('In execute_reconstruction(), calling generate_plots() for heliostat ' + hel_name + '...') uh3a.generate_plots( - heliostat_3d_dir_body_ext, - output_evaluation_plot_dir, - self.specifications, - self.heliostat_theoretical, - ) - print( - 'In execute_reconstruction(), generate_plots() for heliostat ' - + hel_name - + ' finished.' + heliostat_3d_dir_body_ext, output_evaluation_plot_dir, self.specifications, self.heliostat_theoretical ) + print('In execute_reconstruction(), generate_plots() for heliostat ' + hel_name + ' finished.') return heliostat_3d_dir_body_ext def call_executable(self, file): - hel_name = ( - self.heliostat_name_given_heliostat_2d_corner_trajectories_dir_body_ext( - file - ) - ) + hel_name = self.heliostat_name_given_heliostat_2d_corner_trajectories_dir_body_ext(file) # hel_name = file.split('/')[-1].split('_')[0] fx = self.cam_matrix[0][0] fy = self.cam_matrix[1][1] @@ -158,32 +122,16 @@ def call_executable(self, file): print('In call_executable(), str(cx) =', str(cx)) print('In call_executable(), str(cy) =', str(cy)) print('In call_executable(), heliostat =', hel_name) - print( - 'In call_executable(), self.output_heliostat_3d_dir =', - self.output_heliostat_3d_dir, - ) + print('In call_executable(), self.output_heliostat_3d_dir =', self.output_heliostat_3d_dir) - print( - 'In call_executable(), calling executable for heliostat ' + hel_name + '...' - ) + print('In call_executable(), calling executable for heliostat ' + hel_name + '...') proc = subprocess.Popen( - [ - self.executable_path, - file, - str(fx), - str(fy), - str(cx), - str(cy), - hel_name, - self.output_heliostat_3d_dir, - ] + [self.executable_path, file, str(fx), str(fy), str(cx), str(cy), hel_name, self.output_heliostat_3d_dir] ) proc.wait() - print( - 'In call_executable(), executable for heliostat ' + hel_name + ' finished.' 
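# Sketch of the external-reconstruction call above: the C++ executable receives the 2-d
# corner trajectories file, the pinhole intrinsics, the heliostat name, and the output
# directory as command-line arguments.  The cx, cy indices follow the usual OpenCV
# camera-matrix layout (an assumption consistent with the fx, fy indices shown), and
# subprocess.run replaces Popen/wait for brevity.
import subprocess

def run_reconstruction(executable_path, trajectories_file, cam_matrix, hel_name, output_dir):
    fx, fy = cam_matrix[0][0], cam_matrix[1][1]
    cx, cy = cam_matrix[0][2], cam_matrix[1][2]
    args = [executable_path, trajectories_file, str(fx), str(fy), str(cx), str(cy), hel_name, output_dir]
    subprocess.run(args, check=True)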
- ) + print('In call_executable(), executable for heliostat ' + hel_name + ' finished.') def perform_reconstruction(self, single_execution=True): print('self.files =', self.files) @@ -194,9 +142,7 @@ def perform_reconstruction(self, single_execution=True): heliostat_3d_dir_body_ext_list.append(self.execute_reconstruction(file)) else: with Pool(36) as pool: - heliostat_3d_dir_body_ext_list = pool.map( - self.execute_reconstruction, self.files - ) + heliostat_3d_dir_body_ext_list = pool.map(self.execute_reconstruction, self.files) print('In perform_reconstruction() reconstruction finished.') print('heliostat_3d_dir_body_ext_list = ', heliostat_3d_dir_body_ext_list) @@ -206,10 +152,7 @@ def perform_reconstruction(self, single_execution=True): self.output_evaluation_csv_dir + '/' ) # ?? SCAFFOLDING RCB -- MAKE THIS PLATFORM INDEPENDENT. uh3a.generate_csv( - heliostat_3d_dir_body_ext_list, - output_evaluation_csv_dir_2, - self.specifications, - self.heliostat_theoretical, + heliostat_3d_dir_body_ext_list, output_evaluation_csv_dir_2, self.specifications, self.heliostat_theoretical ) print('In perform_reconstruction() csv files finished.') diff --git a/contrib/scripts/AbstractFileFingerprint.py b/contrib/scripts/AbstractFileFingerprint.py index 02e86c1c..cde37d77 100644 --- a/contrib/scripts/AbstractFileFingerprint.py +++ b/contrib/scripts/AbstractFileFingerprint.py @@ -17,7 +17,4 @@ def relpath_name_ext(self): def eq_aff(self, other: 'AbstractFileFingerprint'): if not isinstance(other, AbstractFileFingerprint): return False - return ( - self.relative_path == other.relative_path - and self.name_ext == other.name_ext - ) + return self.relative_path == other.relative_path and self.name_ext == other.name_ext diff --git a/contrib/scripts/FileFingerprint.py b/contrib/scripts/FileFingerprint.py index 49135b4a..9845f9ec 100644 --- a/contrib/scripts/FileFingerprint.py +++ b/contrib/scripts/FileFingerprint.py @@ -49,10 +49,7 @@ def for_file(cls, root_path: str, relative_path: str, file_name_ext: str): def __lt__(self, other: 'FileFingerprint'): if not isinstance(other, FileFingerprint): - lt.error_and_raise( - TypeError, - f"'other' is not of type FileFingerprint but instead of type {type(other)}", - ) + lt.error_and_raise(TypeError, f"'other' is not of type FileFingerprint but instead of type {type(other)}") if self.relative_path == other.relative_path: return self.name_ext < other.name_ext return self.relative_path < other.relative_path diff --git a/contrib/scripts/SensitiveStringMatcher.py b/contrib/scripts/SensitiveStringMatcher.py index 2cf9c7f6..723783a1 100644 --- a/contrib/scripts/SensitiveStringMatcher.py +++ b/contrib/scripts/SensitiveStringMatcher.py @@ -74,9 +74,7 @@ def __init__(self, name: str, *patterns: str): p: re.Pattern = pattern patterns[i] = re.compile(p.pattern.lower()) - def _search_pattern( - self, ihaystack: str, pattern: re.Pattern | str - ) -> None | list[int]: + def _search_pattern(self, ihaystack: str, pattern: re.Pattern | str) -> None | list[int]: if isinstance(pattern, str): # Check for occurances of string literals if pattern in ihaystack: @@ -93,9 +91,7 @@ def _search_pattern( return None - def _search_patterns( - self, ihaystack: str, patterns: list[re.Pattern | str] - ) -> dict[re.Pattern | str, list[int]]: + def _search_patterns(self, ihaystack: str, patterns: list[re.Pattern | str]) -> dict[re.Pattern | str, list[int]]: ret: dict[re.Pattern | str, list[int]] = {} for pattern in patterns: diff --git a/contrib/scripts/sensitive_strings.py 
b/contrib/scripts/sensitive_strings.py index 28efd5c3..f49141b0 100644 --- a/contrib/scripts/sensitive_strings.py +++ b/contrib/scripts/sensitive_strings.py @@ -43,9 +43,7 @@ def __init__( self.verify_all_on_behalf_of_user = False self.remove_unfound_binaries = False self.date_time_str = tdt.current_date_time_string_forfile() - self.tmp_dir_base = ft.norm_path( - os.path.join(orp.opencsp_temporary_dir(), "SensitiveStringSearcher") - ) + self.tmp_dir_base = ft.norm_path(os.path.join(orp.opencsp_temporary_dir(), "SensitiveStringSearcher")) self.git_files_only = True self.is_hdf5_searcher = False self.has_backed_up_allowed_binaries_csv = False @@ -89,22 +87,16 @@ def build_matchers(self): return matchers def norm_path(self, file_path, file_name_ext: str): - return ft.norm_path( - os.path.join(self.root_search_dir, file_path, file_name_ext) - ) + return ft.norm_path(os.path.join(self.root_search_dir, file_path, file_name_ext)) def _is_file_in_cleared_cache(self, file_path: str, file_name_ext: str): - cache_entry = fc.FileCache.for_file( - self.root_search_dir, file_path, file_name_ext - ) + cache_entry = fc.FileCache.for_file(self.root_search_dir, file_path, file_name_ext) if cache_entry in self.cached_cleared_files: return True return False def _register_file_in_cleared_cache(self, file_path: str, file_name_ext: str): - cache_entry = fc.FileCache.for_file( - self.root_search_dir, file_path, file_name_ext - ) + cache_entry = fc.FileCache.for_file(self.root_search_dir, file_path, file_name_ext) self.new_cached_cleared_files.append(cache_entry) def _is_binary_file(self, file_path: str, file_name_ext: str): @@ -132,12 +124,8 @@ def _is_binary_file(self, file_path: str, file_name_ext: str): return is_binary_file - def _enqueue_binary_file_for_later_processing( - self, file_path: str, file_name_ext: str - ): - file_ff = ff.FileFingerprint.for_file( - self.root_search_dir, file_path, file_name_ext - ) + def _enqueue_binary_file_for_later_processing(self, file_path: str, file_name_ext: str): + file_ff = ff.FileFingerprint.for_file(self.root_search_dir, file_path, file_name_ext) if file_ff in self.allowed_binary_files: # we already know and trust this binary file @@ -201,9 +189,7 @@ def search_hdf5_file(self, hdf5_file: ff.FileFingerprint): fout.writelines(allowed_binary_files_lines) # Create a searcher for the unzipped directory - hdf5_searcher = SensitiveStringsSearcher( - h5_dir, self.sensitive_strings_csv, tmp_allowed_binary_csv - ) + hdf5_searcher = SensitiveStringsSearcher(h5_dir, self.sensitive_strings_csv, tmp_allowed_binary_csv) hdf5_searcher.interactive = self.interactive hdf5_searcher.verify_all_on_behalf_of_user = self.verify_all_on_behalf_of_user hdf5_searcher.date_time_str = self.date_time_str @@ -218,25 +204,19 @@ def search_hdf5_file(self, hdf5_file: ff.FileFingerprint): # There was an error, but the user may want to sign off on the file anyways. 
if len(hdf5_matches) > 0: # Describe the issues with the HDF5 file - lt.warn( - f"Found {len(hdf5_matches)} possible issues with the HDF5 file '{relative_path_name_ext}':" - ) + lt.warn(f"Found {len(hdf5_matches)} possible issues with the HDF5 file '{relative_path_name_ext}':") prev_relpath_name_ext = None for file_relpath_name_ext in hdf5_matches: if prev_relpath_name_ext != file_relpath_name_ext: lt.warn(f" {file_relpath_name_ext}:") prev_relpath_name_ext = file_relpath_name_ext for match in hdf5_matches[file_relpath_name_ext]: - lt.warn( - f" {match.msg} (line {match.lineno}, col {match.colno})" - ) + lt.warn(f" {match.msg} (line {match.lineno}, col {match.colno})") # Ask the user about signing off if self.interactive: if not self.verify_interactively(file_relpath_name_ext): - matches.append( - ssm.Match(0, 0, 0, "", "", None, "HDF5 file denied by user") - ) + matches.append(ssm.Match(0, 0, 0, "", "", None, "HDF5 file denied by user")) else: # if self.interactive for file_relpath_name_ext in hdf5_matches: match = hdf5_matches[file_relpath_name_ext] @@ -271,9 +251,7 @@ def verify_interactively(self, relative_path_name_ext: str, cv_img: Image.Image lt.info("") lt.info("Unknown binary file:") lt.info(" " + relative_path_name_ext) - lt.info( - "Is this unknown binary file safe to add, and doesn't contain any sensitive information (y/n)?" - ) + lt.info("Is this unknown binary file safe to add, and doesn't contain any sensitive information (y/n)?") if self.verify_all_on_behalf_of_user: val = 'y' else: @@ -283,9 +261,7 @@ def verify_interactively(self, relative_path_name_ext: str, cv_img: Image.Image else: lt.info("") - lt.info( - "Is this image safe to add, and doesn't contain any sensitive information (y/n)?" - ) + lt.info("Is this image safe to add, and doesn't contain any sensitive information (y/n)?") if self.verify_all_on_behalf_of_user: val = 'y' else: @@ -320,9 +296,7 @@ def search_binary_file(self, binary_file: ff.FileFingerprint) -> list[ssm.Match] if self.interactive_image_sign_off(file_ff=binary_file): return [] else: - matches.append( - ssm.Match(0, 0, 0, "", "", None, "File denied by user") - ) + matches.append(ssm.Match(0, 0, 0, "", "", None, "File denied by user")) else: matches.append(ssm.Match(0, 0, 0, "", "", None, "Unknown image file")) @@ -339,10 +313,7 @@ def _is_img_ext(self, ext: str): return ext.lower().lstrip(".") in it.pil_image_formats_rw def interactive_image_sign_off( - self, - np_image: np.ndarray = None, - description: str = None, - file_ff: ff.FileFingerprint = None, + self, np_image: np.ndarray = None, description: str = None, file_ff: ff.FileFingerprint = None ) -> bool: if (np_image is None) and (file_ff is not None): file_norm_path = self.norm_path(file_ff.relative_path, file_ff.name_ext) @@ -356,8 +327,7 @@ def interactive_image_sign_off( np_image = np.copy(np.array(img)) img.close() return self.interactive_image_sign_off( - np_image=np_image, - description=f"{file_ff.relative_path}/{file_ff.name_ext}", + np_image=np_image, description=f"{file_ff.relative_path}/{file_ff.name_ext}" ) else: return self.verify_interactively(file_ff.relative_path) @@ -381,7 +351,7 @@ def interactive_image_sign_off( rescaled = " (downscaled)" # Show the image and prompt the user - ret = self.verify_interactively(description, np_image, description+rescaled) + ret = self.verify_interactively(description, np_image, description + rescaled) return ret def _init_files_lists(self): @@ -393,10 +363,7 @@ def _init_files_lists(self): else: abfc_p, abfc_n, abfc_e = 
ft.path_components(self.allowed_binary_files_csv) self.allowed_binary_files = [ - inst - for inst, _ in ff.FileFingerprint.from_csv( - "Allowed Binary Files", abfc_p, abfc_n + abfc_e - ) + inst for inst, _ in ff.FileFingerprint.from_csv("Allowed Binary Files", abfc_p, abfc_n + abfc_e) ] self.accepted_binary_files.clear() self.unknown_binary_files.clear() @@ -408,10 +375,7 @@ def _init_files_lists(self): sensitive_strings_cache = fc.FileCache.for_file(ss_p, "", ss_n + ss_e) if self.cache_file_csv != None and ft.file_exists(self.cache_file_csv): cp, cn, ce = ft.path_components(self.cache_file_csv) - self.cached_cleared_files = [ - inst - for inst, _ in fc.FileCache.from_csv("Cleared Files Cache", cp, cn + ce) - ] + self.cached_cleared_files = [inst for inst, _ in fc.FileCache.from_csv("Cleared Files Cache", cp, cn + ce)] if not sensitive_strings_cache in self.cached_cleared_files: self.cached_cleared_files.clear() self.new_cached_cleared_files.append(sensitive_strings_cache) @@ -432,12 +396,7 @@ def update_allowed_binaries_csv(self): path, name, ext = ft.path_components(self.allowed_binary_files_csv) self.allowed_binary_files = sorted(self.allowed_binary_files) - self.allowed_binary_files[0].to_csv( - "Allowed Binary Files", - path, - name, - rows=self.allowed_binary_files, - ) + self.allowed_binary_files[0].to_csv("Allowed Binary Files", path, name, rows=self.allowed_binary_files) def search_files(self): self._init_files_lists() @@ -459,19 +418,10 @@ def search_files(self): ) files = [line.val for line in git_committed + git_added] # don't include "git rm"'d files - files = list( - filter( - lambda file: ft.file_exists( - os.path.join(self.root_search_dir, file) - ), - files, - ) - ) + files = list(filter(lambda file: ft.file_exists(os.path.join(self.root_search_dir, file)), files)) lt.info(f"Searching for sensitive strings in {len(files)} tracked files") else: - files = ft.files_in_directory( - self.root_search_dir, files_only=True, recursive=True - ) + files = ft.files_in_directory(self.root_search_dir, files_only=True, recursive=True) lt.info(f"Searching for sensitive strings in {len(files)} files") files = sorted(list(set(files))) @@ -487,9 +437,7 @@ def search_files(self): # need to check this file if self._is_binary_file(file_path, file_name_ext): # deal with non-parseable binary files as a group, below - self._enqueue_binary_file_for_later_processing( - file_path, file_name_ext - ) + self._enqueue_binary_file_for_later_processing(file_path, file_name_ext) else: # check text files for sensitive strings file_matches = self.search_file(file_path, file_name_ext) @@ -513,9 +461,7 @@ def search_files(self): for match in matches[file]: lt.error(f" {match.msg}") if len(self.unfound_allowed_binary_files) > 0: - lt.error( - f"Expected {len(self.unfound_allowed_binary_files)} binary files that can't be found:" - ) + lt.error(f"Expected {len(self.unfound_allowed_binary_files)} binary files that can't be found:") for file_ff in self.unfound_allowed_binary_files: lt.info("") lt.error(os.path.join(file_ff.relative_path, file_ff.name_ext)) @@ -553,24 +499,16 @@ def search_files(self): ) for _match in parsable_matches: match: ssm.Match = _match - lt.error( - " " - + match.msg - + f" (line {match.lineno}, col {match.colno})" - ) + lt.error(" " + match.msg + f" (line {match.lineno}, col {match.colno})") # Date+time stamp the new allowed list csv files if num_signed_binary_files > 0: path, name, ext = ft.path_components(self.allowed_binary_files_csv) abfc_stamped_name_ext = 
f"{name}_{self.date_time_str}{ext}" - abfc_stamped_path_name_ext = os.path.join( - path, abfc_stamped_name_ext - ) + abfc_stamped_path_name_ext = os.path.join(path, abfc_stamped_name_ext) if ft.file_exists(abfc_stamped_path_name_ext): ft.delete_file(abfc_stamped_path_name_ext) - ft.copy_file( - self.allowed_binary_files_csv, path, abfc_stamped_name_ext - ) + ft.copy_file(self.allowed_binary_files_csv, path, abfc_stamped_name_ext) # for file_ff in unknowns_copy # if len(self.unknown_binary_files) > 0: @@ -600,24 +538,16 @@ def search_files(self): # Executive summary info_or_warn = lt.info - ret = ( - len(matches) - + len(self.unfound_allowed_binary_files) - + len(self.unknown_binary_files) - ) + ret = len(matches) + len(self.unfound_allowed_binary_files) + len(self.unknown_binary_files) if ret > 0: info_or_warn = lt.warn info_or_warn("Summary:") info_or_warn("<<>>" if ret == 0 else "<<>>") info_or_warn(f"Found {len(matches)} sensitive string matches") if len(self.unfound_allowed_binary_files) > 0: - info_or_warn( - f"Did not find {len(self.unfound_allowed_binary_files)} expected binary files" - ) + info_or_warn(f"Did not find {len(self.unfound_allowed_binary_files)} expected binary files") else: - info_or_warn( - f"Found {len(self.allowed_binary_files)} expected binary files" - ) + info_or_warn(f"Found {len(self.allowed_binary_files)} expected binary files") info_or_warn(f"Found {len(self.unknown_binary_files)} unexpected binary files") # Add a 'match' for any unfound or unknown binary files @@ -625,47 +555,47 @@ def search_files(self): for file_ff in self.unfound_allowed_binary_files: fpne = f"{file_ff.relative_path}/{file_ff.name_ext}" matches[fpne] = [] if (fpne not in matches) else matches[fpne] - matches[fpne].append( - ssm.Match(0, 0, 0, "", "", None, f"Unfound binary file {fpne}") - ) + matches[fpne].append(ssm.Match(0, 0, 0, "", "", None, f"Unfound binary file {fpne}")) for file_ff in self.unknown_binary_files: fpne = f"{file_ff.relative_path}/{file_ff.name_ext}" matches[fpne] = [] if (fpne not in matches) else matches[fpne] - matches[fpne].append( - ssm.Match(0, 0, 0, "", "", None, f"Unknown binary file {fpne}") - ) + matches[fpne].append(ssm.Match(0, 0, 0, "", "", None, f"Unknown binary file {fpne}")) self.matches = matches return ret if __name__ == "__main__": - parser = argparse.ArgumentParser( - prog=__file__.rstrip(".py"), description='Sensitive strings searcher' + parser = argparse.ArgumentParser(prog=__file__.rstrip(".py"), description='Sensitive strings searcher') + parser.add_argument( + '--no-interactive', + action='store_true', + dest="ninteractive", + help="Don't interactively ask the user about unknown binary files. Simply fail instead.", + ) + parser.add_argument( + '--accept-all', + action='store_true', + dest="acceptall", + help="Don't interactively ask the user about unknown binary files. Simply accept all as verified on the user's behalf. " + + "This can be useful when you're confident that the only changes have been that the binary files have moved but not changed.", + ) + parser.add_argument( + '--accept-unfound', + action='store_true', + dest="acceptunfound", + help="Don't fail because of unfound expected binary files. Instead remove the expected files from the list of allowed binaries. 
" + + "This can be useful when you're confident that the only changes have been that the binary files have moved but not changed.", ) - parser.add_argument('--no-interactive', action='store_true', dest="ninteractive", - help="Don't interactively ask the user about unknown binary files. Simply fail instead.") - parser.add_argument('--accept-all', action='store_true', dest="acceptall", - help="Don't interactively ask the user about unknown binary files. Simply accept all as verified on the user's behalf. " + - "This can be useful when you're confident that the only changes have been that the binary files have moved but not changed.") - parser.add_argument('--accept-unfound', action='store_true', dest="acceptunfound", - help="Don't fail because of unfound expected binary files. Instead remove the expected files from the list of allowed binaries. " + - "This can be useful when you're confident that the only changes have been that the binary files have moved but not changed.") args = parser.parse_args() not_interactive: bool = args.ninteractive accept_all: bool = args.acceptall remove_unfound_binaries: bool = args.acceptunfound - ss_log_dir = ft.norm_path( - opencsp_settings['sensitive_strings']['sensitive_strings_dir'] - ) + ss_log_dir = ft.norm_path(opencsp_settings['sensitive_strings']['sensitive_strings_dir']) log_path = ft.norm_path(os.path.join(ss_log_dir, "sensitive_strings_log.txt")) - sensitive_strings_csv = ft.norm_path( - opencsp_settings['sensitive_strings']['sensitive_strings_file'] - ) - allowed_binary_files_csv = ft.norm_path( - opencsp_settings['sensitive_strings']['allowed_binaries_file'] - ) + sensitive_strings_csv = ft.norm_path(opencsp_settings['sensitive_strings']['sensitive_strings_file']) + allowed_binary_files_csv = ft.norm_path(opencsp_settings['sensitive_strings']['allowed_binaries_file']) ss_cache_file = ft.norm_path(opencsp_settings['sensitive_strings']['cache_file']) date_time_str = tdt.current_date_time_string_forfile() @@ -675,9 +605,7 @@ def search_files(self): lt.logger(log_path) root_search_dir = os.path.join(orp.opencsp_code_dir(), "..") - searcher = SensitiveStringsSearcher( - root_search_dir, sensitive_strings_csv, allowed_binary_files_csv, ss_cache_file - ) + searcher = SensitiveStringsSearcher(root_search_dir, sensitive_strings_csv, allowed_binary_files_csv, ss_cache_file) searcher.interactive = not not_interactive searcher.verify_all_on_behalf_of_user = accept_all searcher.remove_unfound_binaries = remove_unfound_binaries diff --git a/contrib/scripts/test/test_FileFingerprint.py b/contrib/scripts/test/test_FileFingerprint.py index 61a76a36..a48a14e4 100644 --- a/contrib/scripts/test/test_FileFingerprint.py +++ b/contrib/scripts/test/test_FileFingerprint.py @@ -26,12 +26,8 @@ def test_equal(self): f2 = "equal_file" contents = "%0.10f" % random.Random().random() - ft.write_text_file( - f1, f"{self.out_dir}/{d1}", f1, [contents], error_if_dir_not_exist=False - ) - ft.write_text_file( - f2, f"{self.out_dir}/{d2}", f2, [contents], error_if_dir_not_exist=False - ) + ft.write_text_file(f1, f"{self.out_dir}/{d1}", f1, [contents], error_if_dir_not_exist=False) + ft.write_text_file(f2, f"{self.out_dir}/{d2}", f2, [contents], error_if_dir_not_exist=False) ff1 = ff.FileFingerprint.for_file(f"{self.out_dir}/{d1}", "", f1 + ".txt") ff2 = ff.FileFingerprint.for_file(f"{self.out_dir}/{d2}", "", f2 + ".txt") @@ -44,12 +40,8 @@ def test_not_equal_relpath(self): f2 = "equal_file" contents = "%0.10f" % random.Random().random() - ft.write_text_file( - f1, 
f"{self.out_dir}/{d1}", f1, [contents], error_if_dir_not_exist=False - ) - ft.write_text_file( - f2, f"{self.out_dir}/{d2}", f2, [contents], error_if_dir_not_exist=False - ) + ft.write_text_file(f1, f"{self.out_dir}/{d1}", f1, [contents], error_if_dir_not_exist=False) + ft.write_text_file(f2, f"{self.out_dir}/{d2}", f2, [contents], error_if_dir_not_exist=False) ff1 = ff.FileFingerprint.for_file(self.out_dir, d1, f1 + ".txt") ff2 = ff.FileFingerprint.for_file(self.out_dir, d2, f2 + ".txt") @@ -62,12 +54,8 @@ def test_not_equal_filename(self): f2 = "equal_file2" contents = "%0.10f" % random.Random().random() - ft.write_text_file( - f1, f"{self.out_dir}/{d1}", f1, [contents], error_if_dir_not_exist=False - ) - ft.write_text_file( - f2, f"{self.out_dir}/{d2}", f2, [contents], error_if_dir_not_exist=False - ) + ft.write_text_file(f1, f"{self.out_dir}/{d1}", f1, [contents], error_if_dir_not_exist=False) + ft.write_text_file(f2, f"{self.out_dir}/{d2}", f2, [contents], error_if_dir_not_exist=False) ff1 = ff.FileFingerprint.for_file(f"{self.out_dir}/{d1}", "", f1 + ".txt") ff2 = ff.FileFingerprint.for_file(f"{self.out_dir}/{d2}", "", f2 + ".txt") @@ -82,12 +70,8 @@ def test_not_equal_hash(self): contents1 = contents + " " contents2 = " " + contents - ft.write_text_file( - f1, f"{self.out_dir}/{d1}", f1, [contents1], error_if_dir_not_exist=False - ) - ft.write_text_file( - f2, f"{self.out_dir}/{d2}", f2, [contents2], error_if_dir_not_exist=False - ) + ft.write_text_file(f1, f"{self.out_dir}/{d1}", f1, [contents1], error_if_dir_not_exist=False) + ft.write_text_file(f2, f"{self.out_dir}/{d2}", f2, [contents2], error_if_dir_not_exist=False) ff1 = ff.FileFingerprint.for_file(f"{self.out_dir}/{d1}", "", f1 + ".txt") ff2 = ff.FileFingerprint.for_file(f"{self.out_dir}/{d2}", "", f2 + ".txt") diff --git a/contrib/scripts/test/test_SensitiveStringMatcher.py b/contrib/scripts/test/test_SensitiveStringMatcher.py index 0b5552f3..8f080c6d 100644 --- a/contrib/scripts/test/test_SensitiveStringMatcher.py +++ b/contrib/scripts/test/test_SensitiveStringMatcher.py @@ -54,24 +54,18 @@ def test_matches(self): self.assertEqual(3, matches[2].lineno) def test_dont_match(self): - matcher = ssm.SensitiveStringMatcher( - "Basic Matcher", "foo", "**dont_match", "foo" - ) + matcher = ssm.SensitiveStringMatcher("Basic Matcher", "foo", "**dont_match", "foo") matches = matcher.check_lines(["foo", "bar", "baz"]) self.assertEqual(0, len(matches)) def test_case_sensitive(self): matcher = ssm.SensitiveStringMatcher("Basic Matcher", "**case_sensitive", "foo") - matches = matcher.check_lines( - ["foO", "fOo", "fOO", "Foo", "FoO", "FOo", "FOO", "foo"] - ) + matches = matcher.check_lines(["foO", "fOo", "fOO", "Foo", "FoO", "FOo", "FOO", "foo"]) self.assertEqual(1, len(matches)) self.assertEqual(8, matches[0].lineno) def test_single_regex(self): - matcher = ssm.SensitiveStringMatcher( - "Basic Matcher", "**next_is_regex", r"[a-z]a[a-z]" - ) + matcher = ssm.SensitiveStringMatcher("Basic Matcher", "**next_is_regex", r"[a-z]a[a-z]") matches = matcher.check_lines(["foo", "bar", "baz"]) self.assertEqual(2, len(matches)) self.assertEqual(2, matches[0].lineno) @@ -80,34 +74,26 @@ def test_single_regex(self): self.assertEqual('baz', matches[1].line_part) def test_partial_single_regex(self): - matcher = ssm.SensitiveStringMatcher( - "Regex Matcher", "**next_is_regex", r"[a-z]o[a-z]" - ) + matcher = ssm.SensitiveStringMatcher("Regex Matcher", "**next_is_regex", r"[a-z]o[a-z]") matches = matcher.check_lines(["foobarbaz"]) self.assertEqual(1, 
len(matches)) self.assertEqual(0, matches[0].colno) self.assertEqual('foo', matches[0].line_part) - matcher = ssm.SensitiveStringMatcher( - "Regex Matcher", "**next_is_regex", r"[a-z]{2}r" - ) + matcher = ssm.SensitiveStringMatcher("Regex Matcher", "**next_is_regex", r"[a-z]{2}r") matches = matcher.check_lines(["foobarbaz"]) self.assertEqual(1, len(matches)) self.assertEqual(3, matches[0].colno) self.assertEqual('bar', matches[0].line_part) - matcher = ssm.SensitiveStringMatcher( - "Regex Matcher", "**next_is_regex", r"[a-z]{2}z" - ) + matcher = ssm.SensitiveStringMatcher("Regex Matcher", "**next_is_regex", r"[a-z]{2}z") matches = matcher.check_lines(["foobarbaz"]) self.assertEqual(1, len(matches)) self.assertEqual(6, matches[0].colno) self.assertEqual('baz', matches[0].line_part) def test_partial_multiple_regex(self): - matcher = ssm.SensitiveStringMatcher( - "Regex Matcher", "**all_regex", r"[a-z]o[a-z]", r"[a-z]{2}r", r"[a-z]{2}z" - ) + matcher = ssm.SensitiveStringMatcher("Regex Matcher", "**all_regex", r"[a-z]o[a-z]", r"[a-z]{2}r", r"[a-z]{2}z") matches = matcher.check_lines(["foobarbaz"]) self.assertEqual(3, len(matches)) self.assertEqual(0, matches[0].colno) @@ -118,9 +104,7 @@ def test_partial_multiple_regex(self): self.assertEqual('baz', matches[2].line_part) def test_mixed_plain_regex(self): - matcher = ssm.SensitiveStringMatcher( - "Basic Matcher", "foo", "**next_is_regex", r"[a-z]{2}r", "baz" - ) + matcher = ssm.SensitiveStringMatcher("Basic Matcher", "foo", "**next_is_regex", r"[a-z]{2}r", "baz") matches = matcher.check_lines(["foobarbaz"]) self.assertLessEqual(1, len(matches)) @@ -143,19 +127,12 @@ def test_mixed_plain_regex(self): self.assertEqual('baz', matches[0].line_part) def test_regex_dont_match(self): - matcher = ssm.SensitiveStringMatcher( - "Basic Matcher", "foo", "**dont_match", "**next_is_regex", r"[a-z]o[a-z]" - ) + matcher = ssm.SensitiveStringMatcher("Basic Matcher", "foo", "**dont_match", "**next_is_regex", r"[a-z]o[a-z]") matches = matcher.check_lines(["foo", "bar", "baz"]) self.assertEqual(0, len(matches)) matcher = ssm.SensitiveStringMatcher( - "Basic Matcher", - "**all_regex", - "foo.?", - "**dont_match", - "**next_is_regex", - r"[a-z]{4}", + "Basic Matcher", "**all_regex", "foo.?", "**dont_match", "**next_is_regex", r"[a-z]{4}" ) matches = matcher.check_lines(["foo", "bar", "baz"]) self.assertEqual(1, len(matches)) diff --git a/contrib/scripts/test/test_sensitive_strings.py b/contrib/scripts/test/test_sensitive_strings.py index d9bd9dde..0effaec5 100644 --- a/contrib/scripts/test/test_sensitive_strings.py +++ b/contrib/scripts/test/test_sensitive_strings.py @@ -20,52 +20,38 @@ def setUp(self) -> None: self.root_search_dir = os.path.join(self.data_dir, "root_search_dir") self.ss_dir = os.path.join(self.data_dir, "per_test_sensitive_strings") - self.allowed_binaries_dir = os.path.join( - self.data_dir, "per_test_allowed_binaries" - ) + self.allowed_binaries_dir = os.path.join(self.data_dir, "per_test_allowed_binaries") self.all_binaries = os.path.join(self.allowed_binaries_dir, "all_binaries.csv") self.no_binaries = os.path.join(self.allowed_binaries_dir, "no_binaries.csv") def test_no_matches(self): sensitive_strings_csv = os.path.join(self.ss_dir, "no_matches.csv") - searcher = ss.SensitiveStringsSearcher( - self.root_search_dir, sensitive_strings_csv, self.all_binaries - ) + searcher = ss.SensitiveStringsSearcher(self.root_search_dir, sensitive_strings_csv, self.all_binaries) searcher.git_files_only = False self.assertEqual(searcher.search_files(), 0) 
def test_single_matcher(self): # based on file name sensitive_strings_csv = os.path.join(self.ss_dir, "test_single_matcher.csv") - searcher = ss.SensitiveStringsSearcher( - self.root_search_dir, sensitive_strings_csv, self.all_binaries - ) + searcher = ss.SensitiveStringsSearcher(self.root_search_dir, sensitive_strings_csv, self.all_binaries) searcher.git_files_only = False self.assertEqual(searcher.search_files(), 1) # based on file content - sensitive_strings_csv = os.path.join( - self.ss_dir, "test_single_matcher_content.csv" - ) - searcher = ss.SensitiveStringsSearcher( - self.root_search_dir, sensitive_strings_csv, self.all_binaries - ) + sensitive_strings_csv = os.path.join(self.ss_dir, "test_single_matcher_content.csv") + searcher = ss.SensitiveStringsSearcher(self.root_search_dir, sensitive_strings_csv, self.all_binaries) searcher.git_files_only = False self.assertEqual(searcher.search_files(), 1) def test_directory_matcher(self): sensitive_strings_csv = os.path.join(self.ss_dir, "test_directory_matcher.csv") - searcher = ss.SensitiveStringsSearcher( - self.root_search_dir, sensitive_strings_csv, self.all_binaries - ) + searcher = ss.SensitiveStringsSearcher(self.root_search_dir, sensitive_strings_csv, self.all_binaries) searcher.git_files_only = False self.assertEqual(searcher.search_files(), 1) def test_all_matches(self): sensitive_strings_csv = os.path.join(self.ss_dir, "test_all_matches.csv") - searcher = ss.SensitiveStringsSearcher( - self.root_search_dir, sensitive_strings_csv, self.no_binaries - ) + searcher = ss.SensitiveStringsSearcher(self.root_search_dir, sensitive_strings_csv, self.no_binaries) searcher.git_files_only = False # 6 matches: # files: a.txt, b/b.txt, c/d/e.txt @@ -76,29 +62,21 @@ def test_all_matches(self): def test_single_unknown_binary(self): sensitive_strings_csv = os.path.join(self.ss_dir, "no_matches.csv") single_binary_csv = os.path.join(self.allowed_binaries_dir, "single_binary.csv") - searcher = ss.SensitiveStringsSearcher( - self.root_search_dir, sensitive_strings_csv, single_binary_csv - ) + searcher = ss.SensitiveStringsSearcher(self.root_search_dir, sensitive_strings_csv, single_binary_csv) searcher.git_files_only = False self.assertEqual(searcher.search_files(), 1) def test_single_expected_not_found_binary(self): sensitive_strings_csv = os.path.join(self.ss_dir, "no_matches.csv") - single_binary_csv = os.path.join( - self.allowed_binaries_dir, "single_expected_not_found_binary.csv" - ) - searcher = ss.SensitiveStringsSearcher( - self.root_search_dir, sensitive_strings_csv, single_binary_csv - ) + single_binary_csv = os.path.join(self.allowed_binaries_dir, "single_expected_not_found_binary.csv") + searcher = ss.SensitiveStringsSearcher(self.root_search_dir, sensitive_strings_csv, single_binary_csv) searcher.git_files_only = False # 2 unknown binaries, and 1 expected not found self.assertEqual(searcher.search_files(), 3) def test_hdf5_match(self): sensitive_strings_csv = os.path.join(self.ss_dir, "h5_match.csv") - searcher = ss.SensitiveStringsSearcher( - self.root_search_dir, sensitive_strings_csv, self.all_binaries - ) + searcher = ss.SensitiveStringsSearcher(self.root_search_dir, sensitive_strings_csv, self.all_binaries) searcher.git_files_only = False # 2 unknown binaries, and 1 expected not found self.assertEqual(searcher.search_files(), 1) diff --git a/contrib/test_data_generation/downsample_data_general.py b/contrib/test_data_generation/downsample_data_general.py index 53e80beb..0c2906ac 100644 --- 
a/contrib/test_data_generation/downsample_data_general.py +++ b/contrib/test_data_generation/downsample_data_general.py @@ -58,11 +58,7 @@ def downsample_images(images: np.ndarray, n: int) -> np.ndarray: n_images = images.shape[2] for idx_im in range(n_images): - images_ds_list.append( - convolve2d(images[..., idx_im], ker, mode='valid')[::n, ::n, None].astype( - 'uint8' - ) - ) + images_ds_list.append(convolve2d(images[..., idx_im], ker, mode='valid')[::n, ::n, None].astype('uint8')) images_out = np.concatenate(images_ds_list, 2) diff --git a/contrib/test_data_generation/scene_reconstruction/generate_downsampled_dataset.py b/contrib/test_data_generation/scene_reconstruction/generate_downsampled_dataset.py index cd5fb442..a5fe2d3a 100644 --- a/contrib/test_data_generation/scene_reconstruction/generate_downsampled_dataset.py +++ b/contrib/test_data_generation/scene_reconstruction/generate_downsampled_dataset.py @@ -20,9 +20,7 @@ def generate_data(): """Downsamples and saves files""" # Define file locations - dir_sample_data = join( - opencsp_code_dir(), '../../sample_data/scene_reconstruction/data_measurement' - ) + dir_sample_data = join(opencsp_code_dir(), '../../sample_data/scene_reconstruction/data_measurement') files_images = glob(join(dir_sample_data, 'aruco_marker_images/*.JPG')) file_alignment_points = join(dir_sample_data, 'alignment_points.csv') @@ -30,9 +28,7 @@ def generate_data(): file_point_pair_dists = join(dir_sample_data, 'point_pair_distances.csv') file_camera_cal = join(dir_sample_data, 'camera.h5') - dir_save = join( - opencsp_code_dir(), 'app/scene_reconstruction/test/data/data_measurement' - ) + dir_save = join(opencsp_code_dir(), 'app/scene_reconstruction/test/data/data_measurement') # Downsample marker/dot images n_downsample = 5 diff --git a/contrib/test_data_generation/sofast_fixed/generate_downsampled_calibration_data.py b/contrib/test_data_generation/sofast_fixed/generate_downsampled_calibration_data.py index 0a525a1f..d28225a2 100644 --- a/contrib/test_data_generation/sofast_fixed/generate_downsampled_calibration_data.py +++ b/contrib/test_data_generation/sofast_fixed/generate_downsampled_calibration_data.py @@ -20,8 +20,7 @@ def generate_data(): """Downsamples and saves files""" # Define file locations dir_sample_data = join( - opencsp_code_dir(), - '../../sample_data/deflectometry/calibration_dot_locations/data_measurement', + opencsp_code_dir(), '../../sample_data/deflectometry/calibration_dot_locations/data_measurement' ) files_images = glob(join(dir_sample_data, 'images/*.JPG')) @@ -30,10 +29,7 @@ def generate_data(): file_camera_def = join(dir_sample_data, 'camera_deflectometry.h5') file_image_def = join(dir_sample_data, 'image_deflectometry_camera.png') - dir_save = join( - opencsp_code_dir(), - 'test/data/measurements_sofast_fixed/dot_location_calibration/measurements', - ) + dir_save = join(opencsp_code_dir(), 'test/data/measurements_sofast_fixed/dot_location_calibration/measurements') # Downsample marker/dot images n_downsample = 4 diff --git a/contrib/test_data_generation/sofast_fringe/downsample_data.py b/contrib/test_data_generation/sofast_fringe/downsample_data.py index 9f0a7fc5..1e9ef5c3 100644 --- a/contrib/test_data_generation/sofast_fringe/downsample_data.py +++ b/contrib/test_data_generation/sofast_fringe/downsample_data.py @@ -4,9 +4,7 @@ import os import sys -from opencsp.app.sofast.lib.MeasurementSofastFringe import ( - MeasurementSofastFringe as Measurement, -) +from opencsp.app.sofast.lib.MeasurementSofastFringe import 
MeasurementSofastFringe as Measurement from opencsp.common.lib.opencsp_path.opencsp_root_path import opencsp_code_dir sys.path.append(os.path.join(opencsp_code_dir(), '..')) diff --git a/contrib/test_data_generation/sofast_fringe/generate_downsampled_calibration_data.py b/contrib/test_data_generation/sofast_fringe/generate_downsampled_calibration_data.py index 1ff0c306..62d01aff 100644 --- a/contrib/test_data_generation/sofast_fringe/generate_downsampled_calibration_data.py +++ b/contrib/test_data_generation/sofast_fringe/generate_downsampled_calibration_data.py @@ -32,22 +32,14 @@ def downsample_dataset_1(base_dir): # Define location of sample data file_measurement_facet = abspath(join(base_dir, 'sofast/measurement_facet.h5')) - file_measurement_ensemble = abspath( - join(base_dir, 'sofast/measurement_facet_ensemble.h5') - ) + file_measurement_ensemble = abspath(join(base_dir, 'sofast/measurement_facet_ensemble.h5')) file_calibration = abspath(join(base_dir, 'sofast/image_calibration.h5')) file_camera = abspath(join(base_dir, 'calibration_files/camera.h5')) - file_display_1 = abspath( - join(base_dir, 'calibration_files/display_distorted_2d.h5') - ) - file_display_2 = abspath( - join(base_dir, 'calibration_files/display_distorted_3d.h5') - ) + file_display_1 = abspath(join(base_dir, 'calibration_files/display_distorted_2d.h5')) + file_display_2 = abspath(join(base_dir, 'calibration_files/display_distorted_3d.h5')) file_display_3 = abspath(join(base_dir, 'calibration_files/display_rectangular.h5')) - dir_dataset_out = abspath( - join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe') - ) + dir_dataset_out = abspath(join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe')) if not exists(dir_dataset_out): raise FileNotFoundError(f'Output directory {dir_dataset_out} does not exist.') @@ -71,12 +63,8 @@ def downsample_dataset_1(base_dir): plt.title('Ensemble Mask Image') # Save data - measurement_facet.save_to_hdf( - join(dir_dataset_out, basename(file_measurement_facet)) - ) - measurement_ensemble.save_to_hdf( - join(dir_dataset_out, basename(file_measurement_ensemble)) - ) + measurement_facet.save_to_hdf(join(dir_dataset_out, basename(file_measurement_facet))) + measurement_ensemble.save_to_hdf(join(dir_dataset_out, basename(file_measurement_ensemble))) camera.save_to_hdf(join(dir_dataset_out, basename(file_camera))) display_1.save_to_hdf(join(dir_dataset_out, basename(file_display_1))) display_2.save_to_hdf(join(dir_dataset_out, basename(file_display_2))) @@ -88,7 +76,5 @@ def downsample_dataset_1(base_dir): if __name__ == '__main__': # Create downsample dataset 1 (NSTTF Optics Lab data) - dir_sample_data = join( - opencsp_code_dir(), '../../sample_data/sofast/measurement_set_1' - ) + dir_sample_data = join(opencsp_code_dir(), '../../sample_data/sofast/measurement_set_1') downsample_dataset_1(dir_sample_data) diff --git a/contrib/test_data_generation/sofast_fringe/generate_downsampled_dataset.py b/contrib/test_data_generation/sofast_fringe/generate_downsampled_dataset.py index 8a78acb7..dc2db410 100644 --- a/contrib/test_data_generation/sofast_fringe/generate_downsampled_dataset.py +++ b/contrib/test_data_generation/sofast_fringe/generate_downsampled_dataset.py @@ -41,9 +41,7 @@ def downsample_dataset(dir_input: str, dir_output: str) -> None: shutil.copy(join(dir_input, file), join(dir_output, file)) # Downsample screen distortion measurements - dir_output_screen_measurements = join( - dir_output, 'screen_shape_sofast_measurements' - ) + dir_output_screen_measurements = 
join(dir_output, 'screen_shape_sofast_measurements') if not os.path.exists(dir_output_screen_measurements): os.makedirs(dir_output_screen_measurements) files_meas = [ @@ -54,23 +52,18 @@ def downsample_dataset(dir_input: str, dir_output: str) -> None: for file_meas in files_meas: print(f'Downsampling sofast measurement: {os.path.basename(file_meas):s}...') meas_ds = dds.downsample_measurement(file_meas, n_sofast) - meas_ds.save_to_hdf( - join(dir_output_screen_measurements, os.path.basename(file_meas)) - ) + meas_ds.save_to_hdf(join(dir_output_screen_measurements, os.path.basename(file_meas))) # Downsample screen distortion camera print('Downsampling sofast camera...') - camera_sofast_ds = ddg.downsample_camera( - join(dir_input, 'camera_screen_shape.h5'), n_sofast - ) + camera_sofast_ds = ddg.downsample_camera(join(dir_input, 'camera_screen_shape.h5'), n_sofast) camera_sofast_ds.save_to_hdf(join(dir_output, 'camera_screen_shape.h5')) if __name__ == '__main__': downsample_dataset( dir_input=join( - opencsp_code_dir(), - '../../sample_data/sofast/data_photogrammetric_calibration/data_measurement', + opencsp_code_dir(), '../../sample_data/sofast/data_photogrammetric_calibration/data_measurement' ), dir_output=join(opencsp_code_dir, 'test/data/measurements_sofast_fringe'), ) diff --git a/contrib/test_data_generation/sofast_fringe/generate_test_data_multi_facet.py b/contrib/test_data_generation/sofast_fringe/generate_test_data_multi_facet.py index 44a447c0..51e7a3e8 100644 --- a/contrib/test_data_generation/sofast_fringe/generate_test_data_multi_facet.py +++ b/contrib/test_data_generation/sofast_fringe/generate_test_data_multi_facet.py @@ -7,9 +7,7 @@ import numpy as np from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling -from opencsp.app.sofast.lib.MeasurementSofastFringe import ( - MeasurementSofastFringe as Measurement, -) +from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement from opencsp.app.sofast.lib.ProcessSofastFringe import ProcessSofastFringe as Sofast from opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display from opencsp.app.sofast.lib.DefinitionEnsemble import DefinitionEnsemble @@ -30,9 +28,7 @@ def generate_dataset( """Generates and saves test data""" # Check output file exists if not exists(dirname(file_dataset_out)): - raise FileNotFoundError( - f'Output directory {file_dataset_out:s} does not exist.' 
- ) + raise FileNotFoundError(f'Output directory {file_dataset_out:s} does not exist.') # Load components camera = Camera.load_from_hdf(file_camera) @@ -58,10 +54,7 @@ def generate_dataset( # Define surface data surface_data = [ dict( - surface_type='parabolic', - initial_focal_lengths_xy=(100.0, 100.0), - robust_least_squares=False, - downsample=10, + surface_type='parabolic', initial_focal_lengths_xy=(100.0, 100.0), robust_least_squares=False, downsample=10 ) ] * ensemble_data.num_facets diff --git a/contrib/test_data_generation/sofast_fringe/generate_test_data_single_facet.py b/contrib/test_data_generation/sofast_fringe/generate_test_data_single_facet.py index dd35c421..ccc049e7 100644 --- a/contrib/test_data_generation/sofast_fringe/generate_test_data_single_facet.py +++ b/contrib/test_data_generation/sofast_fringe/generate_test_data_single_facet.py @@ -9,9 +9,7 @@ import numpy as np from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling -from opencsp.app.sofast.lib.MeasurementSofastFringe import ( - MeasurementSofastFringe as Measurement, -) +from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement from opencsp.app.sofast.lib.ProcessSofastFringe import ProcessSofastFringe as Sofast from opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display from opencsp.app.sofast.lib.DefinitionFacet import DefinitionFacet @@ -32,9 +30,7 @@ def generate_dataset( """Generates and saves dataset""" # Check output file exists if not exists(dirname(file_dataset_out)): - raise FileNotFoundError( - f'Output directory {file_dataset_out:s} does not exist.' - ) + raise FileNotFoundError(f'Output directory {file_dataset_out:s} does not exist.') # Load components camera = Camera.load_from_hdf(file_camera) @@ -61,9 +57,7 @@ def generate_dataset( downsample=10, ) elif surface_type == 'plano': - surface_data = dict( - surface_type=surface_type, robust_least_squares=robust_ls, downsample=10 - ) + surface_data = dict(surface_type=surface_type, robust_least_squares=robust_ls, downsample=10) # Process optic data sofast.process_optic_singlefacet(facet_data, surface_data) diff --git a/contrib/test_data_generation/sofast_fringe/generate_test_data_undefined.py b/contrib/test_data_generation/sofast_fringe/generate_test_data_undefined.py index 9c18ec63..1282f6f0 100644 --- a/contrib/test_data_generation/sofast_fringe/generate_test_data_undefined.py +++ b/contrib/test_data_generation/sofast_fringe/generate_test_data_undefined.py @@ -7,9 +7,7 @@ import numpy as np from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling -from opencsp.app.sofast.lib.MeasurementSofastFringe import ( - MeasurementSofastFringe as Measurement, -) +from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement from opencsp.app.sofast.lib.ProcessSofastFringe import ProcessSofastFringe as Sofast from opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display from opencsp.common.lib.camera.Camera import Camera @@ -17,18 +15,12 @@ def generate_dataset( - file_measurement: str, - file_camera: str, - file_display: str, - file_calibration: str, - file_dataset_out: str, + file_measurement: str, file_camera: str, file_display: str, file_calibration: str, file_dataset_out: str ): """Generates and saves dataset""" # Check output file exists if not exists(dirname(file_dataset_out)): - raise FileNotFoundError( - f'Output directory {file_dataset_out:s} does not exist.' 
- ) + raise FileNotFoundError(f'Output directory {file_dataset_out:s} does not exist.') # Load components camera = Camera.load_from_hdf(file_camera) @@ -47,10 +39,7 @@ def generate_dataset( # Define surface data surface_data = dict( - surface_type='parabolic', - initial_focal_lengths_xy=(100.0, 100.0), - robust_least_squares=False, - downsample=10, + surface_type='parabolic', initial_focal_lengths_xy=(100.0, 100.0), robust_least_squares=False, downsample=10 ) # Process optic data diff --git a/example/camera_calibration/example_view_camera_distortion.py b/example/camera_calibration/example_view_camera_distortion.py index 8bb63296..2476ee56 100644 --- a/example/camera_calibration/example_view_camera_distortion.py +++ b/example/camera_calibration/example_view_camera_distortion.py @@ -16,9 +16,7 @@ def example_driver(): """ # Define input camera file - file = os.path.join( - opencsp_code_dir(), 'test/data/measurements_sofast_fringe/camera.h5' - ) + file = os.path.join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe/camera.h5') # Load camera cam = Camera.load_from_hdf(file) diff --git a/example/camera_io/live_view_color_Basler.py b/example/camera_io/live_view_color_Basler.py index dbf26f24..f50d88f7 100644 --- a/example/camera_io/live_view_color_Basler.py +++ b/example/camera_io/live_view_color_Basler.py @@ -6,16 +6,13 @@ import argparse -from opencsp.common.lib.camera.ImageAcquisition_DCAM_color import ( - ImageAcquisition as ImageAcquisitionColor, -) +from opencsp.common.lib.camera.ImageAcquisition_DCAM_color import ImageAcquisition as ImageAcquisitionColor from opencsp.common.lib.camera.LiveView import LiveView def main(): parser = argparse.ArgumentParser( - prog='run_and_save_images_Basler_color', - description='Shows live view from Basler color camera.', + prog='run_and_save_images_Basler_color', description='Shows live view from Basler color camera.' ) parser.add_argument( 'camera_index', @@ -23,14 +20,8 @@ def main(): type=int, help='Camera index (0-indexed in order of camera serial number) to run.', ) - parser.add_argument( - '--calibrate', - action='store_true', - help='calibrate camera exposure before capture', - ) - parser.add_argument( - '-e', metavar='exposure', type=float, default=None, help='Camera exposure value' - ) + parser.add_argument('--calibrate', action='store_true', help='calibrate camera exposure before capture') + parser.add_argument('-e', metavar='exposure', type=float, default=None, help='Camera exposure value') args = parser.parse_args() # Connect to camera diff --git a/example/camera_io/live_view_mono_Basler.py b/example/camera_io/live_view_mono_Basler.py index f42e77b6..590e7337 100644 --- a/example/camera_io/live_view_mono_Basler.py +++ b/example/camera_io/live_view_mono_Basler.py @@ -6,16 +6,13 @@ import argparse -from opencsp.common.lib.camera.ImageAcquisition_DCAM_mono import ( - ImageAcquisition as ImageAcquisitionMono, -) +from opencsp.common.lib.camera.ImageAcquisition_DCAM_mono import ImageAcquisition as ImageAcquisitionMono from opencsp.common.lib.camera.LiveView import LiveView def main(): parser = argparse.ArgumentParser( - prog='run_and_save_images_Basler_color', - description='Shows live view from Basler monochrome camera.', + prog='run_and_save_images_Basler_color', description='Shows live view from Basler monochrome camera.' 
) parser.add_argument( 'camera_index', @@ -23,14 +20,8 @@ def main(): type=int, help='Camera index (0-indexed in order of camera serial number) to run.', ) - parser.add_argument( - '--calibrate', - action='store_true', - help='calibrate camera exposure before capture', - ) - parser.add_argument( - '-e', metavar='exposure', type=float, default=None, help='Camera exposure value' - ) + parser.add_argument('--calibrate', action='store_true', help='calibrate camera exposure before capture') + parser.add_argument('-e', metavar='exposure', type=float, default=None, help='Camera exposure value') args = parser.parse_args() # Connect to camera diff --git a/example/camera_io/run_and_save_images_Basler_color.py b/example/camera_io/run_and_save_images_Basler_color.py index 15941cc3..45c43764 100644 --- a/example/camera_io/run_and_save_images_Basler_color.py +++ b/example/camera_io/run_and_save_images_Basler_color.py @@ -7,9 +7,7 @@ import argparse import imageio.v3 as imageio -from opencsp.common.lib.camera.ImageAcquisition_DCAM_color import ( - ImageAcquisition as ImageAcquisitionColor, -) +from opencsp.common.lib.camera.ImageAcquisition_DCAM_color import ImageAcquisition as ImageAcquisitionColor def main(): @@ -18,18 +16,10 @@ def main(): description='Captures N frames from a color Basler camera. Saves images as 12bit numbers packed in 16 bit integers in TIFF format with filenames of the form: xx.tiff', ) parser.add_argument( - 'camera_index', - type=int, - help='Camera index (0-indexed in order of camera serial number) to run.', - ) - parser.add_argument( - 'num_images', type=int, help='Number of images to capture and save.' - ) - parser.add_argument( - '--calibrate', - action='store_true', - help='Calibrate camera exposure before capture.', + 'camera_index', type=int, help='Camera index (0-indexed in order of camera serial number) to run.' 
) + parser.add_argument('num_images', type=int, help='Number of images to capture and save.') + parser.add_argument('--calibrate', action='store_true', help='Calibrate camera exposure before capture.') parser.add_argument( '-p', '--prefix', diff --git a/example/csp/example_optics_and_ray_tracing.py b/example/csp/example_optics_and_ray_tracing.py index 88a9c761..a8f7c897 100644 --- a/example/csp/example_optics_and_ray_tracing.py +++ b/example/csp/example_optics_and_ray_tracing.py @@ -27,13 +27,9 @@ import opencsp.common.lib.render.figure_management as fm import opencsp.common.lib.render_control.RenderControlAxis as rca import opencsp.common.lib.render_control.RenderControlFigure as rcfg -from opencsp.common.lib.render_control.RenderControlLightPath import ( - RenderControlLightPath, -) +from opencsp.common.lib.render_control.RenderControlLightPath import RenderControlLightPath import opencsp.common.lib.render_control.RenderControlMirror as rcm -from opencsp.common.lib.render_control.RenderControlRayTrace import ( - RenderControlRayTrace, -) +from opencsp.common.lib.render_control.RenderControlRayTrace import RenderControlRayTrace def visualize_mirror() -> None: @@ -49,9 +45,7 @@ def visualize_mirror() -> None: optic_loc = Vxyz((0, 95, 0)) # Calculate mirror pointing az/el - v_sun = Vxyz((0, 1, 0)).rotate( - Rotation.from_euler('xz', [sun_el, -sun_azm], degrees=True) - ) + v_sun = Vxyz((0, 1, 0)).rotate(Rotation.from_euler('xz', [sun_el, -sun_azm], degrees=True)) v_optic_targ = (targ_loc - optic_loc).normalize() v_pointing = (v_sun + v_optic_targ).normalize() rot_pointing = Vxyz((0, 0, 1)).align_to(v_pointing) @@ -89,9 +83,7 @@ def visualize_facet() -> None: optic_loc = Vxyz((0, 95, 0)) # Calculate facet pointing az/el - v_sun = Vxyz((0, 1, 0)).rotate( - Rotation.from_euler('xz', [sun_el, -sun_azm], degrees=True) - ) + v_sun = Vxyz((0, 1, 0)).rotate(Rotation.from_euler('xz', [sun_el, -sun_azm], degrees=True)) v_optic_targ = (targ_loc - optic_loc).normalize() v_pointing = (v_sun + v_optic_targ).normalize() rot_pointing = Vxyz((0, 0, 1)).align_to(v_pointing) @@ -132,9 +124,7 @@ def visualize_mirror_array() -> None: optic_loc = Vxyz((0, 95, 0)) # Calculate mirror_array pointing az/el - v_sun = Vxyz((0, 1, 0)).rotate( - Rotation.from_euler('xz', [sun_el, -sun_azm], degrees=True) - ) + v_sun = Vxyz((0, 1, 0)).rotate(Rotation.from_euler('xz', [sun_el, -sun_azm], degrees=True)) v_optic_targ = (targ_loc - optic_loc).normalize() v_pointing = (v_sun + v_optic_targ).normalize() rot_pointing = Vxyz((0, 0, 1)).align_to(v_pointing) @@ -164,9 +154,7 @@ def visualize_mirror_array() -> None: def define_mirror(focal_length: float) -> MirrorParametric: """Creates parametric mirror with given focal length""" - region_mirror = RegionXY.from_vertices( - Vxy(([-0.6, -0.6, 0.6, 0.6], [-0.6, 0.6, 0.6, -0.6])) - ) + region_mirror = RegionXY.from_vertices(Vxy(([-0.6, -0.6, 0.6, 0.6], [-0.6, 0.6, 0.6, -0.6]))) return MirrorParametric.generate_symmetric_paraboloid(focal_length, region_mirror) @@ -228,31 +216,21 @@ def ray_trace_obj( return image, trace -def plot_ray_trace( - scene: Scene, - image: np.ndarray, - trace: rt.RayTrace, - title: str, - plot_rays: bool = False, -) -> None: +def plot_ray_trace(scene: Scene, image: np.ndarray, trace: rt.RayTrace, title: str, plot_rays: bool = False) -> None: """Plots and saves images""" # Define save directory save_dir = os.path.join(os.path.dirname(__file__), 'data/output') # Define visualization controls figure_control = rcfg.RenderControlFigure(tile_array=(2, 1), 
tile_square=True) - mirror_control = rcm.RenderControlMirror( - centroid=True, surface_normals=True, norm_res=1 - ) + mirror_control = rcm.RenderControlMirror(centroid=True, surface_normals=True, norm_res=1) axis_control_m = rca.meters() if plot_rays: light_path_control = RenderControlLightPath(current_length=10) ray_trace_control = RenderControlRayTrace(light_path_control=light_path_control) # Plot scenario - fig_record = fm.setup_figure_for_3d_data( - figure_control, axis_control_m, title=title + ': Ray Trace' - ) + fig_record = fm.setup_figure_for_3d_data(figure_control, axis_control_m, title=title + ': Ray Trace') if plot_rays: trace.draw(fig_record.view, ray_trace_control) scene.objects[0].draw(fig_record.view, mirror_control) @@ -260,9 +238,7 @@ def plot_ray_trace( fig_record.save(save_dir, 'ray_trace_' + title, 'png') # Plot image - fig_record = fm.setup_figure( - figure_control, axis_control_m, title=title + ': Sun Image' - ) + fig_record = fm.setup_figure(figure_control, axis_control_m, title=title + ': Sun Image') fig_record.axis.imshow(image, cmap='jet') fig_record.save(save_dir, 'sun_image_' + title, 'png') diff --git a/example/mirror/example_MirrorOutput.py b/example/mirror/example_MirrorOutput.py index 82fe3273..ccbada0e 100644 --- a/example/mirror/example_MirrorOutput.py +++ b/example/mirror/example_MirrorOutput.py @@ -21,9 +21,7 @@ import opencsp.common.lib.tool.string_tools as st from opencsp.common.lib.csp.ufacet.Facet import Facet from opencsp.common.lib.csp.ufacet.Heliostat import Heliostat -from opencsp.common.lib.csp.MirrorParametricRectangular import ( - MirrorParametricRectangular, -) +from opencsp.common.lib.csp.MirrorParametricRectangular import MirrorParametricRectangular from opencsp.common.lib.csp.MirrorParametric import MirrorParametric from opencsp.common.lib.csp.SolarField import SolarField @@ -59,16 +57,8 @@ def setup_class( self.m1_len_y = 3.0 # m self.m1_rectangle_xy = (self.m1_len_x, self.m1_len_y) self.m1 = MirrorParametricRectangular(self.m1_fxn, self.m1_rectangle_xy) - self.m1_shape_description = ( - 'rectangle ' + str(self.m1_len_x) + 'm x ' + str(self.m1_len_y) + 'm' - ) - self.m1_title = ( - 'Mirror (' - + self.m1_shape_description - + ', f=' - + str(self.m1_focal_length) - + 'm), Face Up' - ) + self.m1_shape_description = 'rectangle ' + str(self.m1_len_x) + 'm x ' + str(self.m1_len_y) + 'm' + self.m1_title = 'Mirror (' + self.m1_shape_description + ', f=' + str(self.m1_focal_length) + 'm), Face Up' self.m1_caption = ( 'A single mirror of shape (' + self.m1_shape_description @@ -108,26 +98,16 @@ def setup_class( # Set canting angles. 
cos5 = np.cos(np.deg2rad(8)) sin5 = np.sin(np.deg2rad(8)) - tilt_up = Rotation.from_matrix( - np.asarray([[1, 0, 0], [0, cos5, -sin5], [0, sin5, cos5]]) - ) - tilt_down = Rotation.from_matrix( - np.asarray([[1, 0, 0], [0, cos5, sin5], [0, -sin5, cos5]]) - ) - tilt_left = Rotation.from_matrix( - np.asarray([[cos5, 0, sin5], [0, 1, 0], [-sin5, 0, cos5]]) - ) - tilt_right = Rotation.from_matrix( - np.asarray([[cos5, 0, -sin5], [0, 1, 0], [sin5, 0, cos5]]) - ) + tilt_up = Rotation.from_matrix(np.asarray([[1, 0, 0], [0, cos5, -sin5], [0, sin5, cos5]])) + tilt_down = Rotation.from_matrix(np.asarray([[1, 0, 0], [0, cos5, sin5], [0, -sin5, cos5]])) + tilt_left = Rotation.from_matrix(np.asarray([[cos5, 0, sin5], [0, 1, 0], [-sin5, 0, cos5]])) + tilt_right = Rotation.from_matrix(np.asarray([[cos5, 0, -sin5], [0, 1, 0], [sin5, 0, cos5]])) self.h2x2_f1.canting = tilt_left * tilt_up self.h2x2_f2.canting = tilt_right * tilt_up self.h2x2_f3.canting = tilt_left * tilt_down self.h2x2_f4.canting = tilt_right * tilt_down self.h2x2_facets = [self.h2x2_f1, self.h2x2_f2, self.h2x2_f3, self.h2x2_f4] - self.h2x2 = Heliostat( - 'Simple 2x2 Heliostat', [0, 0, 0], 4, 2, 2, self.h2x2_facets, 0, 0 - ) + self.h2x2 = Heliostat('Simple 2x2 Heliostat', [0, 0, 0], 4, 2, 2, self.h2x2_facets, 0, 0) self.h2x2_title = 'Heliostat with Parametrically Defined Facets' self.h2x2_caption = ( 'Heliostat with four facets (' @@ -139,37 +119,15 @@ def setup_class( self.h2x2_comments = [] # Simple solar field, with two simple heliostats. - self.sf2x2_h1 = Heliostat( - 'Heliostat 1', - [0, 0, 0], - 4, - 2, - 2, - copy.deepcopy(self.h2x2_facets), - 4.02, - 0.1778, - ) - self.sf2x2_h2 = Heliostat( - 'Heliostat 2', - [0, 10, 0], - 4, - 2, - 2, - copy.deepcopy(self.h2x2_facets), - 4.02, - 0.1778, - ) + self.sf2x2_h1 = Heliostat('Heliostat 1', [0, 0, 0], 4, 2, 2, copy.deepcopy(self.h2x2_facets), 4.02, 0.1778) + self.sf2x2_h2 = Heliostat('Heliostat 2', [0, 10, 0], 4, 2, 2, copy.deepcopy(self.h2x2_facets), 4.02, 0.1778) self.sf2x2_heliostats = [self.sf2x2_h1, self.sf2x2_h2] - self.sf2x2 = SolarField( - 'Test Field', 'test', [-106.509606, 34.962276], self.sf2x2_heliostats - ) + self.sf2x2 = SolarField('Test Field', 'test', [-106.509606, 34.962276], self.sf2x2_heliostats) self.sf2x2_title = 'Two Heliostats' self.sf2x2_caption = 'Two 4-facet heliostats, tracking.' self.sf2x2_comments = [] - def lambda_symmetric_paraboloid( - self, focal_length: float - ) -> Callable[[float, float], float]: + def lambda_symmetric_paraboloid(self, focal_length: float) -> Callable[[float, float], float]: """Returns a callable for a symmetric paraboloid surface Parameters @@ -278,9 +236,7 @@ def example_facet(self) -> None: draw_surface_normal=False, draw_surface_normal_at_corners=True, ) - local_comments.append( - 'Render mirror surface with normals, facet outline with corner normals.' - ) + local_comments.append('Render mirror surface with normals, facet outline with corner normals.') # Draw. fig_record = fm.setup_figure_for_3d_data( @@ -329,9 +285,7 @@ def example_heliostat_surface_normals(self) -> None: facet_styles=facet_control, draw_facets=True, ) - local_comments.append( - 'Render mirror surfaces and surface normals, facet normals, and heliostat outline.' - ) + local_comments.append('Render mirror surfaces and surface normals, facet normals, and heliostat outline.') # Draw. fig_record = fm.setup_figure_for_3d_data( @@ -361,25 +315,16 @@ def example_solar_field(self) -> None: # Set configurations. 
self.sf2x2_h1.set_configuration(hc.face_west()) - local_comments.append( - 'Heliostat 1 oriented initially face west.' - ) # Overriden by tracking below. + local_comments.append('Heliostat 1 oriented initially face west.') # Overriden by tracking below. self.sf2x2_h2.set_configuration(hc.face_south()) - local_comments.append( - 'Heliostat 2 oriented initially face south.' - ) # Overriden by tracking below. + local_comments.append('Heliostat 2 oriented initially face south.') # Overriden by tracking below. # Define tracking time. aimpoint_xyz = [60.0, 8.8, 28.9] # year, month, day, hour, minute, second, zone] when_ymdhmsz = [2021, 5, 13, 13, 2, 0, -6] self.sf2x2.set_full_field_tracking(aimpoint_xyz, when_ymdhmsz) - local_comments.append( - 'Heliostats set to track to ' - + str(aimpoint_xyz) - + ' at ymdhmsz =' - + str(when_ymdhmsz) - ) + local_comments.append('Heliostats set to track to ' + str(aimpoint_xyz) + ' at ymdhmsz =' + str(when_ymdhmsz)) # Setup render control. mirror_control = rcm.RenderControlMirror(surface_normals=False) @@ -402,9 +347,7 @@ def example_solar_field(self) -> None: solar_field_control = rcsf.RenderControlSolarField( heliostat_styles=rce.RenderControlEnsemble(heliostat_control) ) - local_comments.append( - 'Render mirror surfaces, facet centroids, and heliostat outline and surface normal.' - ) + local_comments.append('Render mirror surfaces, facet centroids, and heliostat outline and surface normal.') # Draw. fig_record = fm.setup_figure_for_3d_data( @@ -443,11 +386,7 @@ def example_heliostat_05W01_and_14W01(self) -> None: focal_length_5W01 = 55 # meters name_5W01 = '5W01' title_5W01 = 'NSTTF Heliostat ' + name_5W01 - caption_5W01 = ( - '5W01 modeled as a symmetric paraboloid with focal length f=' - + str(focal_length_5W01) - + 'm.' - ) + caption_5W01 = '5W01 modeled as a symmetric paraboloid with focal length f=' + str(focal_length_5W01) + 'm.' # 14W01. x_14W01 = -4.88 # meters # TODO RCB: FETCH FROM DEFINITION FILE y_14W01 = 194.71 # meters # TODO RCB: FETCH FROM DEFINITION FILE @@ -455,11 +394,7 @@ def example_heliostat_05W01_and_14W01(self) -> None: focal_length_14W01 = 186.8 # meters name_14W01 = '14W01' title_14W01 = 'NSTTF Heliostat ' + name_14W01 - caption_14W01 = ( - '14W01 modeled as a symmetric paraboloid with focal length f=' - + str(focal_length_14W01) - + 'm.' - ) + caption_14W01 = '14W01 modeled as a symmetric paraboloid with focal length f=' + str(focal_length_14W01) + 'm.' # Solar field. short_name_sf = 'Mini NSTTF' name_sf = 'Mini NSTTF with ' + name_5W01 + ' and ' + name_14W01 @@ -516,23 +451,15 @@ def fn_14W01(x, y): sf = SolarField(name_sf, short_name_sf, [-106.509606, 34.962276], heliostats) - comments_long = ( - comments.copy() - ) # We'll add a different comment for the plots with long normals. - comments_very_long = ( - comments.copy() - ) # We'll add a different comment for the plots with very long normals. + comments_long = comments.copy() # We'll add a different comment for the plots with long normals. + comments_very_long = comments.copy() # We'll add a different comment for the plots with very long normals. comments_exaggerated_z = ( comments.copy() ) # We'll add a different comment for the plots with an exaggerated z axis. - comments.append( - 'Render mirror surfaces and normals, facet outlines, and heliostat centroid.' - ) + comments.append('Render mirror surfaces and normals, facet outlines, and heliostat centroid.') # Setup render control (long normals). 
- mirror_control_long = rcm.RenderControlMirror( - surface_normals=True, norm_len=12, norm_res=3, resolution=3 - ) + mirror_control_long = rcm.RenderControlMirror(surface_normals=True, norm_len=12, norm_res=3, resolution=3) facet_control_long = rcf.RenderControlFacet( draw_mirror_curvature=True, mirror_styles=mirror_control_long, @@ -551,9 +478,7 @@ def fn_14W01(x, y): draw_facets=True, ) - comments_long.append( - 'Render mirror surfaces and long normals, facet outlines, and heliostat centroid.' - ) + comments_long.append('Render mirror surfaces and long normals, facet outlines, and heliostat centroid.') # Draw and output 5W01 figure (long normals, xy view). fig_record = fm.setup_figure_for_3d_data( @@ -590,9 +515,7 @@ def fn_14W01(x, y): # Setup render control (very long normals). mirror_control_very_long = rcm.RenderControlMirror( surface_normals=True, - norm_len=( - 2 * focal_length_14W01 - ), # Twice the focal length is the center of curvature. + norm_len=(2 * focal_length_14W01), # Twice the focal length is the center of curvature. norm_res=2, resolution=3, ) @@ -640,31 +563,13 @@ def fn_14W01(x, y): z_exaggerated_margin = 0.35 # meters, plus or minus reference height. decimal_factor = 100.0 # Different z limits for each heliostat, because they are at different elevations on the sloped field. - z_min_5W01 = ( - np.floor( - decimal_factor * ((z_5W01 + nsttf_pivot_offset) - z_exaggerated_margin) - ) - / decimal_factor - ) - z_max_5W01 = ( - np.ceil( - decimal_factor * ((z_5W01 + nsttf_pivot_offset) + z_exaggerated_margin) - ) - / decimal_factor - ) + z_min_5W01 = np.floor(decimal_factor * ((z_5W01 + nsttf_pivot_offset) - z_exaggerated_margin)) / decimal_factor + z_max_5W01 = np.ceil(decimal_factor * ((z_5W01 + nsttf_pivot_offset) + z_exaggerated_margin)) / decimal_factor exaggerated_z_limits_5W01 = [z_min_5W01, z_max_5W01] z_min_14W01 = ( - np.floor( - decimal_factor * ((z_14W01 + nsttf_pivot_offset) - z_exaggerated_margin) - ) - / decimal_factor - ) - z_max_14W01 = ( - np.ceil( - decimal_factor * ((z_14W01 + nsttf_pivot_offset) + z_exaggerated_margin) - ) - / decimal_factor + np.floor(decimal_factor * ((z_14W01 + nsttf_pivot_offset) - z_exaggerated_margin)) / decimal_factor ) + z_max_14W01 = np.ceil(decimal_factor * ((z_14W01 + nsttf_pivot_offset) + z_exaggerated_margin)) / decimal_factor exaggerated_z_limits_14W01 = [z_min_14W01, z_max_14W01] mirror_control_exaggerated_z = rcm.RenderControlMirror(surface_normals=False) facet_control_exaggerated_z = rcf.RenderControlFacet( @@ -717,7 +622,9 @@ def fn_14W01(x, y): code_tag=self.code_tag, ) fig_record.equal = False # Asserting equal axis scales contradicts exaggerated z limits in 2-d plots. - fig_record.z_limits = exaggerated_z_limits_5W01 # Limits are on z values, even though the plot is 2-d. View3d.py handles this. + fig_record.z_limits = ( + exaggerated_z_limits_5W01 # Limits are on z values, even though the plot is 2-d. View3d.py handles this. + ) h_5W01.draw(fig_record.view, heliostat_control_exaggerated_z) self.show_save_and_check_figure(fig_record) @@ -752,7 +659,9 @@ def fn_14W01(x, y): code_tag=self.code_tag, ) fig_record.equal = False # Asserting equal axis scales contradicts exaggerated z limits in 2-d plots. - fig_record.z_limits = exaggerated_z_limits_14W01 # Limits are on z values, even though the plot is 2-d. View3d.py handles this. + fig_record.z_limits = ( + exaggerated_z_limits_14W01 # Limits are on z values, even though the plot is 2-d. View3d.py handles this. 
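The hunks above model each heliostat as a symmetric paraboloid of a given focal length, and the "very long" normals are set to twice the focal length because that reaches the center of curvature (per the inline comment). A minimal sketch, independent of OpenCSP, of the surface such a lambda_symmetric_paraboloid helper presumably returns: z = (x^2 + y^2) / (4 f).

    # Illustrative sketch: symmetric paraboloid of focal length f.  At the vertex
    # the radius of curvature is 2 f, hence norm_len = 2 * focal_length above.
    from typing import Callable

    def symmetric_paraboloid(focal_length: float) -> Callable[[float, float], float]:
        def surface(x: float, y: float) -> float:
            return (x * x + y * y) / (4.0 * focal_length)
        return surface

    fn = symmetric_paraboloid(186.8)  # focal length used above, in meters
    print(f"sag at (2.0, 0.0): {fn(2.0, 0.0):.6f} m")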
+ ) h_14W01.draw(fig_record.view, heliostat_control_exaggerated_z) self.show_save_and_check_figure(fig_record) @@ -809,9 +718,7 @@ def fn(x, y): when_ymdhmsz = [2021, 5, 13, 13, 2, 0, -6] # Setup render control. - mirror_control = rcm.RenderControlMirror( - surface_normals=True, norm_len=4, norm_res=2, resolution=3 - ) + mirror_control = rcm.RenderControlMirror(surface_normals=True, norm_len=4, norm_res=2, resolution=3) facet_control = rcf.RenderControlFacet( draw_mirror_curvature=True, mirror_styles=mirror_control, @@ -887,12 +794,7 @@ def fn(x, y): # Tracking heliostat. sf.set_full_field_tracking(aimpoint_xyz, when_ymdhmsz) - comments.append( - 'Heliostats set to track to ' - + str(aimpoint_xyz) - + ' at ymdhmsz =' - + str(when_ymdhmsz) - ) + comments.append('Heliostats set to track to ' + str(aimpoint_xyz) + ' at ymdhmsz =' + str(when_ymdhmsz)) fig_record = fm.setup_figure_for_3d_data( self.figure_control, self.axis_control_m, diff --git a/example/raytrace/example_RayTraceOutput.py b/example/raytrace/example_RayTraceOutput.py index caed85e1..9a737efe 100644 --- a/example/raytrace/example_RayTraceOutput.py +++ b/example/raytrace/example_RayTraceOutput.py @@ -40,9 +40,7 @@ from opencsp.common.lib.csp.ufacet.Heliostat import Heliostat from opencsp.common.lib.csp.LightPath import LightPath from opencsp.common.lib.csp.LightSourceSun import LightSourceSun -from opencsp.common.lib.csp.MirrorParametricRectangular import ( - MirrorParametricRectangular, -) +from opencsp.common.lib.csp.MirrorParametricRectangular import MirrorParametricRectangular from opencsp.common.lib.csp.Scene import Scene from opencsp.common.lib.csp.SolarField import SolarField from opencsp.common.lib.geometry.Pxyz import Pxyz @@ -50,19 +48,11 @@ from opencsp.common.lib.geometry.Vxyz import Vxyz from opencsp.common.lib.render.View3d import View3d from opencsp.common.lib.render_control.RenderControlAxis import RenderControlAxis -from opencsp.common.lib.render_control.RenderControlEnsemble import ( - RenderControlEnsemble, -) +from opencsp.common.lib.render_control.RenderControlEnsemble import RenderControlEnsemble from opencsp.common.lib.render_control.RenderControlFigure import RenderControlFigure -from opencsp.common.lib.render_control.RenderControlFigureRecord import ( - RenderControlFigureRecord, -) -from opencsp.common.lib.render_control.RenderControlLightPath import ( - RenderControlLightPath, -) -from opencsp.common.lib.render_control.RenderControlRayTrace import ( - RenderControlRayTrace, -) +from opencsp.common.lib.render_control.RenderControlFigureRecord import RenderControlFigureRecord +from opencsp.common.lib.render_control.RenderControlLightPath import RenderControlLightPath +from opencsp.common.lib.render_control.RenderControlRayTrace import RenderControlRayTrace from opencsp.common.lib.render_control.RenderControlSurface import RenderControlSurface @@ -94,16 +84,8 @@ def setup_class( self.m1_len_y = 3.0 # m self.m1_rectangle_xy = (self.m1_len_x, self.m1_len_y) self.m1 = MirrorParametricRectangular(self.m1_fxn, self.m1_rectangle_xy) - self.m1_shape_description = ( - 'rectangle ' + str(self.m1_len_x) + 'm x ' + str(self.m1_len_y) + 'm' - ) - self.m1_title = ( - 'Mirror (' - + self.m1_shape_description - + ', f=' - + str(self.m1_focal_length) - + 'm), Face Up' - ) + self.m1_shape_description = 'rectangle ' + str(self.m1_len_x) + 'm x ' + str(self.m1_len_y) + 'm' + self.m1_title = 'Mirror (' + self.m1_shape_description + ', f=' + str(self.m1_focal_length) + 'm), Face Up' self.m1_caption = ( 'A single mirror of 
shape (' + self.m1_shape_description @@ -134,26 +116,16 @@ def setup_class( # Set canting angles. cos5 = np.cos(np.deg2rad(8)) sin5 = np.sin(np.deg2rad(8)) - tilt_up = Rotation.from_matrix( - np.asarray([[1, 0, 0], [0, cos5, -sin5], [0, sin5, cos5]]) - ) - tilt_down = Rotation.from_matrix( - np.asarray([[1, 0, 0], [0, cos5, sin5], [0, -sin5, cos5]]) - ) - tilt_left = Rotation.from_matrix( - np.asarray([[cos5, 0, sin5], [0, 1, 0], [-sin5, 0, cos5]]) - ) - tilt_right = Rotation.from_matrix( - np.asarray([[cos5, 0, -sin5], [0, 1, 0], [sin5, 0, cos5]]) - ) + tilt_up = Rotation.from_matrix(np.asarray([[1, 0, 0], [0, cos5, -sin5], [0, sin5, cos5]])) + tilt_down = Rotation.from_matrix(np.asarray([[1, 0, 0], [0, cos5, sin5], [0, -sin5, cos5]])) + tilt_left = Rotation.from_matrix(np.asarray([[cos5, 0, sin5], [0, 1, 0], [-sin5, 0, cos5]])) + tilt_right = Rotation.from_matrix(np.asarray([[cos5, 0, -sin5], [0, 1, 0], [sin5, 0, cos5]])) self.h2x2_f1.canting = tilt_left * tilt_up self.h2x2_f2.canting = tilt_right * tilt_up self.h2x2_f3.canting = tilt_left * tilt_down self.h2x2_f4.canting = tilt_right * tilt_down self.h2x2_facets = [self.h2x2_f1, self.h2x2_f2, self.h2x2_f3, self.h2x2_f4] - self.h2x2 = Heliostat( - 'Simple 2x2 Heliostat', [0, 0, 0], 4, 2, 2, self.h2x2_facets, 0, 0 - ) + self.h2x2 = Heliostat('Simple 2x2 Heliostat', [0, 0, 0], 4, 2, 2, self.h2x2_facets, 0, 0) self.h2x2_title = 'Heliostat with Parametrically Defined Facets' self.h2x2_caption = ( 'Heliostat with four facets (' @@ -165,37 +137,15 @@ def setup_class( self.h2x2_comments = [] # Simple solar field, with two simple heliostats. - self.sf2x2_h1 = Heliostat( - 'Heliostat 1', - [0, 0, 0], - 4, - 2, - 2, - copy.deepcopy(self.h2x2_facets), - 4.02, - 0.1778, - ) - self.sf2x2_h2 = Heliostat( - 'Heliostat 2', - [0, 10, 0], - 4, - 2, - 2, - copy.deepcopy(self.h2x2_facets), - 4.02, - 0.1778, - ) + self.sf2x2_h1 = Heliostat('Heliostat 1', [0, 0, 0], 4, 2, 2, copy.deepcopy(self.h2x2_facets), 4.02, 0.1778) + self.sf2x2_h2 = Heliostat('Heliostat 2', [0, 10, 0], 4, 2, 2, copy.deepcopy(self.h2x2_facets), 4.02, 0.1778) self.sf2x2_heliostats = [self.sf2x2_h1, self.sf2x2_h2] - self.sf2x2 = SolarField( - 'Test Field', 'test', [-106.509606, 34.962276], self.sf2x2_heliostats - ) + self.sf2x2 = SolarField('Test Field', 'test', [-106.509606, 34.962276], self.sf2x2_heliostats) self.sf2x2_title = 'Two Heliostats' self.sf2x2_caption = 'Two 4-facet heliostats, tracking.' self.sf2x2_comments = [] - def lambda_symmetric_paraboloid( - self, focal_length: float - ) -> Callable[[float, float], float]: + def lambda_symmetric_paraboloid(self, focal_length: float) -> Callable[[float, float], float]: """ Helper function that makes lambdas of paraboloids of a given focal length. """ @@ -229,9 +179,7 @@ def example_draw_simple_ray(self) -> None: incoming_vector = Vxyz([0, 1, -1]) ref_vec = rt.calc_reflected_ray(normal_vector, incoming_vector) ray = LightPath(points, incoming_vector, ref_vec) - light_path_control = RenderControlLightPath( - line_render_control=rcps.RenderControlPointSeq(color='y') - ) + light_path_control = RenderControlLightPath(line_render_control=rcps.RenderControlPointSeq(color='y')) ray.draw(view, light_path_control) # Output. 
@@ -258,9 +206,7 @@ def example_mirror_trace(self) -> None: # Face Up, Parallel Beams 3d - ls.incident_rays = LightPath.many_rays_from_many_vectors( - None, Vxyz([0, 0, -1]) - ) # straight down + ls.incident_rays = LightPath.many_rays_from_many_vectors(None, Vxyz([0, 0, -1])) # straight down m1.set_position_in_space(tran, rot_id) @@ -306,9 +252,7 @@ def example_mirror_trace(self) -> None: ) view1_yz = fig_record.view - trace1.draw( - view1_yz, RenderControlRayTrace(light_path_control=light_path_control) - ) + trace1.draw(view1_yz, RenderControlRayTrace(light_path_control=light_path_control)) m1.draw(view1_yz, mirror_control) # Output. @@ -316,9 +260,7 @@ def example_mirror_trace(self) -> None: # 45 Degree Rotation, Parallel Beams 3d - ls.incident_rays = LightPath.many_rays_from_many_vectors( - None, Uxyz([0, 1, -1]) - ) # coming at a 45 degree angle + ls.incident_rays = LightPath.many_rays_from_many_vectors(None, Uxyz([0, 1, -1])) # coming at a 45 degree angle m1.set_position_in_space(tran, rot_45_deg) @@ -363,9 +305,7 @@ def example_mirror_trace(self) -> None: code_tag=self.code_tag, ) view2_yz = fig_record.view - trace2.draw( - view2_yz, RenderControlRayTrace(light_path_control=light_path_control) - ) + trace2.draw(view2_yz, RenderControlRayTrace(light_path_control=light_path_control)) m1.draw(view2_yz, mirror_control) # Output. @@ -374,9 +314,7 @@ def example_mirror_trace(self) -> None: # Face Up, Cone of light rays, 3d # set of inc vectors to test - example_vecs = Uxyz( - [[0, 0, 0, 0.1, -0.1], [0, 0.1, -0.1, 0, 0], [-1, -1, -1, -1, -1]] - ) + example_vecs = Uxyz([[0, 0, 0, 0.1, -0.1], [0, 0.1, -0.1, 0, 0], [-1, -1, -1, -1, -1]]) ls.incident_rays = LightPath.many_rays_from_many_vectors(None, example_vecs) @@ -431,9 +369,7 @@ def example_mirror_trace(self) -> None: # 45 degree rotation, cone of beams 3d - ls.incident_rays = LightPath.many_rays_from_many_vectors( - None, example_vecs.rotate(rot_45_deg) - ) + ls.incident_rays = LightPath.many_rays_from_many_vectors(None, example_vecs.rotate(rot_45_deg)) m1.set_position_in_space(tran, rot_45_deg) @@ -478,9 +414,7 @@ def example_mirror_trace(self) -> None: ) view4_yz = fig_record.view - trace4.draw( - view4_yz, RenderControlRayTrace(light_path_control=light_path_control) - ) + trace4.draw(view4_yz, RenderControlRayTrace(light_path_control=light_path_control)) m1.draw(view4_yz, mirror_control) # Output. 
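The mirror-trace hunks reflect incident rays off surface normals via rt.calc_reflected_ray. A standalone sketch of the underlying specular-reflection math, r = d - 2 (d . n) n, in plain numpy; the OpenCSP function itself is not reproduced here.

    # Illustrative sketch: reflect a ray direction d about a unit surface normal n.
    import numpy as np

    def reflect(d: np.ndarray, n: np.ndarray) -> np.ndarray:
        n = n / np.linalg.norm(n)          # ensure the normal is unit length
        return d - 2.0 * np.dot(d, n) * n

    incoming = np.array([0.0, 1.0, -1.0])  # 45-degree downward ray, as in the hunk above
    normal = np.array([0.0, 0.0, 1.0])     # face-up mirror normal
    print(reflect(incoming, normal))       # -> [0. 1. 1.], reflected upward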
@@ -518,9 +452,7 @@ def h_func(x, y): facet_height=1.2192, default_mirror_shape=flat_func, ) - sf_flat = sf.SolarField( - "mini Nsttf with 5W1 and 14W1", "mini Field", loc, [h_flat] - ) + sf_flat = sf.SolarField("mini Nsttf with 5W1 and 14W1", "mini Field", loc, [h_flat]) h_curved = helio.h_from_facet_centroids( "NSTTF Heliostat 05W01", @@ -535,9 +467,7 @@ def h_func(x, y): facet_height=1.2192, default_mirror_shape=h_func, ) - sf_curved = sf.SolarField( - "mini Nsttf with 5W1 and 14W1", "mini Field", loc, [h_curved] - ) + sf_curved = sf.SolarField("mini Nsttf with 5W1 and 14W1", "mini Field", loc, [h_curved]) h_canted = helio.h_from_facet_centroids( "NSTTF Heliostat 05W01", @@ -553,13 +483,9 @@ def h_func(x, y): default_mirror_shape=h_func, ) h_canted.set_canting_from_equation(h_func) - sf_canted = sf.SolarField( - "mini Nsttf with 5W1 and 14W1", "mini Field", loc, [h_canted] - ) + sf_canted = sf.SolarField("mini Nsttf with 5W1 and 14W1", "mini Field", loc, [h_canted]) - mirror_control = rcm.RenderControlMirror( - surface_normals=False, norm_len=8, norm_res=2, resolution=3 - ) + mirror_control = rcm.RenderControlMirror(surface_normals=False, norm_len=8, norm_res=2, resolution=3) facet_control = rcf.RenderControlFacet( draw_mirror_curvature=True, mirror_styles=mirror_control, @@ -594,9 +520,7 @@ def h_func(x, y): # RAY TRACING # set of inc vectors to test - example_vecs = Uxyz( - [[0, 0, 0, 0.1, -0.1], [0, 0.1, -0.1, 0, 0], [-1, -1, -1, -1, -1]] - ) + example_vecs = Uxyz([[0, 0, 0, 0.1, -0.1], [0, 0.1, -0.1, 0, 0], [-1, -1, -1, -1, -1]]) sun = LightSourceSun() # sun.set_incident_rays(loc, when_ymdhmsz, 3) @@ -706,9 +630,7 @@ def h_func(x, y): def example_changing_time_of_day(self) -> None: # create a figure that shows 5w1 reflecting the sun towards an aimpoint -- TODO tjlarki: sun rays coming from wrong direction - def _heliostat_at_moment( - name: str, aimpoint_xyz: tuple, when_ymdhmsz: tuple, i: int - ) -> None: + def _heliostat_at_moment(name: str, aimpoint_xyz: tuple, when_ymdhmsz: tuple, i: int) -> None: self.start_test() local_comments = [] @@ -733,13 +655,9 @@ def fn_5w1(x, y): heliostats = [h_05w01] - sf1 = sf.SolarField( - "mini Nsttf with 5W1", "mini Field", lln.NSTTF_ORIGIN, heliostats - ) + sf1 = sf.SolarField("mini Nsttf with 5W1", "mini Field", lln.NSTTF_ORIGIN, heliostats) - mirror_control = rcm.RenderControlMirror( - surface_normals=False, norm_len=8, norm_res=2, resolution=3 - ) + mirror_control = rcm.RenderControlMirror(surface_normals=False, norm_len=8, norm_res=2, resolution=3) facet_control = rcf.RenderControlFacet( draw_mirror_curvature=True, mirror_styles=mirror_control, @@ -773,9 +691,7 @@ def _draw_helper(view: View3d) -> None: scene.add_object(sf1) scene.add_light_source(sun) - path_control = RenderControlLightPath( - current_length=100, init_length=20 - ) + path_control = RenderControlLightPath(current_length=100, init_length=20) trace_control = RenderControlRayTrace(light_path_control=path_control) trace = rt.trace_scene(scene, obj_resolution=1) @@ -815,15 +731,11 @@ def _draw_helper(view: View3d) -> None: view_xz = fig_record.view _draw_helper(view_xz) - _heliostat_at_moment( - "5w1 at 11:02", [60.0, 8.8, 28.9], (2021, 5, 13, 11, 2, 0, -6), 16 - ) + _heliostat_at_moment("5w1 at 11:02", [60.0, 8.8, 28.9], (2021, 5, 13, 11, 2, 0, -6), 16) # _heliostat_at_moment("5w1 at 12:02", [60.0,8.8,28.9], (2021,5,13,12,2,0,-6), 18) # _heliostat_at_moment("5w1 at 13:02", [60.0,8.8,28.9], (2021,5,13,13,2,0,-6), 20) # _heliostat_at_moment("5w1 at 14:02", [60.0,8.8,28.9], 
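The "cone of light rays" above is a 3 x N array of direction vectors, one column per ray, all pointing roughly downward. A plain-numpy sketch of normalizing such columns to unit vectors; Uxyz is OpenCSP's own wrapper and is not reproduced here.

    # Illustrative sketch: column-wise normalization of the incident-ray directions.
    import numpy as np

    example_vecs = np.array([[0, 0, 0, 0.1, -0.1],
                             [0, 0.1, -0.1, 0, 0],
                             [-1, -1, -1, -1, -1]], dtype=float)

    unit_vecs = example_vecs / np.linalg.norm(example_vecs, axis=0, keepdims=True)
    print(np.round(unit_vecs, 3))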
(2021,5,13,14,2,0,-6), 22) - _heliostat_at_moment( - "5w1 at 15:02", [60.0, 8.8, 28.9], (2021, 5, 13, 15, 2, 0, -6), 24 - ) + _heliostat_at_moment("5w1 at 15:02", [60.0, 8.8, 28.9], (2021, 5, 13, 15, 2, 0, -6), 24) # test__changing-time_of_day_helper("5w1 at solar noon, pointing at origin",[0,0,28.9], (2021,5,13,13,2,0,-6), figure_control, axis_control_m) return @@ -858,34 +770,24 @@ def example_partial_field_trace(self) -> None: facet_centroids_file=dpft.sandia_nsttf_test_facet_centroidsfile(), autoset_canting_and_curvature=aimpoint_xyz, ) - solar_field.heliostats = solar_field.heliostats[ - 0:215:13 - ] # only keeps first 15 heliostats + solar_field.heliostats = solar_field.heliostats[0:215:13] # only keeps first 15 heliostats # Tracking setup - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) # Style setup solar_field_style = rcsf.heliostat_outlines(color='b') # Comment fig_record.comments.append("Partial Solar Field Trace.") - fig_record.comments.append( - "Using 1 ray per surface normal and one surface normal per mirror." - ) + fig_record.comments.append("Using 1 ray per surface normal and one surface normal per mirror.") fig_record.comments.append( "Mirror curvature and canting is defined per heliostat. They are both parabolic and have focal lengths based on the distance of the heliostat to the tower." ) - fig_record.comments.append( - "Traces one in every 13 heliostats in the NSTTF field." - ) + fig_record.comments.append("Traces one in every 13 heliostats in the NSTTF field.") # Draw - mirror_control = rcm.RenderControlMirror( - surface_normals=False, norm_len=8, norm_res=2, resolution=2 - ) + mirror_control = rcm.RenderControlMirror(surface_normals=False, norm_len=8, norm_res=2, resolution=2) facet_control = rcf.RenderControlFacet( draw_mirror_curvature=False, mirror_styles=mirror_control, @@ -908,9 +810,7 @@ def example_partial_field_trace(self) -> None: ) solar_field.draw(view, solar_field_style) - view.draw_xyz( - aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz' - ) + view.draw_xyz(aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz') sun = LightSourceSun() sun.set_incident_rays(lln.NSTTF_ORIGIN, when_ymdhmsz, 1) @@ -922,9 +822,7 @@ def example_partial_field_trace(self) -> None: trace = rt.trace_scene(scene, 1, verbose=True) trace.draw(view, RenderControlRayTrace(RenderControlLightPath(15, 200))) - view.draw_xyz( - aimpoint_xyz, rcps.RenderControlPointSeq(color='orange', marker='.') - ) + view.draw_xyz(aimpoint_xyz, rcps.RenderControlPointSeq(color='orange', marker='.')) self.show_save_and_check_figure(fig_record) diff --git a/example/scene_reconstruction/example_annotate_aruco_markers.py b/example/scene_reconstruction/example_annotate_aruco_markers.py index b86776e4..25604472 100644 --- a/example/scene_reconstruction/example_annotate_aruco_markers.py +++ b/example/scene_reconstruction/example_annotate_aruco_markers.py @@ -12,11 +12,7 @@ def annotate_aruco_markers( - source_pattern: str, - save_dir: str, - line_width: int = 1, - font_thickness: int = 2, - font_scale: float = 2, + source_pattern: str, save_dir: str, line_width: int = 1, font_thickness: int = 2, font_scale: float = 2 ): """Finds aruco markers, annotates edges, labels, and saves into destination folder @@ -70,15 +66,7 @@ def annotate_aruco_markers( # Add text orig = (int(pt_img[:, 0].min()), int(pt_img[:, 1].min() - 5)) - 
cv.putText( - img_rgb, - str(id_), - orig, - font_type, - font_scale, - (0, 0, 255), - font_thickness, - ) + cv.putText(img_rgb, str(id_), orig, font_type, font_scale, (0, 0, 255), font_thickness) # Save image save_name = basename(file) @@ -91,9 +79,7 @@ def example_annotate_aruco_markers(): the given source_pattern. Markers are outlined in red and labeled in blue text. """ source_pattern = join( - opencsp_code_dir(), - 'app/scene_reconstruction/test/data', - 'data_measurement/aruco_marker_images/DSC0365*.JPG', + opencsp_code_dir(), 'app/scene_reconstruction/test/data', 'data_measurement/aruco_marker_images/DSC0365*.JPG' ) save_dir = join(dirname(__file__), 'data/output/annotated_aruco_markers') diff --git a/example/scene_reconstruction/example_make_aruco_markers.py b/example/scene_reconstruction/example_make_aruco_markers.py index abe63f2e..260c120e 100644 --- a/example/scene_reconstruction/example_make_aruco_markers.py +++ b/example/scene_reconstruction/example_make_aruco_markers.py @@ -39,21 +39,11 @@ def make_aruco_images(save_path: str, number: str, size: int = 500, padding: int cv.aruco.drawMarker(dictionary, id_, side_pixels, img[-side_pixels:, :]) # Add padding - img = np.pad( - img, ((pad_width, pad_width), (pad_width, pad_width)), constant_values=255 - ) + img = np.pad(img, ((pad_width, pad_width), (pad_width, pad_width)), constant_values=255) # Add text id_string = f'ID: {id_:d}' - cv.putText( - img, - id_string, - (20, side_pixels + 2 * pad_width - 4), - 0, - 1, - 0, - font_thickness, - ) + cv.putText(img, id_string, (20, side_pixels + 2 * pad_width - 4), 0, 1, 0, font_thickness) # Save image imageio.imwrite(join(f'{save_path:s}', f'{id_:03d}.png'), img) diff --git a/example/scene_reconstruction/example_scene_reconstruction.py b/example/scene_reconstruction/example_scene_reconstruction.py index 25659a53..4802b21d 100644 --- a/example/scene_reconstruction/example_scene_reconstruction.py +++ b/example/scene_reconstruction/example_scene_reconstruction.py @@ -13,9 +13,7 @@ def example_scene_reconstruction(save_dir: str): """Example script that reconstructs the XYZ locations of Aruco markers in a scene.""" # Define input directory - dir_input = join( - opencsp_code_dir(), 'app/scene_reconstruction/test/data/data_measurement' - ) + dir_input = join(opencsp_code_dir(), 'app/scene_reconstruction/test/data/data_measurement') # Load components camera = Camera.load_from_hdf(join(dir_input, 'camera.h5')) @@ -25,9 +23,7 @@ def example_scene_reconstruction(save_dir: str): alignment_points = np.loadtxt(join(dir_input, 'alignment_points.csv'), delimiter=',', skiprows=1) # Perform marker position calibration - cal_scene_recon = SceneReconstruction( - camera, known_point_locations, image_filter_path - ) + cal_scene_recon = SceneReconstruction(camera, known_point_locations, image_filter_path) cal_scene_recon.make_figures = True cal_scene_recon.run_calibration() diff --git a/example/sofast_fixed/calculate_dot_locations_from_display_object.py b/example/sofast_fixed/calculate_dot_locations_from_display_object.py index 260a8793..9260468e 100644 --- a/example/sofast_fixed/calculate_dot_locations_from_display_object.py +++ b/example/sofast_fixed/calculate_dot_locations_from_display_object.py @@ -16,16 +16,11 @@ def example_calculate_dot_locs_from_display(): """Creates a DotLocationsFixedPattern object from a previously created Display object""" - dir_base = join( - opencsp_code_dir(), - '../../sample_data/deflectometry/sandia_lab/calibration_files', - ) + dir_base = join(opencsp_code_dir(), 
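The aruco-marker hunk above adds a white border around each marker image with np.pad before stamping the ID text. A tiny self-contained example of the same padding call on a dummy image; the sizes here are placeholders, not the ones from the example script.

    # Illustrative sketch: constant white border around a grayscale image.
    import numpy as np

    img = np.zeros((4, 4), dtype=np.uint8)  # stand-in for a marker image
    pad_width = 2                            # border size in pixels (placeholder)
    padded = np.pad(img, ((pad_width, pad_width), (pad_width, pad_width)),
                    constant_values=255)     # default mode is 'constant'
    print(padded.shape)                      # (8, 8)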
'../../sample_data/deflectometry/sandia_lab/calibration_files') # Define files file_display = join(dir_base, 'display_distorted_3d.h5') - file_image_projection = join( - dir_base, 'Image_Projection_optics_lab_landscape_square.h5' - ) + file_image_projection = join(dir_base, 'Image_Projection_optics_lab_landscape_square.h5') dir_save = join(dirname(__file__), 'data/output/dot_location_file') # Define dot parameters @@ -40,20 +35,14 @@ def example_calculate_dot_locs_from_display(): im_proj_params = ImageProjection.load_from_hdf(file_image_projection) # Calculate fixed pattern display parameters - projection = SystemSofastFixed( - im_proj_params['size_x'], im_proj_params['size_y'], width_dot, spacing_dot - ) - fixed_pattern_dot_locs = DotLocationsFixedPattern.from_projection_and_display( - projection, display - ) + projection = SystemSofastFixed(im_proj_params['size_x'], im_proj_params['size_y'], width_dot, spacing_dot) + fixed_pattern_dot_locs = DotLocationsFixedPattern.from_projection_and_display(projection, display) # Calculate spatial orientation orientation = SpatialOrientation(display.r_cam_screen, display.v_cam_screen_cam) # Save data sets - fixed_pattern_dot_locs.save_to_hdf( - join(dir_save, f'fixed_pattern_display_w{width_dot:d}_s{spacing_dot:d}.h5') - ) + fixed_pattern_dot_locs.save_to_hdf(join(dir_save, f'fixed_pattern_display_w{width_dot:d}_s{spacing_dot:d}.h5')) orientation.save_to_hdf(join(dir_save, 'spatial_orientation.h5')) diff --git a/example/sofast_fixed/find_blobs_in_image.py b/example/sofast_fixed/find_blobs_in_image.py index 2a0c88a8..374db3b1 100644 --- a/example/sofast_fixed/find_blobs_in_image.py +++ b/example/sofast_fixed/find_blobs_in_image.py @@ -17,9 +17,7 @@ def example_find_blobs_in_image(): opencsp_code_dir(), '../../sample_data/deflectometry/sandia_lab/fixed_pattern/measurement_screen_square_width3_space6.h5', ) - file_save = join( - dirname(__file__), 'data/output/blob_detection/image_with_detected_blobs.png' - ) + file_save = join(dirname(__file__), 'data/output/blob_detection/image_with_detected_blobs.png') if not exists(dirname(file_save)): os.makedirs(dirname(file_save)) diff --git a/example/sofast_fixed/physical_target_dot_calibration.py b/example/sofast_fixed/physical_target_dot_calibration.py index b569ecaa..07167f4e 100644 --- a/example/sofast_fixed/physical_target_dot_calibration.py +++ b/example/sofast_fixed/physical_target_dot_calibration.py @@ -9,9 +9,7 @@ from opencsp.app.sofast.lib.CalibrateSofastFixedDots import CalibrateSofastFixedDots from opencsp.common.lib.camera.Camera import Camera -from opencsp.common.lib.deflectometry.CalibrationCameraPosition import ( - CalibrationCameraPosition, -) +from opencsp.common.lib.deflectometry.CalibrationCameraPosition import CalibrationCameraPosition from opencsp.app.sofast.lib.SpatialOrientation import SpatialOrientation from opencsp.common.lib.geometry.Vxy import Vxy from opencsp.common.lib.geometry.Vxyz import Vxyz @@ -23,19 +21,14 @@ def example_perform_calibration(): """Performs a dot-location calibration using photogrammetry""" # Define dot location images and origins - base_dir = join( - opencsp_code_dir(), - 'test/data/measurements_sofast_fixed/dot_location_calibration/measurements', - ) + base_dir = join(opencsp_code_dir(), 'test/data/measurements_sofast_fixed/dot_location_calibration/measurements') files_cal_images = [ join(base_dir, 'images/DSC03965.JPG'), join(base_dir, 'images/DSC03967.JPG'), join(base_dir, 'images/DSC03970.JPG'), join(base_dir, 'images/DSC03972.JPG'), ] - origins = ( - 
np.array(([4950, 4610, 4221, 3617], [3359, 3454, 3467, 3553]), dtype=float) / 4 - ) + origins = np.array(([4950, 4610, 4221, 3617], [3359, 3454, 3467, 3553]), dtype=float) / 4 origins = Vxy(origins.astype(int)) # Define other files @@ -65,15 +58,7 @@ def example_perform_calibration(): # Perform dot location calibration cal_dot_locs = CalibrateSofastFixedDots( - files_cal_images, - origins, - camera_marker, - pts_xyz_corners, - ids_corners, - -32, - 31, - -31, - 32, + files_cal_images, origins, camera_marker, pts_xyz_corners, ids_corners, -32, 31, -31, 32 ) cal_dot_locs.plot = True cal_dot_locs.blob_search_threshold = 3.0 @@ -82,9 +67,7 @@ def example_perform_calibration(): cal_dot_locs.run() # Perform camera position calibration - cal_camera = CalibrationCameraPosition( - camera_system, pts_xyz_corners, ids_corners, image_camera_position - ) + cal_camera = CalibrationCameraPosition(camera_system, pts_xyz_corners, ids_corners, image_camera_position) cal_camera.verbose = 2 cal_camera.run_calibration() diff --git a/example/sofast_fixed/process_fixed_pattern_data.py b/example/sofast_fixed/process_fixed_pattern_data.py index 728d089f..264db9d6 100644 --- a/example/sofast_fixed/process_fixed_pattern_data.py +++ b/example/sofast_fixed/process_fixed_pattern_data.py @@ -33,9 +33,7 @@ def process( measurement = MeasurementSofastFixed.load_from_hdf(file_meas) # Instantiate class - fixed_pattern = ProcessSofastFixed( - orientation, camera, fixed_pattern_dot_locs, facet_data - ) + fixed_pattern = ProcessSofastFixed(orientation, camera, fixed_pattern_dot_locs, facet_data) fixed_pattern.load_measurement_data(measurement) # Process @@ -75,21 +73,10 @@ def example_process_fixed_pattern_printed_target(): dir_output = join(dirname(__file__), 'data/output/printed_target') surface_data = dict( - surface_type='parabolic', - initial_focal_lengths_xy=(150.0, 150), - robust_least_squares=False, - downsample=1, + surface_type='parabolic', initial_focal_lengths_xy=(150.0, 150), robust_least_squares=False, downsample=1 ) - process( - file_camera, - file_facet, - file_dot_locs, - file_ori, - file_meas, - dir_output, - surface_data, - ) + process(file_camera, file_facet, file_dot_locs, file_ori, file_meas, dir_output, surface_data) def example_process_fixed_pattern_screen_target(): @@ -100,30 +87,15 @@ def example_process_fixed_pattern_screen_target(): file_camera = join(dir_base, "calibration_files/camera.h5") file_facet = join(dir_base, "calibration_files/Facet_NSTTF.json") file_ori = join(dir_base, 'fixed_pattern/spatial_orientation.h5') - file_dot_locs = join( - dir_base, 'fixed_pattern/dot_locations_screen_square_width3_space6.h5' - ) - file_meas = join( - dir_base, 'fixed_pattern/measurement_screen_square_width3_space6.h5' - ) + file_dot_locs = join(dir_base, 'fixed_pattern/dot_locations_screen_square_width3_space6.h5') + file_meas = join(dir_base, 'fixed_pattern/measurement_screen_square_width3_space6.h5') dir_output = join(dirname(__file__), 'data/output/screen_target') surface_data = dict( - surface_type='parabolic', - initial_focal_lengths_xy=(150.0, 150), - robust_least_squares=False, - downsample=1, + surface_type='parabolic', initial_focal_lengths_xy=(150.0, 150), robust_least_squares=False, downsample=1 ) - process( - file_camera, - file_facet, - file_dot_locs, - file_ori, - file_meas, - dir_output, - surface_data, - ) + process(file_camera, file_facet, file_dot_locs, file_ori, file_meas, dir_output, surface_data) if __name__ == '__main__': diff --git 
a/example/sofast_fixed/run_and_characterize_fixed_pattern.py b/example/sofast_fixed/run_and_characterize_fixed_pattern.py index c79361ba..a836db3b 100644 --- a/example/sofast_fixed/run_and_characterize_fixed_pattern.py +++ b/example/sofast_fixed/run_and_characterize_fixed_pattern.py @@ -41,9 +41,7 @@ def process( ) -> None: """Performs processing of measured dot data""" # Process fixed pattern - fixed_pattern = ProcessSofastFixed( - orientation, camera, fixed_pattern_dot_locs, facet_data - ) + fixed_pattern = ProcessSofastFixed(orientation, camera, fixed_pattern_dot_locs, facet_data) fixed_pattern.load_measurement_data(measurement) fixed_pattern.process_single_facet_optic(surface_data) @@ -74,24 +72,15 @@ def run() -> None: # Load data file_camera = os.path.join(dir_calibration, "Camera_optics_lab_landscape.h5") - file_fixed_pattern_dot = os.path.join( - dir_calibration, "fixed_pattern_display_w3_s6.h5" - ) - file_spatial_orientation = os.path.join( - dir_calibration, "fixed_pattern_display_w3_s6.h5" - ) + file_fixed_pattern_dot = os.path.join(dir_calibration, "fixed_pattern_display_w3_s6.h5") + file_spatial_orientation = os.path.join(dir_calibration, "fixed_pattern_display_w3_s6.h5") file_facet_data = os.path.join(dir_calibration, "Facet_NSTTF.json") - file_image_projection = os.path.join( - dir_calibration, 'Image_Projection_optics_lab_landscape_square.h5' - ) + file_image_projection = os.path.join(dir_calibration, 'Image_Projection_optics_lab_landscape_square.h5') # Load ImageProjection and fixed pattern parameters image_projection = ImageProjection.load_from_hdf_and_display(file_image_projection) fixed_pattern = SystemSofastFixed( - image_projection.size_x, - image_projection.size_y, - width_pattern=3, - spacing_pattern=6, + image_projection.size_x, image_projection.size_y, width_pattern=3, spacing_pattern=6 ) image = fixed_pattern.get_image('uint8', 255) image_projection.display_image_in_active_area(image) @@ -99,42 +88,24 @@ def run() -> None: # Load other components camera = Camera.load_from_hdf(file_camera) spatial_orientation = SpatialOrientation.load_from_hdf(file_spatial_orientation) - fixed_pattern_dot_locs = DotLocationsFixedPattern.load_from_hdf( - file_fixed_pattern_dot - ) + fixed_pattern_dot_locs = DotLocationsFixedPattern.load_from_hdf(file_fixed_pattern_dot) # Define facet measurement setup facet_data = DefinitionFacet.load_from_json(file_facet_data) surface_data = dict( - surface_type='parabolic', - initial_focal_lengths_xy=(150.0, 150), - robust_least_squares=False, - downsample=1, + surface_type='parabolic', initial_focal_lengths_xy=(150.0, 150), robust_least_squares=False, downsample=1 ) def run_next(): - resp = input( - 'Measure (m), calibrate camera exposure (c), or stop (any other key): ' - ) + resp = input('Measure (m), calibrate camera exposure (c), or stop (any other key): ') if resp == 'm': # Capture image frame = image_acquisition.get_frame() # Process measurement = MeasurementSofastFixed( - frame, - v_measure_point_facet, - dist_optic_screen, - pt_origin, - name='NSTTF Facet', - ) - process( - fixed_pattern_dot_locs, - spatial_orientation, - camera, - facet_data, - measurement, - surface_data, + frame, v_measure_point_facet, dist_optic_screen, pt_origin, name='NSTTF Facet' ) + process(fixed_pattern_dot_locs, spatial_orientation, camera, facet_data, measurement, surface_data) # Continue or exit image_projection.root.after(200, run_next) elif resp == 'c': diff --git a/example/sofast_fixed/run_fixed_pattern_projection.py 
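The run_and_characterize hunk above drives repeated measurements by re-scheduling run_next with image_projection.root.after(200, run_next). A minimal sketch of that polling pattern in plain tkinter (requires a display); the widget here is a placeholder, not OpenCSP's ImageProjection.

    # Illustrative sketch: root.after(ms, callback) queues callback on the GUI
    # event loop, so run_next() keeps re-arming itself until told to stop.
    import tkinter as tk

    root = tk.Tk()  # stand-in for image_projection.root in the example above
    count = 0

    def run_next():
        global count
        count += 1
        print(f"iteration {count}")
        if count < 5:                  # the example instead checks the 'm'/'c' prompt
            root.after(200, run_next)  # re-arm after 200 ms
        else:
            root.destroy()

    root.after(200, run_next)
    root.mainloop()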
b/example/sofast_fixed/run_fixed_pattern_projection.py index a8cae6d1..c72af1a2 100644 --- a/example/sofast_fixed/run_fixed_pattern_projection.py +++ b/example/sofast_fixed/run_fixed_pattern_projection.py @@ -17,8 +17,7 @@ def example_project_fixed_pattern(): """Projects fixed pattern image on display""" # Set pattern parameters file_image_projection = join( - opencsp_code_dir(), - "test/data/measurements_sofast_fringe/general/Image_Projection_test.h5", + opencsp_code_dir(), "test/data/measurements_sofast_fringe/general/Image_Projection_test.h5" ) width_pattern = 3 spacing_pattern = 6 @@ -26,9 +25,7 @@ def example_project_fixed_pattern(): # Load ImageProjection im_proj = ImageProjection.load_from_hdf_and_display(file_image_projection) - fixed_pattern = SystemSofastFixed( - im_proj.size_x, im_proj.size_y, width_pattern, spacing_pattern - ) + fixed_pattern = SystemSofastFixed(im_proj.size_x, im_proj.size_y, width_pattern, spacing_pattern) image = fixed_pattern.get_image('uint8', 255, 'square') # Project image diff --git a/example/sofast_fringe/example_calibration_save_DisplayShape_file.py b/example/sofast_fringe/example_calibration_save_DisplayShape_file.py index 3df94901..30a071e6 100644 --- a/example/sofast_fringe/example_calibration_save_DisplayShape_file.py +++ b/example/sofast_fringe/example_calibration_save_DisplayShape_file.py @@ -18,9 +18,7 @@ def example_save_display_shape_file(): # Load screen distortion data file_screen_distortion_data = join( - opencsp_code_dir(), - 'app/sofast/test/data/data_expected', - 'screen_distortion_data_100_100.h5', + opencsp_code_dir(), 'app/sofast/test/data/data_expected', 'screen_distortion_data_100_100.h5' ) datasets = ['pts_xy_screen_fraction', 'pts_xyz_screen_coords'] data = load_hdf5_datasets(datasets, file_screen_distortion_data) @@ -31,9 +29,7 @@ def example_save_display_shape_file(): # Load rvec and tvec file_rvec_tvec = join( - opencsp_code_dir(), - 'common/lib/deflectometry/test/data/data_expected', - 'camera_rvec_tvec.csv', + opencsp_code_dir(), 'common/lib/deflectometry/test/data/data_expected', 'camera_rvec_tvec.csv' ) pose_data = np.loadtxt(file_rvec_tvec, delimiter=',') rvec = pose_data[0] diff --git a/example/sofast_fringe/example_calibration_screen_shape.py b/example/sofast_fringe/example_calibration_screen_shape.py index 42795422..de9c392d 100644 --- a/example/sofast_fringe/example_calibration_screen_shape.py +++ b/example/sofast_fringe/example_calibration_screen_shape.py @@ -14,13 +14,10 @@ def example_run_screen_shape_calibration(save_dir): - """Runs screen shape calibration. Saves data to ./data/output/screen_shape - """ + """Runs screen shape calibration. 
Saves data to ./data/output/screen_shape""" # Load output data from Scene Reconstruction (Aruco marker xyz points) file_pts_data = join( - opencsp_code_dir(), - 'common/lib/deflectometry/test/data/data_measurement', - 'point_locations.csv', + opencsp_code_dir(), 'common/lib/deflectometry/test/data/data_measurement', 'point_locations.csv' ) pts_marker_data = np.loadtxt(file_pts_data, delimiter=',', skiprows=1) pts_xyz_marker = Vxyz(pts_marker_data[:, 2:].T) diff --git a/example/sofast_fringe/example_process_facet_ensemble.py b/example/sofast_fringe/example_process_facet_ensemble.py index 7636205d..40162cf6 100644 --- a/example/sofast_fringe/example_process_facet_ensemble.py +++ b/example/sofast_fringe/example_process_facet_ensemble.py @@ -55,11 +55,7 @@ def example_driver(dir_save: str): # Define surface data surface_data = [ - Surface2DParabolic( - initial_focal_lengths_xy=(100.0, 100.0), - robust_least_squares=False, - downsample=20, - ) + Surface2DParabolic(initial_focal_lengths_xy=(100.0, 100.0), robust_least_squares=False, downsample=20) ] * ensemble_data.num_facets # Calibrate fringes @@ -95,13 +91,7 @@ def example_driver(dir_save: str): # Visualize setup fig_record = fm.setup_figure_for_3d_data(figure_control, axis_control_m, title='') spatial_ori: SpatialOrientation = sofast.data_geometry_facet[0].spatial_orientation - visualize_setup( - display, - camera, - spatial_ori.v_screen_optic_screen, - spatial_ori.r_optic_screen, - ax=fig_record.axis, - ) + visualize_setup(display, camera, spatial_ori.v_screen_optic_screen, spatial_ori.r_optic_screen, ax=fig_record.axis) fig_record.save(dir_save, 'physical_setup_layout', 'png') # Plot scenario diff --git a/example/sofast_fringe/example_process_single_facet.py b/example/sofast_fringe/example_process_single_facet.py index 7b6651b1..6c1a0df8 100644 --- a/example/sofast_fringe/example_process_single_facet.py +++ b/example/sofast_fringe/example_process_single_facet.py @@ -47,11 +47,7 @@ def example_driver(dir_save: str): facet_data = DefinitionFacet.load_from_json(file_facet) # Define surface definition (parabolic surface) - surface = Surface2DParabolic( - initial_focal_lengths_xy=(300., 300.), - robust_least_squares=True, - downsample=10, - ) + surface = Surface2DParabolic(initial_focal_lengths_xy=(300.0, 300.0), robust_least_squares=True, downsample=10) # Calibrate fringes measurement.calibrate_fringe_images(calibration) @@ -65,8 +61,7 @@ def example_driver(dir_save: str): # Calculate focal length from parabolic fit surf_coefs = sofast.data_characterization_facet[0].surf_coefs_facet focal_lengths_xy = [1 / 4 / surf_coefs[2], 1 / 4 / surf_coefs[5]] - lt.info(f'Facet xy focal lengths (meters): ' - f'{focal_lengths_xy[0]:.3f}, {focal_lengths_xy[1]:.3f}') + lt.info(f'Facet xy focal lengths (meters): ' f'{focal_lengths_xy[0]:.3f}, {focal_lengths_xy[1]:.3f}') # Get optic representation facet: Facet = sofast.get_optic() @@ -78,13 +73,7 @@ def example_driver(dir_save: str): # Visualize setup fig_record = fm.setup_figure_for_3d_data(figure_control, axis_control_m, title='') spatial_ori: SpatialOrientation = sofast.data_geometry_facet[0].spatial_orientation - visualize_setup( - display, - camera, - spatial_ori.v_screen_optic_screen, - spatial_ori.r_optic_screen, - ax=fig_record.axis, - ) + visualize_setup(display, camera, spatial_ori.v_screen_optic_screen, spatial_ori.r_optic_screen, ax=fig_record.axis) fig_record.save(dir_save, 'physical_setup_layout', 'png') # Plot slope map diff --git a/example/sofast_fringe/example_process_undefined_shape.py 
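The single-facet hunk above recovers xy focal lengths from the fitted surface coefficients as 1/(4a). A short sketch of why: for a parabola z = a x^2 the focal length is f = 1/(4a). The coefficient values and their positions below are made-up placeholders arranged to match the example's indexing.

    # Illustrative sketch: f = 1 / (4 a) for z = a * x**2.
    surf_coefs = [0.0, 0.0, 1.0 / (4 * 300.0),   # placeholder x^2 term, 300 m focal length
                  0.0, 0.0, 1.0 / (4 * 295.0)]   # placeholder y^2 term, 295 m focal length

    focal_lengths_xy = [1 / 4 / surf_coefs[2], 1 / 4 / surf_coefs[5]]
    print(f'Facet xy focal lengths (meters): '
          f'{focal_lengths_xy[0]:.3f}, {focal_lengths_xy[1]:.3f}')  # 300.000, 295.000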
b/example/sofast_fringe/example_process_undefined_shape.py index 0c94ec36..2b625e9e 100644 --- a/example/sofast_fringe/example_process_undefined_shape.py +++ b/example/sofast_fringe/example_process_undefined_shape.py @@ -44,11 +44,7 @@ def example_driver(dir_save): calibration = ImageCalibrationScaling.load_from_hdf(file_calibration) # Define surface definition (parabolic surface) - surface = Surface2DParabolic( - initial_focal_lengths_xy=(300., 300.), - robust_least_squares=True, - downsample=10, - ) + surface = Surface2DParabolic(initial_focal_lengths_xy=(300.0, 300.0), robust_least_squares=True, downsample=10) # Calibrate fringes measurement.calibrate_fringe_images(calibration) @@ -63,8 +59,7 @@ def example_driver(dir_save): # Calculate focal length from parabolic fit surf_coefs = sofast.data_characterization_facet[0].surf_coefs_facet focal_lengths_xy = [1 / 4 / surf_coefs[2], 1 / 4 / surf_coefs[5]] - lt.info(f'Facet xy focal lengths (meters): ' - f'{focal_lengths_xy[0]:.3f}, {focal_lengths_xy[1]:.3f}') + lt.info(f'Facet xy focal lengths (meters): ' f'{focal_lengths_xy[0]:.3f}, {focal_lengths_xy[1]:.3f}') # Get optic representation facet: Facet = sofast.get_optic() @@ -76,13 +71,7 @@ def example_driver(dir_save): # Visualize setup fig_record = fm.setup_figure_for_3d_data(figure_control, axis_control_m, title='') spatial_ori: SpatialOrientation = sofast.data_geometry_facet[0].spatial_orientation - visualize_setup( - display, - camera, - spatial_ori.v_screen_optic_screen, - spatial_ori.r_optic_screen, - ax=fig_record.axis, - ) + visualize_setup(display, camera, spatial_ori.v_screen_optic_screen, spatial_ori.r_optic_screen, ax=fig_record.axis) fig_record.save(dir_save, 'physical_setup_layout', 'png') # Plot slope map diff --git a/example/sofast_fringe/example_standard_mirror_plot_output.py b/example/sofast_fringe/example_standard_mirror_plot_output.py index f0036a7b..db2e64d9 100644 --- a/example/sofast_fringe/example_standard_mirror_plot_output.py +++ b/example/sofast_fringe/example_standard_mirror_plot_output.py @@ -11,9 +11,7 @@ from opencsp.common.lib.opencsp_path.opencsp_root_path import opencsp_code_dir -def plot_sofast_single_facet( - data_file: str, dir_save: str, focal_length_paraboloid: float -) -> None: +def plot_sofast_single_facet(data_file: str, dir_save: str, focal_length_paraboloid: float) -> None: """Loads and visualizes CSP optic from saved SOFAST HDF file containing measured data of an NSTTF Facet. @@ -60,22 +58,16 @@ def plot_sofast_single_facet( options.output_dir = dir_save # Create standard output plots - so.standard_output( - optic_meas, optic_ref, source, v_target_center, v_target_normal, options - ) + so.standard_output(optic_meas, optic_ref, source, v_target_center, v_target_normal, options) -def plot_sofast_facet_ensemble( - data_file: str, dir_save: str, focal_length_paraboloid: float -) -> None: +def plot_sofast_facet_ensemble(data_file: str, dir_save: str, focal_length_paraboloid: float) -> None: """Loads and visualizes CSP optic from saved SOFAST HDF file containing measured data of an NSTTF Heliostat. 
""" # Load data optic_meas = lsd.load_facet_ensemble_from_hdf(data_file) - optic_ref = lsd.load_ideal_facet_ensemble_from_hdf( - data_file, focal_length_paraboloid - ) + optic_ref = lsd.load_ideal_facet_ensemble_from_hdf(data_file, focal_length_paraboloid) # Define scene v_target_center = Vxyz((0, 0, 56.57)) @@ -107,9 +99,7 @@ def plot_sofast_facet_ensemble( options.output_dir = dir_save # Create standard output plots - so.standard_output( - optic_meas, optic_ref, source, v_target_center, v_target_normal, options - ) + so.standard_output(optic_meas, optic_ref, source, v_target_center, v_target_normal, options) def example_driver(): diff --git a/example/solarfield/example_SolarFieldOutput.py b/example/solarfield/example_SolarFieldOutput.py index 16c3e16c..fa8711fd 100755 --- a/example/solarfield/example_SolarFieldOutput.py +++ b/example/solarfield/example_SolarFieldOutput.py @@ -26,9 +26,7 @@ import opencsp.common.lib.render_control.RenderControlFacet as rcf import opencsp.common.lib.render_control.RenderControlFigure as rcfg from opencsp.common.lib.render_control.RenderControlFigure import RenderControlFigure -from opencsp.common.lib.render_control.RenderControlFigureRecord import ( - RenderControlFigureRecord, -) +from opencsp.common.lib.render_control.RenderControlFigureRecord import RenderControlFigureRecord import opencsp.common.lib.render_control.RenderControlHeliostat as rch import opencsp.common.lib.render_control.RenderControlPointSeq as rcps import opencsp.common.lib.render_control.RenderControlSolarField as rcsf @@ -111,9 +109,7 @@ def example_single_heliostat(self) -> None: # comments\ comments.append("Demonstration of heliostat drawing.") - comments.append( - "Facet outlines shown, with facet names and overall heliostat surface normal." - ) + comments.append("Facet outlines shown, with facet names and overall heliostat surface normal.") comments.append('Render mirror surfaces only.') comments.append("Green: Facet outlines and overall surface normal.") @@ -149,11 +145,7 @@ def example_annotated_heliostat(self) -> None: # View setup title = 'Heliostat ' + heliostat_name + ', with Highlighting' - caption = ( - 'A single Sandia NSTTF heliostat with rendering options ' - + heliostat_name - + '.' - ) + caption = 'A single Sandia NSTTF heliostat with rendering options ' + heliostat_name + '.' comments = [] # Tracking setup. 
@@ -161,18 +153,12 @@ def example_annotated_heliostat(self) -> None: # [year, month, day, hour, minute, second, zone] when_ymdhmsz = [2021, 5, 13, 13, 2, 0, -6] # NSTTF solar noon heliostat = self.solar_field.lookup_heliostat(heliostat_name) - heliostat.set_tracking( - aimpoint_xyz, self.solar_field.origin_lon_lat, when_ymdhmsz - ) + heliostat.set_tracking(aimpoint_xyz, self.solar_field.origin_lon_lat, when_ymdhmsz) # Style setup default_heliostat_style = rch.normal_facet_outlines() - default_heliostat_style.facet_styles.add_special_name( - 16, rcf.corner_normals_outline_name(color='c') - ) - default_heliostat_style.facet_styles.add_special_names( - [1, 4, 7, 24, 25], rcf.normal_outline(color='r') - ) + default_heliostat_style.facet_styles.add_special_name(16, rcf.corner_normals_outline_name(color='c')) + default_heliostat_style.facet_styles.add_special_names([1, 4, 7, 24, 25], rcf.normal_outline(color='r')) heliostat_styles = rce.RenderControlEnsemble(default_heliostat_style) # Comment @@ -180,9 +166,7 @@ def example_annotated_heliostat(self) -> None: comments.append("Black: Facet outlines.") comments.append("Black: Overall heliostat surface normal.") comments.append("Red: Highlighted facets and their surface normals.") - comments.append( - "Cyan: Highlighted facet with facet name and facet surface normal drawn at corners." - ) + comments.append("Cyan: Highlighted facet with facet name and facet surface normal drawn at corners.") # Draw fig_record = fm.setup_figure_for_3d_data( @@ -261,14 +245,10 @@ def example_multi_heliostat(self) -> None: comments.append("Green: Centroid and name.") comments.append("Blue: Facet outlines.") comments.append("Cyan: Overall outline and overall surface normal.") - comments.append( - "Magneta: Overall outline and overall surface normal, drawn at corners." - ) + comments.append("Magneta: Overall outline and overall surface normal, drawn at corners.") comments.append("Green: Facet outlines and overall surface normal.") comments.append("Cyan: Facet outlines and facet surface normals.") - comments.append( - "Black: Facet outlines and facet surface normals drawn at facet corners." - ) + comments.append("Black: Facet outlines and facet surface normals drawn at facet corners.") # Output. self.show_save_and_check_figure(fig_record) @@ -293,9 +273,7 @@ def example_solar_field_h_names(self) -> None: # Comment comments.append("Heliostat names, drawn at each heliostat's centroid.") - comments.append( - "At NSTTF, centroids appear to be at the midpoint of the torque tube." - ) + comments.append("At NSTTF, centroids appear to be at the midpoint of the torque tube.") # Draw fig_record = fm.setup_figure_for_3d_data( @@ -335,9 +313,7 @@ def example_solar_field_h_centroids(self) -> None: solar_field_style = rcsf.heliostat_centroids(color='b') # Comment - comments.append( - "Heliostat centroids, which at NSTTF appear to be at the midpoint of the torque tube." 
- ) + comments.append("Heliostat centroids, which at NSTTF appear to be at the midpoint of the torque tube.") # Draw and output in 3d fig_record = fm.setup_figure_for_3d_data( @@ -465,9 +441,7 @@ def example_solar_field_h_outlines(self) -> None: aimpoint_xyz = [60.0, 8.8, 28.9] when_ymdhmsz = [2021, 5, 13, 13, 2, 0, -6] # NSTTF solar noon # [year, month, day, hour, minute, second, zone] - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) # Style setup solar_field_style = rcsf.heliostat_outlines(color='b') @@ -490,9 +464,7 @@ def example_solar_field_h_outlines(self) -> None: code_tag=self.code_tag, ) solar_field.draw(fig_record.view, solar_field_style) - fig_record.view.draw_xyz( - aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz' - ) + fig_record.view.draw_xyz(aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz') # Output. self.show_save_and_check_figure(fig_record) @@ -551,28 +523,18 @@ def example_annotated_solar_field(self) -> None: # Configuration setup solar_field = self.solar_field - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) solar_field.set_heliostats_configuration(stowed_heliostats, hc.NSTTF_stow()) synch_configuration = hc.HeliostatConfiguration(az=synch_az, el=synch_el) - solar_field.set_heliostats_configuration( - synched_heliostats, synch_configuration - ) + solar_field.set_heliostats_configuration(synched_heliostats, synch_configuration) up_configuration = hc.HeliostatConfiguration(az=up_az, el=up_el) solar_field.set_heliostats_configuration(up_heliostats, up_configuration) # Style setup solar_field_style = rcsf.heliostat_outlines(color='b') - solar_field_style.heliostat_styles.add_special_names( - up_heliostats, rch.normal_outline(color='c') - ) - solar_field_style.heliostat_styles.add_special_names( - stowed_heliostats, rch.normal_outline(color='r') - ) - solar_field_style.heliostat_styles.add_special_names( - synched_heliostats, rch.normal_outline(color='g') - ) + solar_field_style.heliostat_styles.add_special_names(up_heliostats, rch.normal_outline(color='c')) + solar_field_style.heliostat_styles.add_special_names(stowed_heliostats, rch.normal_outline(color='r')) + solar_field_style.heliostat_styles.add_special_names(synched_heliostats, rch.normal_outline(color='g')) # Comment comments.append("A solar field situation with heliostats in varying status.") @@ -635,20 +597,7 @@ def example_solar_field_subset(self) -> None: '7E6', '7E7', ] - tracking_heliostats = [ - '8E1', - '8E2', - '8E4', - '8E6', - '8E7', - '9E1', - '9E2', - '9E3', - '9E4', - '9E5', - '9E6', - '9E7', - ] + tracking_heliostats = ['8E1', '8E2', '8E4', '8E6', '8E7', '9E1', '9E2', '9E3', '9E4', '9E5', '9E6', '9E7'] # View setup title = 'Selected Heliostats' @@ -668,42 +617,24 @@ def example_solar_field_subset(self) -> None: # Configuration setup solar_field = self.solar_field - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) solar_field.set_heliostats_configuration(stowed_heliostats, hc.NSTTF_stow()) synch_configuration = hc.HeliostatConfiguration(az=synch_az, el=synch_el) - solar_field.set_heliostats_configuration( - synched_heliostats, 
synch_configuration - ) - solar_field.set_heliostats_configuration( - mirrored_heliostats, synch_configuration - ) + solar_field.set_heliostats_configuration(synched_heliostats, synch_configuration) + solar_field.set_heliostats_configuration(mirrored_heliostats, synch_configuration) up_configuration = hc.HeliostatConfiguration(az=up_az, el=up_el) solar_field.set_heliostats_configuration(up_heliostats, up_configuration) # Style setup solar_field_style = rcsf.heliostat_blanks() - solar_field_style.heliostat_styles.add_special_names( - mirrored_heliostats, rch.mirror_surfaces() - ) - solar_field_style.heliostat_styles.add_special_names( - up_heliostats, rch.facet_outlines(color='c') - ) - solar_field_style.heliostat_styles.add_special_names( - stowed_heliostats, rch.normal_outline(color='r') - ) - solar_field_style.heliostat_styles.add_special_names( - synched_heliostats, rch.normal_outline(color='g') - ) - solar_field_style.heliostat_styles.add_special_names( - tracking_heliostats, rch.facet_outlines(color='b') - ) + solar_field_style.heliostat_styles.add_special_names(mirrored_heliostats, rch.mirror_surfaces()) + solar_field_style.heliostat_styles.add_special_names(up_heliostats, rch.facet_outlines(color='c')) + solar_field_style.heliostat_styles.add_special_names(stowed_heliostats, rch.normal_outline(color='r')) + solar_field_style.heliostat_styles.add_special_names(synched_heliostats, rch.normal_outline(color='g')) + solar_field_style.heliostat_styles.add_special_names(tracking_heliostats, rch.facet_outlines(color='b')) # Comment - comments.append( - "A subset of heliostats selected, so that plot is effectively zoomed in." - ) + comments.append("A subset of heliostats selected, so that plot is effectively zoomed in.") comments.append("Grey heliostat shows mirrored surfaces.") comments.append("Blue heliostats are tracking.") comments.append("Cyan heliostats are face up.") @@ -751,17 +682,13 @@ def example_heliostat_vector_field(self) -> None: aimpoint_xyz = [60.0, 8.8, 28.9] when_ymdhmsz = [2021, 5, 13, 13, 2, 0, -6] # NSTTF solar noon # [year, month, day, hour, minute, second, zone] - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) # Style setup solar_field_style = rcsf.heliostat_vector_field(color='b') # Comment - comments.append( - "Each heliostat's surface normal, which can be viewed as a vector field." 
- ) + comments.append("Each heliostat's surface normal, which can be viewed as a vector field.") # Draw and produce output for 3d fig_record = fm.setup_figure_for_3d_data( @@ -777,9 +704,7 @@ def example_heliostat_vector_field(self) -> None: comments=comments, code_tag=self.code_tag, ) - fig_record.view.draw_xyz( - aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz' - ) + fig_record.view.draw_xyz(aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz') solar_field.draw(fig_record.view, solar_field_style) self.show_save_and_check_figure(fig_record) @@ -797,9 +722,7 @@ def example_heliostat_vector_field(self) -> None: comments=comments, code_tag=self.code_tag, ) - fig_record.view.draw_xyz( - aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz' - ) + fig_record.view.draw_xyz(aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz') solar_field.draw(fig_record.view, solar_field_style) self.show_save_and_check_figure(fig_record) @@ -817,9 +740,7 @@ def example_heliostat_vector_field(self) -> None: comments=comments, code_tag=self.code_tag, ) - fig_record.view.draw_xyz( - aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz' - ) + fig_record.view.draw_xyz(aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz') solar_field.draw(fig_record.view, solar_field_style) self.show_save_and_check_figure(fig_record) @@ -837,9 +758,7 @@ def example_heliostat_vector_field(self) -> None: comments=comments, code_tag=self.code_tag, ) - fig_record.view.draw_xyz( - aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz' - ) + fig_record.view.draw_xyz(aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz') solar_field.draw(fig_record.view, solar_field_style) self.show_save_and_check_figure(fig_record) @@ -867,9 +786,7 @@ def example_dense_vector_field(self) -> None: aimpoint_xyz = [60.0, 8.8, 28.9] when_ymdhmsz = [2021, 5, 13, 13, 2, 0, -6] # NSTTF solar noon # [year, month, day, hour, minute, second, zone] - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) # Style setup solar_field_style = rcsf.heliostat_vector_field_outlines(color='grey') @@ -892,25 +809,16 @@ def example_dense_vector_field(self) -> None: code_tag=self.code_tag, ) solar_field.draw(fig_record.view, solar_field_style) - fig_record.view.draw_xyz( - aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz' - ) + fig_record.view.draw_xyz(aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz') # Draw dense vector field. grid_xy = solar_field.heliostat_field_regular_grid_xy(40, 20) # grid_xydxy = [[p, sunt.tracking_surface_normal_xy(p+[0], aimpoint_xyz, solar_field.origin_lon_lat, when_ymdhmsz)] for p in grid_xy] grid_xydxy = [ - [ - p, - sun_track.tracking_surface_normal_xy( - p + [0], aimpoint_xyz, solar_field.origin_lon_lat, when_ymdhmsz - ), - ] + [p, sun_track.tracking_surface_normal_xy(p + [0], aimpoint_xyz, solar_field.origin_lon_lat, when_ymdhmsz)] for p in grid_xy ] - fig_record.view.draw_pqdpq_list( - grid_xydxy, style=rcps.vector_field(color='b', vector_scale=5.0) - ) + fig_record.view.draw_pqdpq_list(grid_xydxy, style=rcps.vector_field(color='b', vector_scale=5.0)) # Output. 
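The dense-vector-field hunk above builds a regular xy grid over the field and pairs each point with a tracking surface normal before drawing it as a vector field. A small sketch of that build step with a stand-in direction function; heliostat_field_regular_grid_xy and tracking_surface_normal_xy are OpenCSP calls and are not reproduced here.

    # Illustrative sketch: pair each grid point with a direction vector,
    # mirroring the grid_xydxy list comprehension above.
    import numpy as np

    def regular_grid_xy(nx, ny, x_max=100.0, y_max=50.0):
        xs = np.linspace(-x_max, x_max, nx)
        ys = np.linspace(0.0, y_max, ny)
        return [[float(x), float(y)] for x in xs for y in ys]

    def toy_direction_xy(p):
        # Placeholder: point every vector toward the origin in xy.
        v = -np.asarray(p)
        n = np.linalg.norm(v)
        return (v / n).tolist() if n > 0 else [0.0, 0.0]

    grid_xy = regular_grid_xy(40, 20)                       # same 40 x 20 grid size as above
    grid_xydxy = [[p, toy_direction_xy(p)] for p in grid_xy]
    print(len(grid_xydxy), grid_xydxy[0])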
self.show_save_and_check_figure(fig_record) diff --git a/example/targetcolor/example_TargetColor.py b/example/targetcolor/example_TargetColor.py index 3a96dc04..727f2c6a 100644 --- a/example/targetcolor/example_TargetColor.py +++ b/example/targetcolor/example_TargetColor.py @@ -76,12 +76,7 @@ def setup_class( # ?? SCAFFOLDING RCB -- ADD COLOR_BAR TYPE TIP BELOW def execute_example_linear_color_bar( - self, - color_below_min: Color, - color_bar, - color_bar_name: str, - color_above_max: Color, - generate_all: bool, + self, color_below_min: Color, color_bar, color_bar_name: str, color_above_max: Color, generate_all: bool ) -> None: if generate_all: # Linear color bar in x, discrete. @@ -96,9 +91,7 @@ def execute_example_linear_color_bar( 'x', 'discrete', ) - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') if generate_all: # Linear color bar in x, continuous. @@ -113,9 +106,7 @@ def execute_example_linear_color_bar( 'x', 'continuous', ) - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') # Linear color bar in y, discrete. target = tc.construct_target_linear_color_bar( @@ -129,9 +120,7 @@ def execute_example_linear_color_bar( 'y', 'discrete', ) - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') if generate_all: # Linear color bar in y, continuous. @@ -146,25 +135,15 @@ def execute_example_linear_color_bar( 'y', 'continuous', ) - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') # ?? SCAFFOLDING RCB -- ADD COLOR_BAR TYPE TIP BELOW def execute_example_polar_color_bar( - self, - color_below_min: Color, - color_bar, - color_bar_name: str, - color_above_max: Color, + self, color_below_min: Color, color_bar, color_bar_name: str, color_above_max: Color ) -> None: # Default. - target = tc.construct_target_polar_color_bar( - self.image_width, self.image_height, self.dpm - ) - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + target = tc.construct_target_polar_color_bar(self.image_width, self.image_height, self.dpm) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') # Selected for first 3m x 3m print. target = tc.construct_target_polar_color_bar( @@ -177,63 +156,43 @@ def execute_example_polar_color_bar( radial_gradient_name='l2s', light_center_to_saturated_saturation_min=0.2, ) - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') def execute_example_blue_under_red_cross_green(self) -> None: # Construct target. 
- target = tc.construct_target_blue_under_red_cross_green( - self.image_width, self.image_height, self.dpm - ) - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + target = tc.construct_target_blue_under_red_cross_green(self.image_width, self.image_height, self.dpm) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') def execute_example_rgb_cube_inscribed_square(self, project_to_cube: bool) -> None: # Construct target. target = tc.construct_target_rgb_cube_inscribed_square( self.image_width, self.image_height, self.dpm, project_to_cube ) - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') # TARGET MODIFICATION TESTS - def execute_example_adjust_color_saturation( - self, saturation_fraction: float - ) -> None: + def execute_example_adjust_color_saturation(self, saturation_fraction: float) -> None: # Construct target. - target = tc.construct_target_blue_under_red_cross_green( - self.image_width, self.image_height, self.dpm - ) + target = tc.construct_target_blue_under_red_cross_green(self.image_width, self.image_height, self.dpm) # Adjust color saturation. target.adjust_color_saturation(saturation_fraction) - print( - 'WARNING: In execute_example_adjust_color_saturation(), saturation adjustment not implemented yet.' - ) + print('WARNING: In execute_example_adjust_color_saturation(), saturation adjustment not implemented yet.') # Save and check. - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') # TARGET EXTENSION TESTS def execute_example_extend_target(self) -> None: # Target. - target = tc.construct_target_blue_under_red_cross_green( - self.image_width, self.image_height, self.dpm - ) + target = tc.construct_target_blue_under_red_cross_green(self.image_width, self.image_height, self.dpm) # Extend left. left_pixels = 10 # Pixels extended_target_left = tc.extend_target_left(target, left_pixels, Color.white()) # self.save_and_check_image(extended_target_left.image, self.dpm, extended_target_left.description_inch(), '.png') # Extend right. right_pixels = 20 # Pixels - extended_target_left_right = tc.extend_target_right( - extended_target_left, right_pixels, Color.grey() - ) + extended_target_left_right = tc.extend_target_right(extended_target_left, right_pixels, Color.grey()) # self.save_and_check_image(extended_target_left_right.image, self.dpm, extended_target_left_right.description_inch(), '.png') # Extend top. top_pixels = 30 # Pixels @@ -264,9 +223,7 @@ def execute_example_extend_target(self) -> None: def execute_example_splice_targets_above_below(self) -> None: # ?? SCAFFOLDING RCB -- FIXUP PARAMETER PASSING, ETC. # Target #1. - target_1 = tc.construct_target_blue_under_red_cross_green( - self.image_width, self.image_height, self.dpm - ) + target_1 = tc.construct_target_blue_under_red_cross_green(self.image_width, self.image_height, self.dpm) # Target #2. # Selected for first 3m x 3m print. # project_to_cube = True @@ -284,12 +241,8 @@ def execute_example_splice_targets_above_below(self) -> None: # Combine. # ?? SCAFFOLDING RCB -- SHOULD THIS BE IN INCHES? gap = 0 # Pixels # ?? SCAFFOLDING RCB -- SHOULD THIS BE IN INCHES? 
- spliced_target = tc.splice_targets_above_below( - target_1, target_2, gap, Color.white() - ) - self.save_and_check_image( - spliced_target.image, self.dpm, spliced_target.description_inch(), '.png' - ) + spliced_target = tc.splice_targets_above_below(target_1, target_2, gap, Color.white()) + self.save_and_check_image(spliced_target.image, self.dpm, spliced_target.description_inch(), '.png') def execute_example_cascade_target_A(self) -> None: # For tall linear target elements. @@ -327,9 +280,7 @@ def execute_example_cascade_target_A(self) -> None: # Main: Color bar corrected for Nikon D3300 response. color_below_min = Color.black() # Black below bottom of color bar. color_bar = tcc.nikon_D3300_monitor_equal_step_color_bar() - color_bar_name = ( - 'D3300_monitor' # ?? SCAFFOLDING RCB -- THIS SHOULD BE A CLASS MEMBER - ) + color_bar_name = 'D3300_monitor' # ?? SCAFFOLDING RCB -- THIS SHOULD BE A CLASS MEMBER color_above_max = Color.white() # White background for "saturated data." # Closed color wheel linear color bar. ref_color_below_min = Color.black() # Black below bottom of color bar. @@ -378,10 +329,7 @@ def execute_example_cascade_target_A(self) -> None: list_of_saturation_spec_lists=[ [[None, None, None, None]], # 2-stack - [ - ['light_to_saturated', None, 0.4, 1.0], - ['saturated_to_white', 1.25, None, None], - ], + [['light_to_saturated', None, 0.4, 1.0], ['saturated_to_white', 1.25, None, None]], # 5-stack [ [None, None, None, None], @@ -411,9 +359,7 @@ def execute_example_cascade_target_A(self) -> None: gap_between_bars_pix=round( gap_between_bars * composite_dpm ), # Pixels # ?? SCAFFOLDING RCB -- SHOULD THIS BE IN INCHES? - ref_gap_pix=round( - ref_gap * composite_dpm - ), # Pixels # ?? SCAFFOLDING RCB -- SHOULD THIS BE IN INCHES? + ref_gap_pix=round(ref_gap * composite_dpm), # Pixels # ?? SCAFFOLDING RCB -- SHOULD THIS BE IN INCHES? gap_color=Color.white(), ) @@ -421,25 +367,14 @@ def execute_example_cascade_target_A(self) -> None: n_ticks_x = 13 # No units. Number of tick marks to draw along top/bottom horizontal target edges. n_ticks_y = 25 # No units. Number of tick marks to draw along left/right vertical target edges. tick_length = 0.010 # Meters. Length to draw edge tick marks. - tick_width_pix = ( - 3 # Pixels. Width to draw edge tick marks; should be odd number. - ) + tick_width_pix = 3 # Pixels. Width to draw edge tick marks; should be odd number. tick_color: Color = Color.black() # Color. Color of edge tick marks. - cascade_target.set_ticks_along_top_and_bottom_edges( - n_ticks_x, tick_length, tick_width_pix, tick_color - ) - cascade_target.set_ticks_along_left_and_right_edges( - n_ticks_y, tick_length, tick_width_pix, tick_color - ) + cascade_target.set_ticks_along_top_and_bottom_edges(n_ticks_x, tick_length, tick_width_pix, tick_color) + cascade_target.set_ticks_along_left_and_right_edges(n_ticks_y, tick_length, tick_width_pix, tick_color) # Save result. # self.save_and_check_image(cascade_target.image, composite_dpm, cascade_target.description_inch(), '.tiff') #'.png') - self.save_and_check_image( - cascade_target.image, - composite_dpm, - cascade_target.description_inch(), - '.png', - ) + self.save_and_check_image(cascade_target.image, composite_dpm, cascade_target.description_inch(), '.png') def example_matlab(self) -> None: # Initialize test. @@ -449,9 +384,7 @@ def example_matlab(self) -> None: color_bar = tcc.matlab_color_bar() color_bar_name = 'matlab' color_above_max = Color.white() # White background for "saturated data." 
- self.execute_example_linear_color_bar( - color_below_min, color_bar, color_bar_name, color_above_max, False - ) + self.execute_example_linear_color_bar(color_below_min, color_bar, color_bar_name, color_above_max, False) def example_matlab_equal_angle(self) -> None: # Initialize test. @@ -461,9 +394,7 @@ def example_matlab_equal_angle(self) -> None: color_bar = tcc.normalize_color_bar_to_equal_angles(tcc.matlab_color_bar()) color_bar_name = 'matlab_equal_angle' color_above_max = Color.white() # White background for "saturated data." - self.execute_example_linear_color_bar( - color_below_min, color_bar, color_bar_name, color_above_max, False - ) + self.execute_example_linear_color_bar(color_below_min, color_bar, color_bar_name, color_above_max, False) # # Closed corner tour color bar. # # ?? SCAFFOLDING RCB -- USE THIS TO CLARIFY AND THEN FIX PROBLEMS WITH COLOR INTERPOLATION. @@ -480,14 +411,10 @@ def example_corner_tour_closed_equal_angle(self) -> None: self.start_test() # Normalized closed corner tour color bar. color_below_min = Color.black() # Black below bottom of color bar. - color_bar = tcc.normalize_color_bar_to_equal_angles( - tcc.corner_tour_closed_color_bar() - ) + color_bar = tcc.normalize_color_bar_to_equal_angles(tcc.corner_tour_closed_color_bar()) color_bar_name = 'corner_tour_closed_equal_angle' color_above_max = Color.white() # White background for "saturated data." - self.execute_example_linear_color_bar( - color_below_min, color_bar, color_bar_name, color_above_max, True - ) + self.execute_example_linear_color_bar(color_below_min, color_bar, color_bar_name, color_above_max, True) # # Closed color wheel linear color bar. # color_below_min = Color.black() # Black below bottom of color bar. @@ -504,9 +431,7 @@ def example_polar_color_bar(self) -> None: color_bar = tcc.O_color_bar() color_bar_name = 'O' # ?? SCAFFOLDING RCB -- THIS SHOULD BE A CLASS MEMBER color_above_max = Color.white() # White background for "saturated data." - self.execute_example_polar_color_bar( - color_below_min, color_bar, color_bar_name, color_above_max - ) + self.execute_example_polar_color_bar(color_below_min, color_bar, color_bar_name, color_above_max) def example_blue_under_red_cross_green(self) -> None: # Initialize test. 
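The hunks above, like the rest of this patch, collapse wrapped call expressions onto single lines of up to 120 characters while leaving single-quoted string literals untouched. A minimal sketch of reproducing that reflow, assuming a Black-style formatter with string normalization disabled; the formatter is not named in these hunks, and the sample source string below is hypothetical:

import black

# Hypothetical pre-format snippet in the multi-line style seen on the removed lines above.
SRC = (
    "self.save_and_check_image(\n"
    "    target.image, self.dpm, target.description_inch(), '.png'\n"
    ")\n"
)

# line_length=120 lets the call collapse onto one line; string_normalization=False keeps
# the single-quoted '.png' literal, matching the added lines in this patch (assumption).
mode = black.Mode(line_length=120, string_normalization=False)
print(black.format_str(SRC, mode=mode))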
diff --git a/opencsp/app/camera_calibration/CameraCalibration.py b/opencsp/app/camera_calibration/CameraCalibration.py index f499a4a5..27cfd0e6 100644 --- a/opencsp/app/camera_calibration/CameraCalibration.py +++ b/opencsp/app/camera_calibration/CameraCalibration.py @@ -74,14 +74,9 @@ def create_layout(self): # Name of camera input self.var_cam_name = tkinter.StringVar(value='Camera') - lbl_cam_name = tkinter.Label( - self.root, text='Camera Name:', font=('calibre', 10, 'bold') - ) + lbl_cam_name = tkinter.Label(self.root, text='Camera Name:', font=('calibre', 10, 'bold')) entry_cam_name = tkinter.Entry( - self.root, - textvariable=self.var_cam_name, - font=('calibre', 10, 'normal'), - width=40, + self.root, textvariable=self.var_cam_name, font=('calibre', 10, 'normal'), width=40 ) lbl_cam_name.grid(row=r, column=0, pady=2, padx=2, sticky='nsw') @@ -91,24 +86,10 @@ def create_layout(self): # Number of points input self.var_pts_x = tkinter.IntVar(value=18) self.var_pts_y = tkinter.IntVar(value=23) - lbl_pts_x = tkinter.Label( - self.root, text='Number of grid x points:', font=('calibre', 10, 'bold') - ) - lbl_pts_y = tkinter.Label( - self.root, text='Number of grid y points:', font=('calibre', 10, 'bold') - ) - entry_pts_x = tkinter.Entry( - self.root, - textvariable=self.var_pts_x, - font=('calibre', 10, 'normal'), - width=10, - ) - entry_pts_y = tkinter.Entry( - self.root, - textvariable=self.var_pts_y, - font=('calibre', 10, 'normal'), - width=10, - ) + lbl_pts_x = tkinter.Label(self.root, text='Number of grid x points:', font=('calibre', 10, 'bold')) + lbl_pts_y = tkinter.Label(self.root, text='Number of grid y points:', font=('calibre', 10, 'bold')) + entry_pts_x = tkinter.Entry(self.root, textvariable=self.var_pts_x, font=('calibre', 10, 'normal'), width=10) + entry_pts_y = tkinter.Entry(self.root, textvariable=self.var_pts_y, font=('calibre', 10, 'normal'), width=10) lbl_pts_x.grid(row=r, column=0, pady=2, padx=2, sticky='nsw') entry_pts_x.grid(row=r, column=1, pady=2, padx=2, sticky='nsw') @@ -118,30 +99,22 @@ def create_layout(self): r += 1 # Select images button - self.btn_select_ims = tkinter.Button( - self.root, text='Select Images', command=self.select_images - ) + self.btn_select_ims = tkinter.Button(self.root, text='Select Images', command=self.select_images) self.btn_select_ims.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') r += 1 # Find corners button - self.btn_find_corns = tkinter.Button( - self.root, text='Find Corners', command=self.find_corners - ) + self.btn_find_corns = tkinter.Button(self.root, text='Find Corners', command=self.find_corners) self.btn_find_corns.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') r += 1 # View annotated images button - self.btn_view_corns = tkinter.Button( - self.root, text='View Found Corners', command=self.view_found_corners - ) + self.btn_view_corns = tkinter.Button(self.root, text='View Found Corners', command=self.view_found_corners) self.btn_view_corns.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') r += 1 # Calibrate button - self.btn_calibrate = tkinter.Button( - self.root, text='Calibrate Camera', command=self.calibrate_camera - ) + self.btn_calibrate = tkinter.Button(self.root, text='Calibrate Camera', command=self.calibrate_camera) self.btn_calibrate.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') r += 1 @@ -153,16 +126,12 @@ def create_layout(self): r += 1 # Visualize distortion - self.btn_vis_dist = tkinter.Button( - self.root, text='Visualize distortion', command=self.visualize_dist - ) + 
self.btn_vis_dist = tkinter.Button(self.root, text='Visualize distortion', command=self.visualize_dist) self.btn_vis_dist.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') r += 1 # Save camera button - self.btn_save = tkinter.Button( - self.root, text='Save Camera', command=self.save_camera - ) + self.btn_save = tkinter.Button(self.root, text='Save Camera', command=self.save_camera) self.btn_save.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') r += 1 @@ -172,20 +141,9 @@ def create_layout(self): r += 1 # Reprojection error labels - lbl = tkinter.Label( - text='Reprojection Error', - borderwidth=1, - relief='solid', - font=('calibre', 10, 'bold'), - ) + lbl = tkinter.Label(text='Reprojection Error', borderwidth=1, relief='solid', font=('calibre', 10, 'bold')) lbl.grid(row=r, column=0, sticky='nsew', pady=(20, 0)) - lbl = tkinter.Label( - text='Image Name', - width=40, - borderwidth=1, - relief='solid', - font=('calibre', 10, 'bold'), - ) + lbl = tkinter.Label(text='Image Name', width=40, borderwidth=1, relief='solid', font=('calibre', 10, 'bold')) lbl.grid(row=r, column=1, sticky='nsw', pady=(20, 0)) r += 1 @@ -198,18 +156,9 @@ def create_layout(self): self.var_reproj_val.append(var_val) self.var_reproj_name.append(var_name) - lbl_val = tkinter.Label( - textvariable=var_val, - borderwidth=1, - relief="solid", - font=('calibre', 10, 'normal'), - ) + lbl_val = tkinter.Label(textvariable=var_val, borderwidth=1, relief="solid", font=('calibre', 10, 'normal')) lbl_name = tkinter.Label( - textvariable=var_name, - borderwidth=1, - width=40, - relief="solid", - font=('calibre', 10, 'normal'), + textvariable=var_name, borderwidth=1, width=40, relief="solid", font=('calibre', 10, 'normal') ) lbl_val.grid(row=r, column=0, sticky='nsew') @@ -245,9 +194,7 @@ def select_images(self): ('TIF files', ('*.tif', '*.tiff')), ('GIF files', '*.gif'), ] - files = askopenfilename( - filetypes=filetypes, title="Select image files", multiple=True - ) + files = askopenfilename(filetypes=filetypes, title="Select image files", multiple=True) if len(files) != '': # Save files @@ -351,21 +298,16 @@ def calibrate_camera(self): cam_name = self.var_cam_name.get() # Calibrate camera - ( - self.camera, - self.r_cam_object, - self.v_cam_object_cam, - self.avg_reproj_error, - ) = cc.calibrate_camera(self.p_object, self.p_image, self.img_size_xy, cam_name) + (self.camera, self.r_cam_object, self.v_cam_object_cam, self.avg_reproj_error) = cc.calibrate_camera( + self.p_object, self.p_image, self.img_size_xy, cam_name + ) # Calculate reprojection error for each image self.reproj_errors = [] for R_cam, V_cam, P_object, P_image in zip( self.r_cam_object, self.v_cam_object_cam, self.p_object, self.p_image ): - error = sp.reprojection_error( - self.camera, P_object, P_image, R_cam, V_cam - ) # RMS pixels + error = sp.reprojection_error(self.camera, P_object, P_image, R_cam, V_cam) # RMS pixels self.reproj_errors.append(error) # RMS pixels # Find five images with highest reprojection errors @@ -401,9 +343,7 @@ def visualize_dist(self): plt.show(block=False) def save_camera(self): - file = asksaveasfilename( - defaultextension='.h5', filetypes=[("HDF5 File", "*.h5")] - ) + file = asksaveasfilename(defaultextension='.h5', filetypes=[("HDF5 File", "*.h5")]) if file != '': self.camera.save_to_hdf(file) @@ -462,9 +402,7 @@ def enable_btns(self): self.lbl_num_files.config(text=f'{len(self.files):d} files') self.lbl_corns_found.config(text='All corners found') - self.lbl_cam_calibrated.config( - text=f'Average reprojection error: 
{self.avg_reproj_error:.2f} pixels' - ) + self.lbl_cam_calibrated.config(text=f'Average reprojection error: {self.avg_reproj_error:.2f} pixels') def clear_reproj_errors(self): for name, val in zip(self.var_reproj_name, self.var_reproj_val): diff --git a/opencsp/app/camera_calibration/lib/ViewAnnotatedImages.py b/opencsp/app/camera_calibration/lib/ViewAnnotatedImages.py index 1be76e70..ee6af39a 100644 --- a/opencsp/app/camera_calibration/lib/ViewAnnotatedImages.py +++ b/opencsp/app/camera_calibration/lib/ViewAnnotatedImages.py @@ -52,9 +52,7 @@ def __init__(self, root: tkinter.Tk, images: list[ndarray], image_names: list[st # Create image title self.var_title = tkinter.StringVar(value=image_names[self.idx_im]) - title = tkinter.Label( - root, textvariable=self.var_title, font=('calibre', 15, 'bold') - ) + title = tkinter.Label(root, textvariable=self.var_title, font=('calibre', 15, 'bold')) # Create drawing canvas self.canvas = tkinter.Canvas(root, width=self.width, height=self.height) diff --git a/opencsp/app/camera_calibration/lib/calibration_camera.py b/opencsp/app/camera_calibration/lib/calibration_camera.py index 35a5b0f2..a0ebccf4 100644 --- a/opencsp/app/camera_calibration/lib/calibration_camera.py +++ b/opencsp/app/camera_calibration/lib/calibration_camera.py @@ -15,10 +15,7 @@ def calibrate_camera( - p_object: Iterable[Vxyz], - p_image: Iterable[Vxy], - img_shape_xy: tuple[int, int], - name: str, + p_object: Iterable[Vxyz], p_image: Iterable[Vxy], img_shape_xy: tuple[int, int], name: str ) -> tuple[Camera, Iterable[Rotation], Iterable[Vxyz], float]: """ Performs 4 term camera calibration for non-fisheye lens. @@ -54,12 +51,7 @@ def calibrate_camera( img_pts_list = [v.data.T for v in p_image] dist_input = np.zeros(4, dtype=np.float32) error, mtx, dist, rvecs, tvecs = cv.calibrateCamera( - obj_pts_list, - img_pts_list, - img_shape_xy, - None, - dist_input, - flags=cv.CALIB_FIX_K3, + obj_pts_list, img_pts_list, img_shape_xy, None, dist_input, flags=cv.CALIB_FIX_K3 ) # Keep only first four distortion coefficients dist = dist[:4].squeeze() @@ -76,9 +68,7 @@ def calibrate_camera( return camera, r_cam_object, v_cam_object_cam, error -def view_distortion( - camera: Camera, ax1: Axes, ax2: Axes, ax3: Axes, num_samps: int = 12 -): +def view_distortion(camera: Camera, ax1: Axes, ax2: Axes, ax3: Axes, num_samps: int = 12): """ Plots the radial/tangential distortion of a camera object. 
@@ -118,12 +108,7 @@ def view_distortion( def calc_dx_dy(dist_coef): """Calculate distorted x and y pixel maps""" mx_cal, my_cal = cv.initUndistortRectifyMap( - camera.intrinsic_mat, - dist_coef, - np.eye(3).astype(np.float32), - camera.intrinsic_mat, - img_shape, - cv.CV_32FC1, + camera.intrinsic_mat, dist_coef, np.eye(3).astype(np.float32), camera.intrinsic_mat, img_shape, cv.CV_32FC1 ) mx_cal -= np.float32(camera.intrinsic_mat[0, 2]) @@ -145,9 +130,7 @@ def calc_dx_dy(dist_coef): dx_tot, dy_tot = calc_dx_dy(camera.distortion_coef) # Plot radial distortion - ax1.quiver( - mx[y1::N, x1::N], my[y1::N, x1::N], dx_rad[y1::N, x1::N], dy_rad[y1::N, x1::N] - ) + ax1.quiver(mx[y1::N, x1::N], my[y1::N, x1::N], dx_rad[y1::N, x1::N], dy_rad[y1::N, x1::N]) ax1.set_ylim(0, img_shape[1]) ax1.set_xlim(0, img_shape[0]) ax1.set_xlabel('X (pixel)') @@ -157,9 +140,7 @@ def calc_dx_dy(dist_coef): ax1.grid() # Plot tangential distortion - ax2.quiver( - mx[y1::N, x1::N], my[y1::N, x1::N], dx_tan[y1::N, x1::N], dy_tan[y1::N, x1::N] - ) + ax2.quiver(mx[y1::N, x1::N], my[y1::N, x1::N], dx_tan[y1::N, x1::N], dy_tan[y1::N, x1::N]) ax2.set_ylim(0, img_shape[1]) ax2.set_xlim(0, img_shape[0]) ax2.set_xlabel('X (pixel)') @@ -169,9 +150,7 @@ def calc_dx_dy(dist_coef): ax2.grid() # Plot total distortion - ax3.quiver( - mx[y1::N, x1::N], my[y1::N, x1::N], dx_tot[y1::N, x1::N], dy_tot[y1::N, x1::N] - ) + ax3.quiver(mx[y1::N, x1::N], my[y1::N, x1::N], dx_tot[y1::N, x1::N], dy_tot[y1::N, x1::N]) ax3.set_ylim(0, img_shape[1]) ax3.set_xlim(0, img_shape[0]) ax3.set_xlabel('X (pixel)') diff --git a/opencsp/app/camera_calibration/lib/image_processing.py b/opencsp/app/camera_calibration/lib/image_processing.py index efd6b449..9d6a0812 100644 --- a/opencsp/app/camera_calibration/lib/image_processing.py +++ b/opencsp/app/camera_calibration/lib/image_processing.py @@ -8,9 +8,7 @@ from opencsp.common.lib.geometry.Vxyz import Vxyz -def find_checkerboard_corners( - npts: tuple[int, int], img: np.ndarray -) -> tuple[Vxyz, Vxy]: +def find_checkerboard_corners(npts: tuple[int, int], img: np.ndarray) -> tuple[Vxyz, Vxy]: """ Finds checkerboard corners in given image. @@ -33,11 +31,7 @@ def find_checkerboard_corners( """ # Find corners - chessboard_flags = ( - cv.CALIB_CB_ADAPTIVE_THRESH - + cv.CALIB_CB_FAST_CHECK - + cv.CALIB_CB_NORMALIZE_IMAGE - ) + chessboard_flags = cv.CALIB_CB_ADAPTIVE_THRESH + cv.CALIB_CB_FAST_CHECK + cv.CALIB_CB_NORMALIZE_IMAGE ret, corners = cv.findChessboardCorners(img, npts, chessboard_flags) # Check corners were found @@ -48,9 +42,7 @@ def find_checkerboard_corners( p_corners = Vxy(corners[:, 0, :].T, dtype=np.float32) # Refine the corners - p_corners_refined = refine_checkerboard_corners( - img, p_corners, window_size=(11, 11) - ) + p_corners_refined = refine_checkerboard_corners(img, p_corners, window_size=(11, 11)) # Define object points (x,y,z) as in: (0,0,0), (1,0,0), (2,0,0), ... (6,5,0) objp = np.zeros((npts[0] * npts[1], 3), dtype=np.float32) @@ -95,9 +87,7 @@ def refine_checkerboard_corners( return Vxy(imgpts_refined.T, dtype=np.float32) -def annotate_found_corners( - npts: tuple[int, int], img: np.ndarray, img_points: Vxy -) -> None: +def annotate_found_corners(npts: tuple[int, int], img: np.ndarray, img_points: Vxy) -> None: """ Updates an image with found checkerboard corner annotations. 
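The calibration_camera.py and image_processing.py hunks above reflow the OpenCV checkerboard pipeline: cv.findChessboardCorners with the summed CALIB_CB_* flags, sub-pixel refinement with an 11 x 11 window, and cv.calibrateCamera with CALIB_FIX_K3 and a four-term distortion vector. A standalone usage sketch of those OpenCV calls, independent of the opencsp wrappers; the image path is a placeholder, and the 18 x 23 grid size is taken from the GUI defaults shown earlier in this patch:

import cv2 as cv
import numpy as np

# Placeholder inputs; the real pipeline reads images selected through the GUI.
img = cv.imread('checkerboard.png', cv.IMREAD_GRAYSCALE)
npts = (18, 23)  # interior corner counts (placeholder, per the GUI defaults)

# Same flag combination as the reformatted line in image_processing.py.
flags = cv.CALIB_CB_ADAPTIVE_THRESH + cv.CALIB_CB_FAST_CHECK + cv.CALIB_CB_NORMALIZE_IMAGE
found, corners = cv.findChessboardCorners(img, npts, flags)

if found:
    # Sub-pixel refinement, analogous to refine_checkerboard_corners with window_size=(11, 11).
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    corners = cv.cornerSubPix(img, corners, (11, 11), (-1, -1), criteria)

    # Object points on a unit grid, as in find_checkerboard_corners.
    objp = np.zeros((npts[0] * npts[1], 3), dtype=np.float32)
    objp[:, :2] = np.mgrid[0 : npts[0], 0 : npts[1]].T.reshape(-1, 2)

    # Single view shown for brevity; calibrate_camera above passes one list entry per image.
    error, mtx, dist, rvecs, tvecs = cv.calibrateCamera(
        [objp], [corners], img.shape[::-1], None, np.zeros(4, np.float32), flags=cv.CALIB_FIX_K3
    )
    print(f'RMS reprojection error: {error:.3f} pixels')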
diff --git a/opencsp/app/camera_calibration/test/test_camera_calibration.py b/opencsp/app/camera_calibration/test/test_camera_calibration.py index 528b0f78..5cdf50e5 100644 --- a/opencsp/app/camera_calibration/test/test_camera_calibration.py +++ b/opencsp/app/camera_calibration/test/test_camera_calibration.py @@ -64,15 +64,13 @@ def setup_class(cls, regenerate=False): img_size = images[0].shape # Calibrate camera - (camera, r_cam_object, v_cam_object_cam, calibration_error) = ( - cc.calibrate_camera(p_object, p_image, img_size, cam_name) + (camera, r_cam_object, v_cam_object_cam, calibration_error) = cc.calibrate_camera( + p_object, p_image, img_size, cam_name ) # Calculate reprojection errors errors = [] - for rot, vec, p_obj, p_img in zip( - r_cam_object, v_cam_object_cam, p_object, p_image - ): + for rot, vec, p_obj, p_img in zip(r_cam_object, v_cam_object_cam, p_object, p_image): error = sp.reprojection_error(camera, p_obj, p_img, rot, vec) errors.append(error) @@ -159,9 +157,7 @@ def test_calibration_error(self): np.testing.assert_allclose(self.calibration_error, self.calibration_error_exp) def test_reprojection_errors(self): - np.testing.assert_allclose( - self.reprojection_errors, self.reprojection_errors_exp - ) + np.testing.assert_allclose(self.reprojection_errors, self.reprojection_errors_exp) if __name__ == "__main__": diff --git a/opencsp/app/scene_reconstruction/lib/SceneReconstruction.py b/opencsp/app/scene_reconstruction/lib/SceneReconstruction.py index 7424b467..3c9a9bcb 100644 --- a/opencsp/app/scene_reconstruction/lib/SceneReconstruction.py +++ b/opencsp/app/scene_reconstruction/lib/SceneReconstruction.py @@ -33,9 +33,7 @@ class SceneReconstruction: triangulation, by default 0.02 meters. """ - def __init__( - self, camera: Camera, known_point_locations: ndarray, image_filter_path: str - ) -> 'SceneReconstruction': + def __init__(self, camera: Camera, known_point_locations: ndarray, image_filter_path: str) -> 'SceneReconstruction': """Instantiates SceneReconstruction class Parameters @@ -94,21 +92,15 @@ def all_image_tvecs(self) -> ndarray: @property def unlocated_marker_ids(self) -> ndarray: """Returns all unlocated marker IDs""" - return np.unique( - np.hstack([image.unlocated_point_ids for image in self.images]) - ) + return np.unique(np.hstack([image.unlocated_point_ids for image in self.images])) def convert_to_four_corners(self) -> None: """Converts all images to four corner images instead of single points""" for image in self.images: image.convert_to_four_corner() - self.unique_point_ids = np.unique( - np.hstack([im.point_ids for im in self.images]) - ) - self.unique_marker_ids = np.floor( - self.unique_point_ids.astype(float) / 4 - ).astype(int) + self.unique_point_ids = np.unique(np.hstack([im.point_ids for im in self.images])) + self.unique_marker_ids = np.floor(self.unique_point_ids.astype(float) / 4).astype(int) mask_nan = np.array([1, np.nan, np.nan, np.nan] * self.num_points) mask_zero = np.array([1, 0, 0, 0] * self.num_points) @@ -116,9 +108,7 @@ def convert_to_four_corners(self) -> None: self.num_points = self.unique_point_ids.size self.points_xyz = np.repeat(self.points_xyz, 4, axis=0) * mask_nan[:, None] self.located_point_ids = self.located_point_ids * 4 - self.located_point_mask = ( - np.repeat(self.located_point_mask, 4) * mask_zero - ).astype(bool) + self.located_point_mask = (np.repeat(self.located_point_mask, 4) * mask_zero).astype(bool) def set_id_known(self, id_: int, pt: np.ndarray) -> None: """Sets given ID as known in all images @@ -164,15 
+154,11 @@ def save_ids_known(self) -> None: def load_images(self) -> None: """Saves loaded dataset in class""" self.images: list[ImageMarker] = [] - for idx, file in enumerate( - tqdm(self.image_paths, desc='Loading marker images') - ): + for idx, file in enumerate(tqdm(self.image_paths, desc='Loading marker images')): self.images.append(ImageMarker.load_aruco_origin(file, idx, self.camera)) # Save unique markers - self.unique_point_ids = np.unique( - np.hstack([im.point_ids for im in self.images]) - ) + self.unique_point_ids = np.unique(np.hstack([im.point_ids for im in self.images])) self.unique_marker_ids = self.unique_point_ids.copy() self.num_markers = self.unique_point_ids.size self.num_points = self.unique_point_ids.size @@ -227,9 +213,7 @@ def attempt_all_points_triangulation(self, intersect_thres: float = 0.02) -> Non tvecs = [] pts_img = [] for im in images: - idx = im.point_ids.tolist().index( - id_ - ) # Index of point in current image + idx = im.point_ids.tolist().index(id_) # Index of point in current image pts_img.append(im.pts_im_xy[idx]) # Location of 2d point in image rots.append(Rotation.from_rotvec(im.rvec)) tvecs.append(im.tvec) @@ -263,9 +247,7 @@ def refine_located_poses_and_points(self) -> None: camera_idxs.append([image.img_id] * image.num_located_markers) # Point (0-based) index for every point observation that has been located pt_ids = image.located_point_ids # ID number - pt_idxs = [ - self.unique_point_ids.tolist().index(pt_id) for pt_id in pt_ids - ] # index + pt_idxs = [self.unique_point_ids.tolist().index(pt_id) for pt_id in pt_ids] # index point_indices.append(pt_idxs) # 2d image points of every point observation points2d.append(image.pts_im_xy[image.located_markers_mask]) @@ -292,9 +274,7 @@ def refine_located_poses_and_points(self) -> None: ) # Update 3d marker points - self.set_ids_known( - self.located_point_ids, pts_marker_opt[self.located_point_mask] - ) + self.set_ids_known(self.located_point_ids, pts_marker_opt[self.located_point_mask]) # Update rvec/tvec for idx in self.located_camera_idxs: @@ -322,9 +302,7 @@ def scale_points(self, point_pairs: ndarray[int], distances: ndarray[float]) -> (N,) array of distances between point pairs """ # Calculate scales - scales = ph.scale_points( - Vxyz(self.points_xyz.T), self.unique_point_ids, point_pairs * 4, distances - ) + scales = ph.scale_points(Vxyz(self.points_xyz.T), self.unique_point_ids, point_pairs * 4, distances) lt.info('Point cloud scaling summary:') lt.info(f'Calculated average point cloud scale: {scales.mean():.4f}.') @@ -333,12 +311,7 @@ def scale_points(self, point_pairs: ndarray[int], distances: ndarray[float]) -> # Apply scale to points self.points_xyz *= scales.mean() - def align_points( - self, - marker_ids: ndarray[int], - alignment_values: Vxyz, - apply_scale: bool = False, - ) -> None: + def align_points(self, marker_ids: ndarray[int], alignment_values: Vxyz, apply_scale: bool = False) -> None: """Aligns selected markers origin points (corner index 0) within point cloud to match given alignment values. Set to NAN for floating. Points are aligned FIRST, then transformed. @@ -498,8 +471,7 @@ def optimize(self, intersect_thresh: float = 0.02) -> None: # Check that all markers have been found if self.unlocated_marker_ids.size != 0: lt.warn( - f'{self.unlocated_marker_ids.size:d} markers remain unlocated. ' - 'More camera images may be needed.' + f'{self.unlocated_marker_ids.size:d} markers remain unlocated. ' 'More camera images may be needed.' 
) # Convert to 4-corner model @@ -513,8 +485,7 @@ def optimize(self, intersect_thresh: float = 0.02) -> None: self.refine_located_poses_and_points() def run_calibration(self) -> None: - """Runs the calibration sequence - """ + """Runs the calibration sequence""" # Run calibration self.load_images() self.save_ids_known() @@ -538,10 +509,4 @@ def get_data(self) -> ndarray: Marker ID, point ID, X, Y, Z """ - return np.hstack( - ( - self.unique_marker_ids[:, None], - self.unique_point_ids[:, None], - self.points_xyz, - ) - ) + return np.hstack((self.unique_marker_ids[:, None], self.unique_point_ids[:, None], self.points_xyz)) diff --git a/opencsp/app/scene_reconstruction/test/generate_downsampled_dataset.py b/opencsp/app/scene_reconstruction/test/generate_downsampled_dataset.py index 3794b632..de01a633 100644 --- a/opencsp/app/scene_reconstruction/test/generate_downsampled_dataset.py +++ b/opencsp/app/scene_reconstruction/test/generate_downsampled_dataset.py @@ -33,11 +33,7 @@ def downsample_dataset(dir_input: str, dir_output: str) -> None: # Copy files that don't need downsampling print('Copying files with no downsampling...') - files = [ - "point_pair_distances.csv", - "known_point_locations.csv", - "alignment_points.csv", - ] + files = ["point_pair_distances.csv", "known_point_locations.csv", "alignment_points.csv"] for file in files: shutil.copy(join(dir_input, file), join(dir_output, file)) @@ -53,9 +49,7 @@ def downsample_dataset(dir_input: str, dir_output: str) -> None: # Convert to monochrome and downsample im_ds = dd.downsample_images(im.astype(float).mean(2), n_aruco) # Save image - imageio.imwrite( - join(dir_output_aruco_images, file_name), im_ds, quality=jpg_quality - ) + imageio.imwrite(join(dir_output_aruco_images, file_name), im_ds, quality=jpg_quality) # Downsample aruco marker camera print('Downsampling camera...') @@ -65,9 +59,6 @@ def downsample_dataset(dir_input: str, dir_output: str) -> None: if __name__ == '__main__': downsample_dataset( - dir_input=join( - opencsp_code_dir(), - '../../sample_data/scene_reconstruction/data_measurement', - ), + dir_input=join(opencsp_code_dir(), '../../sample_data/scene_reconstruction/data_measurement'), dir_output=join(os.path.dirname(__file__), 'data/data_measurement'), ) diff --git a/opencsp/app/scene_reconstruction/test/test_SceneReconstruction.py b/opencsp/app/scene_reconstruction/test/test_SceneReconstruction.py index f1199754..527ec350 100644 --- a/opencsp/app/scene_reconstruction/test/test_SceneReconstruction.py +++ b/opencsp/app/scene_reconstruction/test/test_SceneReconstruction.py @@ -78,9 +78,7 @@ def setUpClass(cls, dir_input: str = None, dir_output: str = None): def test_calibrated_corner_locations(self): """Tests relative corner locations""" - pts_exp = np.loadtxt( - join(self.dir_output, 'point_locations.csv'), delimiter=',', skiprows=1 - ) + pts_exp = np.loadtxt(join(self.dir_output, 'point_locations.csv'), delimiter=',', skiprows=1) np.testing.assert_allclose(self.pts_meas, pts_exp, atol=1e-5, rtol=0) print('Corner locations tested successfully.') diff --git a/opencsp/app/select_image_points/SelectImagePoints.py b/opencsp/app/select_image_points/SelectImagePoints.py index c6b196d7..07b01cb3 100644 --- a/opencsp/app/select_image_points/SelectImagePoints.py +++ b/opencsp/app/select_image_points/SelectImagePoints.py @@ -43,10 +43,7 @@ def __init__(self, root: tk.Tk, file_name: str) -> 'SelectImagePoints': # Define system parameters screen_width = root.winfo_screenwidth() screen_height = root.winfo_screenheight() - 
self.win_size_max = ( - int(screen_width * frac_window), - int(screen_height * frac_window), - ) + self.win_size_max = (int(screen_width * frac_window), int(screen_height * frac_window)) self.roi_width = int(min(screen_height, screen_width) * frac_roi) # pixels self.save_name = os.path.basename(file_name).split('.')[-2] self.image_file_name = file_name @@ -86,9 +83,7 @@ def update_image(self) -> None: """ Updates the current displayed image to current loaded image """ - self.image_display = self.canvas.create_image( - 0, 0, anchor='nw', image=self.image_tk - ) + self.image_display = self.canvas.create_image(0, 0, anchor='nw', image=self.image_tk) def click(self, event) -> None: """ @@ -105,9 +100,7 @@ def click_fine(self, event): """ Called when fine res image is clicked """ - self.pts[-1] += np.array([event.x / self.scale, event.y / self.scale]).astype( - int - ) # image pixels + self.pts[-1] += np.array([event.x / self.scale, event.y / self.scale]).astype(int) # image pixels self.revert_main_image() def click_rough(self, event): @@ -144,12 +137,8 @@ def load_image_from_array(self, image: np.ndarray) -> None: image_pil = Image.fromarray(image.copy(), 'RGB') # Resize image - size_x = ( - float(self.win_size_max[0]) / image_pil.size[0] - ) # window pixels / image pixels - size_y = ( - float(self.win_size_max[1]) / image_pil.size[1] - ) # window pixels / image pixels + size_x = float(self.win_size_max[0]) / image_pil.size[0] # window pixels / image pixels + size_y = float(self.win_size_max[1]) / image_pil.size[1] # window pixels / image pixels self.scale = min(size_x, size_y) # window pixels / image pixels shape = ( int(float(image_pil.size[0]) * self.scale), @@ -207,8 +196,7 @@ def _load_raw_image(file) -> np.ndarray: if __name__ == '__main__': # Select file name file_selected = askopenfilename( - title='Select file to open', - filetypes=[('RAW', '*.NEF'), ('RAW', '*.RAW'), ('All Files', '*.*')], + title='Select file to open', filetypes=[('RAW', '*.NEF'), ('RAW', '*.RAW'), ('All Files', '*.*')] ) if file_selected != '': # Create window diff --git a/opencsp/app/sofast/SofastGUI.py b/opencsp/app/sofast/SofastGUI.py index 316c7a9f..16e5a0e0 100644 --- a/opencsp/app/sofast/SofastGUI.py +++ b/opencsp/app/sofast/SofastGUI.py @@ -13,15 +13,9 @@ from opencsp.app.sofast.lib.Fringes import Fringes from opencsp.common.lib.camera.ImageAcquisitionAbstract import ImageAcquisitionAbstract -from opencsp.common.lib.camera.ImageAcquisition_DCAM_mono import ( - ImageAcquisition as ImageAcquisition_DCAM, -) -from opencsp.common.lib.camera.ImageAcquisition_DCAM_color import ( - ImageAcquisition as ImageAcquisition_DCAM_color, -) -from opencsp.common.lib.camera.ImageAcquisition_MSMF import ( - ImageAcquisition as ImageAcquisition_MSMF, -) +from opencsp.common.lib.camera.ImageAcquisition_DCAM_mono import ImageAcquisition as ImageAcquisition_DCAM +from opencsp.common.lib.camera.ImageAcquisition_DCAM_color import ImageAcquisition as ImageAcquisition_DCAM_color +from opencsp.common.lib.camera.ImageAcquisition_MSMF import ImageAcquisition as ImageAcquisition_MSMF from opencsp.common.lib.camera.image_processing import highlight_saturation from opencsp.common.lib.camera.LiveView import LiveView from opencsp.app.sofast.lib.ImageCalibrationAbstract import ImageCalibrationAbstract @@ -42,11 +36,7 @@ def __init__(self) -> 'SofastGUI': Instantiates GUI in new window """ # Define camera objects to choose from - self.cam_options = [ # Description of camera - 'DCAM Mono', - 'DCAM Color', - 'MSMF Mono', - ] + self.cam_options = 
['DCAM Mono', 'DCAM Color', 'MSMF Mono'] # Description of camera self.cam_objects = [ # Camera object to use ImageAcquisition_DCAM, ImageAcquisition_DCAM_color, @@ -119,14 +109,7 @@ def _plot_hist(self, ax: plt.Axes, frame: np.ndarray) -> None: ax.set_ylabel('Counts') ax.set_title('Image Histogram') - def _plot_image( - self, - ax: plt.Axes, - image: np.ndarray, - title: str = '', - xlabel: str = '', - ylabel: str = '', - ) -> None: + def _plot_image(self, ax: plt.Axes, image: np.ndarray, title: str = '', xlabel: str = '', ylabel: str = '') -> None: """ Plots image on given axes. @@ -166,21 +149,15 @@ def _create_layout(self) -> None: label_frame_run.grid(row=3, column=0, sticky='nesw', padx=5, pady=5) # Settings label frame label_frame_settings = tkinter.LabelFrame(self.root, text='Settings') - label_frame_settings.grid( - row=0, column=1, rowspan=3, sticky='nesw', padx=5, pady=5 - ) + label_frame_settings.grid(row=0, column=1, rowspan=3, sticky='nesw', padx=5, pady=5) # =============== First Column - Load components =============== r = 0 # Connect camera button self.btn_load_image_acquisition = tkinter.Button( - label_frame_load_system, - text='Connect Camera', - command=self.load_image_acquisition, - ) - self.btn_load_image_acquisition.grid( - row=r, column=0, pady=2, padx=2, sticky='nesw' + label_frame_load_system, text='Connect Camera', command=self.load_image_acquisition ) + self.btn_load_image_acquisition.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') TkToolTip( self.btn_load_image_acquisition, 'Select the type of camera in the dropdown menu to the right. Click to connect to the camera.', @@ -189,35 +166,22 @@ def _create_layout(self) -> None: # Load physical layout self.btn_load_image_projection = tkinter.Button( - label_frame_load_system, - text='Load ImageProjection', - command=self.load_image_projection, - ) - self.btn_load_image_projection.grid( - row=r, column=0, pady=2, padx=2, sticky='nesw' - ) - TkToolTip( - self.btn_load_image_projection, - 'Select an ImageProjection HDF file to load and display.', + label_frame_load_system, text='Load ImageProjection', command=self.load_image_projection ) + self.btn_load_image_projection.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') + TkToolTip(self.btn_load_image_projection, 'Select an ImageProjection HDF file to load and display.') r += 1 # =============== First Column - Projection controls =============== r = 0 self.btn_show_cal_image = tkinter.Button( - label_frame_projector, - text='Show Calibration Image', - command=self.show_calibration_image, + label_frame_projector, text='Show Calibration Image', command=self.show_calibration_image ) self.btn_show_cal_image.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') - TkToolTip( - self.btn_show_cal_image, 'Shows calibration image on projection window.' 
- ) + TkToolTip(self.btn_show_cal_image, 'Shows calibration image on projection window.') r += 1 - self.btn_show_axes = tkinter.Button( - label_frame_projector, text='Show Screen Axes', command=self.show_axes - ) + self.btn_show_axes = tkinter.Button(label_frame_projector, text='Show Screen Axes', command=self.show_axes) self.btn_show_axes.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') TkToolTip(self.btn_show_axes, 'Shows screen axes on projection window.') r += 1 @@ -230,9 +194,7 @@ def _create_layout(self) -> None: r += 1 self.btn_close_projection = tkinter.Button( - label_frame_projector, - text='Close Display Window', - command=self.close_projection_window, + label_frame_projector, text='Close Display Window', command=self.close_projection_window ) self.btn_close_projection.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') TkToolTip(self.btn_close_projection, 'Close only projection window.') @@ -242,59 +204,41 @@ def _create_layout(self) -> None: r = 0 # Perform exposure calibration self.btn_exposure_cal = tkinter.Button( - label_frame_camera, - text='Calibrate Exposure Time', - command=self.run_exposure_cal, + label_frame_camera, text='Calibrate Exposure Time', command=self.run_exposure_cal ) self.btn_exposure_cal.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') - TkToolTip( - self.btn_exposure_cal, 'Automatically performs camera exposure calibration.' - ) + TkToolTip(self.btn_exposure_cal, 'Automatically performs camera exposure calibration.') r += 1 # Set camera exposure - self.btn_set_exposure = tkinter.Button( - label_frame_camera, text='Set Exposure Time', command=self.set_exposure - ) + self.btn_set_exposure = tkinter.Button(label_frame_camera, text='Set Exposure Time', command=self.set_exposure) self.btn_set_exposure.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') TkToolTip( - self.btn_set_exposure, - 'Set the camera exposure time value. The current value is displayed when clicked.', + self.btn_set_exposure, 'Set the camera exposure time value. The current value is displayed when clicked.' 
) r += 1 # Show snapshot button - self.btn_show_snapshot = tkinter.Button( - label_frame_camera, text='Show Snapshot', command=self.show_snapshot - ) + self.btn_show_snapshot = tkinter.Button(label_frame_camera, text='Show Snapshot', command=self.show_snapshot) self.btn_show_snapshot.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') - TkToolTip( - self.btn_show_snapshot, - 'Shows a camera image and pixel brightness histogram.', - ) + TkToolTip(self.btn_show_snapshot, 'Shows a camera image and pixel brightness histogram.') r += 1 # Save snapshot button - self.btn_save_snapshot = tkinter.Button( - label_frame_camera, text='Save Snapshot', command=self.save_snapshot - ) + self.btn_save_snapshot = tkinter.Button(label_frame_camera, text='Save Snapshot', command=self.save_snapshot) self.btn_save_snapshot.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') TkToolTip(self.btn_save_snapshot, 'Saves image from camera as a PNG.') r += 1 # Live view button - self.btn_live_view = tkinter.Button( - label_frame_camera, text='Live View', command=self.live_view - ) + self.btn_live_view = tkinter.Button(label_frame_camera, text='Live View', command=self.live_view) self.btn_live_view.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') TkToolTip(self.btn_live_view, 'Shows live view from connected camera.') r += 1 # Perform exposure calibration self.btn_close_camera = tkinter.Button( - label_frame_camera, - text='Close Camera', - command=self.close_image_acquisition, + label_frame_camera, text='Close Camera', command=self.close_image_acquisition ) self.btn_close_camera.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') TkToolTip(self.btn_close_camera, 'Closes connection to camera.') @@ -307,17 +251,12 @@ def _create_layout(self) -> None: label_frame_run, text='Run Data Capture', command=self.run_measurement ) self.btn_run_measurement.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') - TkToolTip( - self.btn_run_measurement, - 'Runs SOFAST data capture. Mask then fringes are captured.', - ) + TkToolTip(self.btn_run_measurement, 'Runs SOFAST data capture. 
Mask then fringes are captured.') r += 1 # Perform projector-camera brightness calibration self.btn_gray_levels_cal = tkinter.Button( - label_frame_run, - text='Run Response Calibration', - command=self.run_gray_levels_cal, + label_frame_run, text='Run Response Calibration', command=self.run_gray_levels_cal ) self.btn_gray_levels_cal.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') TkToolTip( @@ -328,13 +267,9 @@ def _create_layout(self) -> None: # Load projector-camera brightness calibration self.btn_load_gray_levels_cal = tkinter.Button( - label_frame_run, - text='Load Response Calibration', - command=self.load_gray_levels_cal, - ) - self.btn_load_gray_levels_cal.grid( - row=r, column=0, pady=2, padx=2, sticky='nesw' + label_frame_run, text='Load Response Calibration', command=self.load_gray_levels_cal ) + self.btn_load_gray_levels_cal.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') TkToolTip( self.btn_load_gray_levels_cal, 'Loads a previously saved Projector-Camera Brightness Calibration sequence data file.', @@ -343,16 +278,11 @@ def _create_layout(self) -> None: # View projector-camera brightness calibration self.btn_view_gray_levels_cal = tkinter.Button( - label_frame_run, - text='View Response Calibration', - command=self.view_gray_levels_cal, - ) - self.btn_view_gray_levels_cal.grid( - row=r, column=0, pady=2, padx=2, sticky='nesw' + label_frame_run, text='View Response Calibration', command=self.view_gray_levels_cal ) + self.btn_view_gray_levels_cal.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') TkToolTip( - self.btn_view_gray_levels_cal, - 'Views current Projector-Camera Brightness Calibration sequence data file.', + self.btn_view_gray_levels_cal, 'Views current Projector-Camera Brightness Calibration sequence data file.' ) r += 1 @@ -366,12 +296,8 @@ def _create_layout(self) -> None: r = 0 # Camera type dropdown self.var_cam_select = tkinter.StringVar(value=self.cam_options[0]) - lbl_camera_type = tkinter.Label( - label_frame_settings, text='Select camera:', font=('calibre', 10, 'bold') - ) - drop_camera_type = tkinter.OptionMenu( - label_frame_settings, self.var_cam_select, *self.cam_options - ) + lbl_camera_type = tkinter.Label(label_frame_settings, text='Select camera:', font=('calibre', 10, 'bold')) + drop_camera_type = tkinter.OptionMenu(label_frame_settings, self.var_cam_select, *self.cam_options) TkToolTip(drop_camera_type, 'Select type of camera object to load.') lbl_camera_type.grid(row=r, column=1, pady=2, padx=2, sticky='nse') @@ -381,17 +307,10 @@ def _create_layout(self) -> None: # Calibration type dropdown self.var_cal_select = tkinter.StringVar(value=self.cal_options[0]) lbl_cal_type = tkinter.Label( - label_frame_settings, - text='Select calibration method:', - font=('calibre', 10, 'bold'), - ) - drop_cal_type = tkinter.OptionMenu( - label_frame_settings, self.var_cal_select, *self.cal_options - ) - TkToolTip( - drop_cal_type, - 'Select type of Projector-Camera Brightness Calibration process to use.', + label_frame_settings, text='Select calibration method:', font=('calibre', 10, 'bold') ) + drop_cal_type = tkinter.OptionMenu(label_frame_settings, self.var_cal_select, *self.cal_options) + TkToolTip(drop_cal_type, 'Select type of Projector-Camera Brightness Calibration process to use.') lbl_cal_type.grid(row=r, column=1, pady=2, padx=2, sticky='nse') drop_cal_type.grid(row=r, column=2, pady=2, padx=2, sticky='nsw') @@ -400,12 +319,8 @@ def _create_layout(self) -> None: # Fringe periods input box self.var_fringe_periods_x = tkinter.IntVar(value=4) 
self.var_fringe_periods_y = tkinter.IntVar(value=4) - lbl_fringe_x = tkinter.Label( - label_frame_settings, text='Fringe X periods:', font=('calibre', 10, 'bold') - ) - lbl_fringe_y = tkinter.Label( - label_frame_settings, text='Fringe Y periods:', font=('calibre', 10, 'bold') - ) + lbl_fringe_x = tkinter.Label(label_frame_settings, text='Fringe X periods:', font=('calibre', 10, 'bold')) + lbl_fringe_y = tkinter.Label(label_frame_settings, text='Fringe Y periods:', font=('calibre', 10, 'bold')) entry_fringe_x = tkinter.Spinbox( label_frame_settings, textvariable=self.var_fringe_periods_x, @@ -439,30 +354,19 @@ def _create_layout(self) -> None: r += 1 # Camera calibration input box - self.var_gray_lvl_cal_status = tkinter.StringVar( - value='Calibration data: No Data' - ) + self.var_gray_lvl_cal_status = tkinter.StringVar(value='Calibration data: No Data') lbl_gray_lvl_cal_status = tkinter.Label( - label_frame_settings, - textvariable=self.var_gray_lvl_cal_status, - font=('calibre', 10, 'bold'), + label_frame_settings, textvariable=self.var_gray_lvl_cal_status, font=('calibre', 10, 'bold') ) - lbl_gray_lvl_cal_status.grid( - row=r, column=1, pady=2, padx=2, sticky='nsw', columnspan=2 - ) + lbl_gray_lvl_cal_status.grid(row=r, column=1, pady=2, padx=2, sticky='nsw', columnspan=2) r += 1 # Measure point input self.var_meas_pt = tkinter.StringVar(value='0.0, 0.0, 0.0') - lbl_meas_pt = tkinter.Label( - label_frame_settings, text='Measure Point XYZ', font=('calibre', 10, 'bold') - ) + lbl_meas_pt = tkinter.Label(label_frame_settings, text='Measure Point XYZ', font=('calibre', 10, 'bold')) entry_meas_pt = tkinter.Entry( - label_frame_settings, - textvariable=self.var_meas_pt, - font=('calibre', 10, 'normal'), - width=20, + label_frame_settings, textvariable=self.var_meas_pt, font=('calibre', 10, 'normal'), width=20 ) TkToolTip( entry_meas_pt, @@ -477,15 +381,10 @@ def _create_layout(self) -> None: # Distance input self.var_meas_dist = tkinter.StringVar(value='10.0') lbl_meas_dist = tkinter.Label( - label_frame_settings, - text='Measured mirror-screen distance', - font=('calibre', 10, 'bold'), + label_frame_settings, text='Measured mirror-screen distance', font=('calibre', 10, 'bold') ) entry_meas_dist = tkinter.Entry( - label_frame_settings, - textvariable=self.var_meas_dist, - font=('calibre', 10, 'normal'), - width=20, + label_frame_settings, textvariable=self.var_meas_dist, font=('calibre', 10, 'normal'), width=20 ) TkToolTip( entry_meas_dist, @@ -499,19 +398,11 @@ def _create_layout(self) -> None: # Measurement name self.var_meas_name = tkinter.StringVar(value='') - lbl_meas_name = tkinter.Label( - label_frame_settings, text='Measurement name', font=('calibre', 10, 'bold') - ) + lbl_meas_name = tkinter.Label(label_frame_settings, text='Measurement name', font=('calibre', 10, 'bold')) entry_meas_name = tkinter.Entry( - label_frame_settings, - textvariable=self.var_meas_name, - font=('calibre', 10, 'normal'), - width=20, - ) - TkToolTip( - entry_meas_name, - 'The name of the measurement to be saved in the measurement HDF file.', + label_frame_settings, textvariable=self.var_meas_name, font=('calibre', 10, 'normal'), width=20 ) + TkToolTip(entry_meas_name, 'The name of the measurement to be saved in the measurement HDF file.') lbl_meas_name.grid(row=r, column=1, pady=2, padx=2, sticky='nsw', columnspan=2) r += 1 @@ -563,9 +454,7 @@ def _enable_btns(self) -> None: def _check_system_loaded(self) -> None: """Checks if the system class has been instantiated""" if self.system is None: - 
messagebox.showerror( - 'Error', 'Both ImageAcquisiton and ImageProjection must both be loaded.' - ) + messagebox.showerror('Error', 'Both ImageAcquisiton and ImageProjection must both be loaded.') return def _check_calibration_loaded(self) -> bool: @@ -573,9 +462,7 @@ def _check_calibration_loaded(self) -> bool: returns False and shows error message if not loaded. """ if self.calibration is None: # Not loaded - messagebox.showerror( - 'Error', 'Camera-Projector calibration must be loaded/performed.' - ) + messagebox.showerror('Error', 'Camera-Projector calibration must be loaded/performed.') return False else: # Loaded return True @@ -587,17 +474,12 @@ def _load_system_elements(self) -> None: """ if self.image_acquisition is not None and self.image_projection is not None: - self.system = SystemSofastFringe( - self.image_projection, self.image_acquisition - ) + self.system = SystemSofastFringe(self.image_projection, self.image_acquisition) def _save_measurement_data(self, file: str) -> None: """Saves last measurement to HDF file""" # Check measurement images have been captured - if ( - self.system.fringe_images_captured is None - or self.system.mask_images_captured is None - ): + if self.system.fringe_images_captured is None or self.system.mask_images_captured is None: raise ValueError('Measurement data has not been captured.') elif self.calibration is None: raise ValueError('Calibration data has not been processed.') @@ -637,9 +519,7 @@ def load_image_acquisition(self) -> None: def load_image_projection(self) -> None: """Loads and displays ImageProjection""" # Get file name - file = askopenfilename( - defaultextension='.h5', filetypes=[("HDF5 File", "*.h5")] - ) + file = askopenfilename(defaultextension='.h5', filetypes=[("HDF5 File", "*.h5")]) # Load file and display if file != '': @@ -648,9 +528,7 @@ def load_image_projection(self) -> None: # Create new window projector_root = tkinter.Toplevel(self.root) # Show window - self.image_projection = ImageProjection( - projector_root, image_projection_data - ) + self.image_projection = ImageProjection(projector_root, image_projection_data) print(f'ImageProjection loaded:\n {file}') @@ -684,9 +562,7 @@ def run_measurement(self) -> None: """Runs data collect and saved data.""" # Get save file name - file = asksaveasfilename( - defaultextension='.h5', filetypes=[("HDF5 File", "*.h5")] - ) + file = asksaveasfilename(defaultextension='.h5', filetypes=[("HDF5 File", "*.h5")]) if file == '': return @@ -741,12 +617,8 @@ def run_exposure_cal(self) -> None: def run_gray_levels_cal(self) -> None: """Runs the projector-camera intensity calibration""" # Get save file name - file_default = dt.datetime.now().strftime( - 'projector_camera_response_%Y_%m_%d-%H_%M_%S.h5' - ) - file = asksaveasfilename( - initialfile=file_default, filetypes=[("HDF5 File", "*.h5")] - ) + file_default = dt.datetime.now().strftime('projector_camera_response_%Y_%m_%d-%H_%M_%S.h5') + file = asksaveasfilename(initialfile=file_default, filetypes=[("HDF5 File", "*.h5")]) if file == '': print('No file selected.') @@ -762,29 +634,20 @@ def run_gray_levels_cal(self) -> None: # Capture images def _func_0(): print('Calibrating...') - self.system.run_display_camera_response_calibration( - res=10, run_next=self.system.run_next_in_queue - ) + self.system.run_display_camera_response_calibration(res=10, run_next=self.system.run_next_in_queue) # Process data def _func_1(): print('Processing calibration data') # Get calibration images from System - calibration_images = 
self.system.get_calibration_images()[ - 0 - ] # only calibrating one camera + calibration_images = self.system.get_calibration_images()[0] # only calibrating one camera # Load calibration object - self.calibration = cal_object.from_data( - calibration_images, self.system.calibration_display_values - ) + self.calibration = cal_object.from_data(calibration_images, self.system.calibration_display_values) # Save calibration object self.calibration.save_to_hdf(file) # Save calibration raw data data = [self.system.calibration_display_values, calibration_images] - datasets = [ - 'CalibrationRawData/display_values', - 'CalibrationRawData/images', - ] + datasets = ['CalibrationRawData/display_values', 'CalibrationRawData/images'] hdf5_tools.save_hdf5_datasets(data, datasets, file) # Show crosshairs self.show_crosshairs() @@ -800,17 +663,13 @@ def _func_1(): def load_gray_levels_cal(self) -> None: """Loads saved results of a projector-camera intensity calibration""" # Get file name - file = askopenfilename( - defaultextension='.h5', filetypes=[("HDF5 File", "*.h5")] - ) + file = askopenfilename(defaultextension='.h5', filetypes=[("HDF5 File", "*.h5")]) if file == '': return # Load file - cal_type = hdf5_tools.load_hdf5_datasets( - ['Calibration/calibration_type'], file - )['calibration_type'] + cal_type = hdf5_tools.load_hdf5_datasets(['Calibration/calibration_type'], file)['calibration_type'] if cal_type == 'ImageCalibrationGlobal': self.calibration = ImageCalibrationGlobal.load_from_hdf(file) @@ -848,8 +707,7 @@ def set_exposure(self) -> None: # Get new exposure_time value new_exp = simpledialog.askfloat( - title="Set exposure value", - prompt=f"Current exposure: {cur_exp:.1f}. New Value:", + title="Set exposure value", prompt=f"Current exposure: {cur_exp:.1f}. 
New Value:" ) # Set new exposure_time value @@ -862,9 +720,7 @@ def save_snapshot(self) -> None: frame = self._get_frame() # Get save file name - file = asksaveasfilename( - defaultextension='.png', filetypes=[("All tpyes", "*.*")] - ) + file = asksaveasfilename(defaultextension='.png', filetypes=[("All tpyes", "*.*")]) # Save image if file != '': @@ -892,9 +748,7 @@ def show_snapshot(self) -> None: ax2 = fig.add_subplot(122) # Plot image - self._plot_image( - ax1, frame_rgb, 'Current camera view', 'X (pixels)', 'Y (pixels)' - ) + self._plot_image(ax1, frame_rgb, 'Current camera view', 'X (pixels)', 'Y (pixels)') # Plot histogram self._plot_hist(ax2, frame) diff --git a/opencsp/app/sofast/lib/BlobIndex.py b/opencsp/app/sofast/lib/BlobIndex.py index e1334bb0..ad1de50b 100644 --- a/opencsp/app/sofast/lib/BlobIndex.py +++ b/opencsp/app/sofast/lib/BlobIndex.py @@ -25,9 +25,7 @@ class BlobIndex: To filter bad points (experimental) """ - def __init__( - self, points: Vxy, x_min: int, x_max: int, y_min: int, y_max: int - ) -> 'BlobIndex': + def __init__(self, points: Vxy, x_min: int, x_max: int, y_min: int, y_max: int) -> 'BlobIndex': """Instantiates BlobIndex class Parameters @@ -47,9 +45,7 @@ def __init__( self._idx_x = np.zeros(self._num_pts) * np.nan self._idx_y = np.zeros(self._num_pts) * np.nan self._is_assigned = np.zeros(self._num_pts, dtype=bool) - self._neighbor_dists = ( - np.zeros((self._num_pts, 4)) * np.nan - ) # left, right, up, down + self._neighbor_dists = np.zeros((self._num_pts, 4)) * np.nan # left, right, up, down self.search_thresh = 5.0 # pixels self.search_perp_axis_ratio = 3.0 @@ -60,12 +56,8 @@ def __init__( idx_x_vec = np.arange(x_min, x_max + 1) # index idx_y_vec = np.arange(y_min, y_max + 1) # index self._idx_x_mat, self._idx_y_mat = np.meshgrid(idx_x_vec, idx_y_vec) # index - self._points_mat = ( - np.zeros((y_max - y_min + 1, x_max - x_min + 1, 2)) * np.nan - ) # pixels - self._point_indices_mat = ( - np.zeros((y_max - y_min + 1, x_max - x_min + 1)) * np.nan # index - ) + self._points_mat = np.zeros((y_max - y_min + 1, x_max - x_min + 1, 2)) * np.nan # pixels + self._point_indices_mat = np.zeros((y_max - y_min + 1, x_max - x_min + 1)) * np.nan # index def _get_assigned_point_indices(self) -> np.ndarray[int]: """Returns found point indices""" @@ -115,15 +107,11 @@ def _nearest_unassigned_idx_from_xy_point_direction( # Calculate xy deltas for expected/current point points_rel = points - pt_cur # Vectors, current point to all points v_search = pt_exp - pt_cur # Vector, from current point to expected point - v_perp = v_search.rotate( - np.array([[0, -1], [1, 0]]) - ) # Vector, perpendicular to search axis + v_perp = v_search.rotate(np.array([[0, -1], [1, 0]])) # Vector, perpendicular to search axis dists_axis = v_search.dot(points_rel) # Distance of points along search axis dists_perp = np.abs(v_perp.dot(points_rel)) # Distance of points from line # Make mask of valid points - mask = np.logical_and( - dists_axis > 0, dists_perp / dists_axis <= self.search_perp_axis_ratio - ) + mask = np.logical_and(dists_axis > 0, dists_perp / dists_axis <= self.search_perp_axis_ratio) # Check there are points to find if mask.sum() == 0: return False, (None, None) @@ -157,9 +145,7 @@ def _assign(self, idx_pt: int, idx_x: int, idx_y: int) -> None: self._idx_y[idx_pt] = idx_y self._is_assigned[idx_pt] = True # Assign matrices - self._points_mat[idx_y + self._offset_y, idx_x + self._offset_x] = self._points[ - idx_pt - ].data.squeeze() + self._points_mat[idx_y + self._offset_y, idx_x + 
self._offset_x] = self._points[idx_pt].data.squeeze() self._point_indices_mat[idx_y + self._offset_y, idx_x + self._offset_x] = idx_pt lt.debug(f'Blob number {idx_pt:d} was assigned ({idx_x:d}, {idx_y:d})') @@ -179,9 +165,7 @@ def _unassign(self, idx_pt: int) -> None: if np.isnan(idx_mat_x) or np.isnan(idx_mat_y): return - self._points_mat[int(idx_mat_y), int(idx_mat_x)] = self._points[ - idx_pt - ].data.squeeze() + self._points_mat[int(idx_mat_y), int(idx_mat_x)] = self._points[idx_pt].data.squeeze() self._point_indices_mat[int(idx_mat_y), int(idx_mat_x)] = idx_pt # Unassign vectors @@ -201,9 +185,7 @@ def _assign_center(self, pt_origin: Vxy) -> None: """ idx, dist = self._nearest_unassigned_idx_from_xy_point(pt_origin) if dist > self.search_thresh: - warn( - f'Assigning point {idx:d} to index (0, 0) resulted in {dist:.2f} pixels error.' - ) + warn(f'Assigning point {idx:d} to index (0, 0) resulted in {dist:.2f} pixels error.') self._assign(idx, 0, 0) def _find_nearest_in_direction( @@ -236,32 +218,20 @@ def _find_nearest_in_direction( idx_y = self._idx_y[idx_pt] if direction == 'right': - mask = np.logical_and( - unassigned_deltas.x > 0, - unassigned_deltas.x > (2 * np.abs(unassigned_deltas.y)), - ) + mask = np.logical_and(unassigned_deltas.x > 0, unassigned_deltas.x > (2 * np.abs(unassigned_deltas.y))) idx_x_out = idx_x + 1 idx_y_out = idx_y elif direction == 'left': - mask = np.logical_and( - unassigned_deltas.x < 0, - -unassigned_deltas.x > (2 * np.abs(unassigned_deltas.y)), - ) + mask = np.logical_and(unassigned_deltas.x < 0, -unassigned_deltas.x > (2 * np.abs(unassigned_deltas.y))) idx_x_out = idx_x - 1 idx_y_out = idx_y elif direction == 'up': - mask = np.logical_and( - unassigned_deltas.y > 0, - unassigned_deltas.y > (2 * np.abs(unassigned_deltas.x)), - ) + mask = np.logical_and(unassigned_deltas.y > 0, unassigned_deltas.y > (2 * np.abs(unassigned_deltas.x))) idx_x_out = idx_x idx_y_out = idx_y + 1 # Down elif direction == 'down': - mask = np.logical_and( - unassigned_deltas.y < 0, - -unassigned_deltas.y > (2 * np.abs(unassigned_deltas.x)), - ) + mask = np.logical_and(unassigned_deltas.y < 0, -unassigned_deltas.y > (2 * np.abs(unassigned_deltas.x))) idx_x_out = idx_x idx_y_out = idx_y - 1 @@ -334,8 +304,7 @@ def _extend_data(self, direction: Literal['x', 'y'], step: Literal[1, -1]) -> No idxs_a = self._idx_x idxs_b = self._idx_y else: - lt.error_and_raise( - ValueError, f'Given "direction" must be either "x" or "y", not {direction}') + lt.error_and_raise(ValueError, f'Given "direction" must be either "x" or "y", not {direction}') # Step through direction # TODO Can speed up with matrix data storage @@ -365,10 +334,8 @@ def _extend_data(self, direction: Literal['x', 'y'], step: Literal[1, -1]) -> No pt_cur = self._points[idx_new] # Calculate deltas pt_exp = self._exp_pt_from_pt_pair(pt_cur, pt_prev) - success, (idx_new, dist) = ( - self._nearest_unassigned_idx_from_xy_point_direction( - pt_cur, pt_exp - ) + success, (idx_new, dist) = self._nearest_unassigned_idx_from_xy_point_direction( + pt_cur, pt_exp ) if not success: break @@ -417,12 +384,8 @@ def _filter_bad_points(self) -> None: mask_bad_pixels_y = np.abs(del_2_y) > thresh # Calculate mask of bad pixels using x and y derivatives - mask_bad_pixels_x = np.concatenate( - (np.zeros((ny, 2, 2), dtype=bool), mask_bad_pixels_x), axis=1 - ) - mask_bad_pixels_y = np.concatenate( - (np.zeros((2, nx, 2), dtype=bool), mask_bad_pixels_y), axis=0 - ) + mask_bad_pixels_x = np.concatenate((np.zeros((ny, 2, 2), dtype=bool), 
mask_bad_pixels_x), axis=1) + mask_bad_pixels_y = np.concatenate((np.zeros((2, nx, 2), dtype=bool), mask_bad_pixels_y), axis=0) # Combine into one mask mask_bad_pixels = (mask_bad_pixels_x + mask_bad_pixels_y).max(2) @@ -475,9 +438,7 @@ def plot_points_labels(self, labels: bool = False) -> None: plt.scatter(*self._points[self._is_assigned].data, color='blue') if labels: for x, y, pt in zip( - self._idx_x[self._is_assigned], - self._idx_y[self._is_assigned], - self._points[self._is_assigned], + self._idx_x[self._is_assigned], self._idx_y[self._is_assigned], self._points[self._is_assigned] ): plt.text(*pt.data, f'({x:.0f}, {y:.0f})') diff --git a/opencsp/app/sofast/lib/CalibrateDisplayShape.py b/opencsp/app/sofast/lib/CalibrateDisplayShape.py index 6ea151c4..5a07650a 100644 --- a/opencsp/app/sofast/lib/CalibrateDisplayShape.py +++ b/opencsp/app/sofast/lib/CalibrateDisplayShape.py @@ -113,8 +113,7 @@ def __init__(self, data_input: DataInput) -> 'CalibrateDisplayShape': # Load cal params cal_pattern_params = CalParams( - self.data_input.image_projection_data['size_x'], - self.data_input.image_projection_data['size_y'], + self.data_input.image_projection_data['size_x'], self.data_input.image_projection_data['size_y'] ) # Initialize calculation data structure @@ -171,10 +170,7 @@ def interpolate_camera_pixel_positions(self) -> None: # Interpolate full resolution points pts_uv_pixel_full.append( interp_xy_screen_positions( - im_x, - im_y, - self.data_calculation.pts_screen_frac_x, - self.data_calculation.pts_screen_frac_y, + im_x, im_y, self.data_calculation.pts_screen_frac_x, self.data_calculation.pts_screen_frac_y ) ) @@ -184,8 +180,7 @@ def interpolate_camera_pixel_positions(self) -> None: # Save number of points self.data_calculation.num_points_screen = ( - self.data_calculation.pts_screen_frac_x.size - * self.data_calculation.pts_screen_frac_y.size + self.data_calculation.pts_screen_frac_x.size * self.data_calculation.pts_screen_frac_y.size ) self.data_calculation.num_poses = len(pts_uv_pixel_ori) @@ -218,28 +213,13 @@ def locate_camera_positions(self) -> None: tvecs_0 = np.array(tvecs_0) # Format data for optimization - point_indices = np.tile( - np.arange(pts_used_idxs.size), self.data_calculation.num_poses - ) - camera_indices = np.repeat( - np.arange(self.data_calculation.num_poses), pts_used_idxs.size - ) - points_2d = np.vstack( - [ - vec[pts_used_idxs].data.T - for vec in self.data_calculation.pts_uv_pixel_orientation - ] - ) + point_indices = np.tile(np.arange(pts_used_idxs.size), self.data_calculation.num_poses) + camera_indices = np.repeat(np.arange(self.data_calculation.num_poses), pts_used_idxs.size) + points_2d = np.vstack([vec[pts_used_idxs].data.T for vec in self.data_calculation.pts_uv_pixel_orientation]) # Calculate error after rough camera alignment errors_0 = ph.reprojection_errors( - rvecs_0, - tvecs_0, - pts_obj_ori_0, - self.data_input.camera, - camera_indices, - point_indices, - points_2d, + rvecs_0, tvecs_0, pts_obj_ori_0, self.data_input.camera, camera_indices, point_indices, points_2d ) error_0 = np.sqrt(np.mean(errors_0**2)) lt.info(f'Reprojection error stage 1 rough alignment: {error_0:.2f} pixels') @@ -264,13 +244,7 @@ def locate_camera_positions(self) -> None: # Calculate error errors_1 = ph.reprojection_errors( - rvecs_1, - tvecs_1, - pts_obj_ori_1, - self.data_input.camera, - camera_indices, - point_indices, - points_2d, + rvecs_1, tvecs_1, pts_obj_ori_1, self.data_input.camera, camera_indices, point_indices, points_2d ) error_1 = np.sqrt(np.mean(errors_1**2)) 
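# Illustrative sketch (plain numpy, values hypothetical): the scalar errors logged for
# stage 1 (rough alignment) and stage 2 (bundle adjustment) are the root-mean-square of
# the per-point pixel residuals returned by ph.reprojection_errors.
import numpy as np

errors = np.array([0.4, 0.9, 1.3, 0.7])   # hypothetical per-point residuals, pixels
rms_error = np.sqrt(np.mean(errors**2))   # same reduction used for error_0 and error_1
print(f'Reprojection error: {rms_error:.2f} pixels')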
lt.info(f'Reprojection error stage 2 bundle adjustment: {error_1:.2f} pixels') @@ -291,18 +265,10 @@ def calculate_3d_screen_points(self) -> None: v_screen_cam_screen = Vxyz(np.concatenate(v_screen_cam_screen, 1)) # Find pointing vectors for points in screen coordinates - u_cam_pt_screen_mat = np.zeros( - ( - self.data_calculation.num_points_screen, - self.data_calculation.num_poses, - 3, - ) - ) + u_cam_pt_screen_mat = np.zeros((self.data_calculation.num_points_screen, self.data_calculation.num_poses, 3)) # Loop through all camera poses - for idx_pose, (pts, rot) in enumerate( - zip(self.data_calculation.pts_uv_pixel, self.data_calculation.rvecs) - ): + for idx_pose, (pts, rot) in enumerate(zip(self.data_calculation.pts_uv_pixel, self.data_calculation.rvecs)): # Get camera pointing vectors in camera coordinates v_cam_pt_cam = self.data_input.camera.vector_from_pixel(pts) @@ -314,17 +280,12 @@ def calculate_3d_screen_points(self) -> None: # Calculate high-res intersection points v_screen_pt_screen_mat = np.zeros((self.data_calculation.num_points_screen, 3)) - intersection_dists = np.zeros( - (self.data_calculation.num_points_screen, self.data_calculation.num_poses) - ) + intersection_dists = np.zeros((self.data_calculation.num_points_screen, self.data_calculation.num_poses)) - for idx_pt in tqdm( - range(self.data_calculation.num_points_screen), desc='Intersecting rays' - ): + for idx_pt in tqdm(range(self.data_calculation.num_points_screen), desc='Intersecting rays'): # Intersect points pt, dists = ph.nearest_ray_intersection( - v_screen_cam_screen, # length N - Vxyz(u_cam_pt_screen_mat[idx_pt].T), # length N + v_screen_cam_screen, Vxyz(u_cam_pt_screen_mat[idx_pt].T) # length N # length N ) v_screen_pt_screen_mat[idx_pt] = pt.data.squeeze() @@ -336,9 +297,7 @@ def calculate_3d_screen_points(self) -> None: # Save data self.data_calculation.pts_xyz_screen_aligned = Vxyz(v_screen_pt_screen_mat.T) self.data_calculation.intersection_dists_mean = dist_error_mean - self.data_calculation.intersection_points_mask = ( - dist_error_mean < self.data_input.ray_intersection_threshold - ) + self.data_calculation.intersection_points_mask = dist_error_mean < self.data_input.ray_intersection_threshold def assemble_xyz_data_into_images(self) -> None: """Assembles data into 2d arrays""" @@ -364,8 +323,7 @@ def get_data(self) -> dict: - pts_xyz_screen_coords: Vxyz """ pts_x_screen_frac, pts_y_screen_frac = np.meshgrid( - self.data_calculation.pts_screen_frac_x, - self.data_calculation.pts_screen_frac_y, + self.data_calculation.pts_screen_frac_x, self.data_calculation.pts_screen_frac_y ) # Screen fractions pts_y_screen_frac = np.flip(pts_y_screen_frac, axis=0) # Flip y coordinate only pts_xy_screen_fraction = Vxy( @@ -374,15 +332,10 @@ def get_data(self) -> dict: ] ) pts_xyz_screen = Vxyz( - self.data_calculation.pts_xyz_screen_aligned.data[ - :, self.data_calculation.intersection_points_mask - ] + self.data_calculation.pts_xyz_screen_aligned.data[:, self.data_calculation.intersection_points_mask] ) - return { - 'pts_xy_screen_fraction': pts_xy_screen_fraction, - 'pts_xyz_screen_coords': pts_xyz_screen, - } + return {'pts_xy_screen_fraction': pts_xy_screen_fraction, 'pts_xyz_screen_coords': pts_xyz_screen} def save_data_as_hdf(self, file: str) -> None: """Saves distortion data to given HDF file""" @@ -416,9 +369,7 @@ def visualize_annotated_camera_images(self) -> None: """Annotates images of screen with screen points""" # Visualize each camera pose for idx_pose in range(self.data_calculation.num_poses): - fig 
= plt.figure( - f'CalibrationScreenShape_Annotated_Camera_{idx_pose:d}_Images' - ) + fig = plt.figure(f'CalibrationScreenShape_Annotated_Camera_{idx_pose:d}_Images') self.figures.append(fig) ax = fig.gca() # Get measurement @@ -427,17 +378,14 @@ def visualize_annotated_camera_images(self) -> None: ax.imshow(meas.mask_images[..., 1], cmap='gray') # Plot points for idx_point, pt in zip( - self.data_calculation.cal_pattern_params.index, - self.data_calculation.pts_uv_pixel_orientation[idx_pose], + self.data_calculation.cal_pattern_params.index, self.data_calculation.pts_uv_pixel_orientation[idx_pose] ): ax.scatter(*pt.data, color='k', s=20) ax.text(*(pt + Vxy([5, -10])).data, idx_point, size=8, color='w') def plot_ray_intersection_errors(self) -> None: """Plots camera ray intersection errors""" - fig = plt.figure( - 'CalibrationScreenShape_Ray_Intersection_Errors', figsize=(9, 3) - ) + fig = plt.figure('CalibrationScreenShape_Ray_Intersection_Errors', figsize=(9, 3)) self.figures.append(fig) ax = fig.gca() @@ -465,14 +413,9 @@ def visualize_final_scenario(self) -> None: x = pts_screen[mask_int_pts].x y = pts_screen[mask_int_pts].y + ax.scatter(*pts_screen[mask_int_pts].data[:2], marker='.', c='r', s=1, alpha=0.3) # Intersection points ax.scatter( - *pts_screen[mask_int_pts].data[:2], marker='.', c='r', s=1, alpha=0.3 - ) # Intersection points - ax.scatter( - *pts_screen_orientation.data[:2], - marker='s', - s=20, - label='Calibration Points', + *pts_screen_orientation.data[:2], marker='s', s=20, label='Calibration Points' ) # Screen calibration points ax.set_title('Screen Points Summary') ax.grid() @@ -518,16 +461,13 @@ def format_image(axis: plt.Axes, im): fig = plt.figure('CalibrationScreenShape_Screen_Map_Z') self.figures.append(fig) ax = fig.gca() - im = ax.imshow( - self.data_calculation.im_z_screen_pts * 1000, extent=extent, cmap='jet' - ) + im = ax.imshow(self.data_calculation.im_z_screen_pts * 1000, extent=extent, cmap='jet') format_image(ax, im) im.set_clim(-z_clim, z_clim) ax.set_title('Z (mm)') def run_calibration(self) -> None: - """Runs a complete calibration - """ + """Runs a complete calibration""" # Run calibration self.interpolate_camera_pixel_positions() self.locate_camera_positions() @@ -543,9 +483,7 @@ def run_calibration(self) -> None: self.visualize_xyz_screen_maps() -def interp_xy_screen_positions( - im_x: np.ndarray, im_y: np.ndarray, x_sc: np.ndarray, y_sc: np.ndarray -) -> Vxy: +def interp_xy_screen_positions(im_x: np.ndarray, im_y: np.ndarray, x_sc: np.ndarray, y_sc: np.ndarray) -> Vxy: """ Calculates the interpolated XY screen positions given X/Y fractional screen maps and X/Y interpolation vectors. 
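# Standalone sketch of the per-row interpolation idea used by interp_xy_screen_positions
# (synthetic values): each image row gives a measured "x screen fraction vs. x pixel"
# relationship, which is inverted with interp1d to find the pixel location of each
# requested screen-fraction sample; NaN is returned outside the measured range.
import numpy as np
from scipy import interpolate

x_px_vals = np.arange(640) + 0.5        # x pixel centers for one image row
x_sc_vals = np.linspace(0.0, 1.0, 640)  # measured x screen fraction at each pixel
x_sc = np.array([0.25, 0.50, 0.75])     # requested x screen-fraction samples

f = interpolate.interp1d(x_sc_vals, x_px_vals, bounds_error=False, fill_value=np.nan)
row = f(x_sc)  # x pixel coordinate of each requested sample in this row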
@@ -573,12 +511,8 @@ def interp_xy_screen_positions( y_px = np.arange(im_y.shape[0]) + 0.5 # image pixels # Interpolate in X direction for every pixel row of image - x_px_y_px_x_sc = ( - np.zeros((y_px.size, x_sc.size)) * np.nan - ) # x pixel data, (y pixel, x screen) size array - y_px_y_px_x_sc = ( - np.zeros((y_px.size, x_sc.size)) * np.nan - ) # y pixel data, (y pixel, x screen) size array + x_px_y_px_x_sc = np.zeros((y_px.size, x_sc.size)) * np.nan # x pixel data, (y pixel, x screen) size array + y_px_y_px_x_sc = np.zeros((y_px.size, x_sc.size)) * np.nan # y pixel data, (y pixel, x screen) size array for idx_y in range(y_px.size): # Get x slices of x and y position values from images x_sc_vals = im_x[idx_y, :] # x screen fractions @@ -613,26 +547,18 @@ def interp_xy_screen_positions( x_px_vals = x_px_vals[mask_noise] # Interpolate x pixel coordinate - f = interpolate.interp1d( - x_sc_vals, x_px_vals, bounds_error=False, fill_value=np.nan - ) + f = interpolate.interp1d(x_sc_vals, x_px_vals, bounds_error=False, fill_value=np.nan) row = f(x_sc) # x pixel coordinate x_px_y_px_x_sc[idx_y, :] = row # Interpolate y screen fraction value - f = interpolate.interp1d( - x_sc_vals, y_sc_vals, bounds_error=False, fill_value=np.nan - ) + f = interpolate.interp1d(x_sc_vals, y_sc_vals, bounds_error=False, fill_value=np.nan) row = f(x_sc) y_px_y_px_x_sc[idx_y, :] = row # Interpolate in Y direction for every x-screen sample point column of image - x_px_y_sc_x_sc = np.zeros( - (y_sc.size, x_sc.size) - ) # x pixel data, (y screen, x screen) size array - y_px_y_sc_x_sc = np.zeros( - (y_sc.size, x_sc.size) - ) # y pixel data, (y screen, x screen) size array + x_px_y_sc_x_sc = np.zeros((y_sc.size, x_sc.size)) # x pixel data, (y screen, x screen) size array + y_px_y_sc_x_sc = np.zeros((y_sc.size, x_sc.size)) # y pixel data, (y screen, x screen) size array for idx_x in range(x_sc.size): # Get active pixel locations y_sc_vals = y_px_y_px_x_sc[:, idx_x] @@ -656,16 +582,12 @@ def interp_xy_screen_positions( y_px_vals = y_px_vals[mask_noise] # Interpolate x pixel coordinate - f = interpolate.interp1d( - y_sc_vals, x_px_vals, bounds_error=False, fill_value=np.nan - ) + f = interpolate.interp1d(y_sc_vals, x_px_vals, bounds_error=False, fill_value=np.nan) col = f(y_sc) x_px_y_sc_x_sc[:, idx_x] = col # Interpolate y pixel coordinate - f = interpolate.interp1d( - y_sc_vals, y_px_vals, bounds_error=False, fill_value=np.nan - ) + f = interpolate.interp1d(y_sc_vals, y_px_vals, bounds_error=False, fill_value=np.nan) col = f(y_sc) y_px_y_sc_x_sc[:, idx_x] = col diff --git a/opencsp/app/sofast/lib/CalibrateSofastFixedDots.py b/opencsp/app/sofast/lib/CalibrateSofastFixedDots.py index fbd2d140..ba6e4954 100644 --- a/opencsp/app/sofast/lib/CalibrateSofastFixedDots.py +++ b/opencsp/app/sofast/lib/CalibrateSofastFixedDots.py @@ -126,9 +126,7 @@ def __init__( self._dot_image_points_indices: Vxy self._dot_image_points_indices_x: ndarray self._dot_image_points_indices_y: ndarray - self._dot_points_xyz_mat = ( - np.ndarray((x_max - x_min + 1, y_max - y_min + 1, 3)) * np.nan - ) + self._dot_points_xyz_mat = np.ndarray((x_max - x_min + 1, y_max - y_min + 1, 3)) * np.nan self._num_dots: int self._rots_cams: list[Rotation] = [] self._vecs_cams: list[Vxyz] = [] @@ -145,9 +143,7 @@ def _find_dots_in_images(self) -> None: pts = ip.detect_blobs(self._images[idx_image].image, self.blob_detector) # Index all found points - blob_index = BlobIndex( - pts, -self._x_max, -self._x_min, self._y_min, self._y_max - ) + blob_index = BlobIndex(pts, 
-self._x_max, -self._x_min, self._y_min, self._y_max) blob_index.search_thresh = self.blob_search_threshold blob_index.run(origin_pt) points, indices = blob_index.get_data_mat() @@ -168,9 +164,7 @@ def _find_dots_in_images(self) -> None: for idx in range(self._num_images): dot_image_points_x = dot_image_points_xy_mat[idx][mask_all_assigned, 0] dot_image_points_y = dot_image_points_xy_mat[idx][mask_all_assigned, 1] - self._dot_image_points_xy.append( - Vxy((dot_image_points_x, dot_image_points_y)) - ) + self._dot_image_points_xy.append(Vxy((dot_image_points_x, dot_image_points_y))) # Save common indices as vector indices_x = indices[mask_all_assigned, 0] @@ -187,9 +181,7 @@ def _calculate_camera_poses(self) -> None: # Calculate camera pose ret = self._images[cam_idx].attempt_calculate_pose(True) if ret == -1: - lt.critical_and_raise( - ValueError, f'Camera pose {cam_idx:d} not calculated successfully' - ) + lt.critical_and_raise(ValueError, f'Camera pose {cam_idx:d} not calculated successfully') self._rots_cams.append(Rotation.from_rotvec(self._images[cam_idx].rvec)) self._vecs_cams.append(Vxyz(self._images[cam_idx].tvec)) @@ -197,18 +189,10 @@ def _calculate_camera_poses(self) -> None: # Calculate reproj error errors = self._images[cam_idx].calc_reprojection_errors() # Log errors - lt.info( - f'Camera {cam_idx:d} mean corner reprojection error: {errors.mean():.2f} pixels' - ) - lt.info( - f'Camera {cam_idx:d} min corner reprojection error: {errors.min():.2f} pixels' - ) - lt.info( - f'Camera {cam_idx:d} max corner reprojection error: {errors.mean():.2f} pixels' - ) - lt.info( - f'Camera {cam_idx:d} STDEV corner reprojection error: {errors.mean():.2f} pixels' - ) + lt.info(f'Camera {cam_idx:d} mean corner reprojection error: {errors.mean():.2f} pixels') + lt.info(f'Camera {cam_idx:d} min corner reprojection error: {errors.min():.2f} pixels') + lt.info(f'Camera {cam_idx:d} max corner reprojection error: {errors.mean():.2f} pixels') + lt.info(f'Camera {cam_idx:d} STDEV corner reprojection error: {errors.mean():.2f} pixels') def _intersect_rays(self) -> None: """Intersects camera rays to find dot xyz locations""" @@ -217,10 +201,7 @@ def _intersect_rays(self) -> None: for dot_idx in tqdm(range(self._num_dots), desc='Intersecting rays'): dot_image_pts_xy = [pt[dot_idx] for pt in self._dot_image_points_xy] point, dists = ph.triangulate( - [self._camera] * self._num_images, - self._rots_cams, - self._vecs_cams, - dot_image_pts_xy, + [self._camera] * self._num_images, self._rots_cams, self._vecs_cams, dot_image_pts_xy ) points_xyz.append(point) int_dists.append(dists) @@ -233,20 +214,12 @@ def _intersect_rays(self) -> None: self._dot_intersection_dists = np.array(int_dists) lt.info( - 'Dot ray intersections mean intersection error: ' - f'{self._dot_intersection_dists.mean() * 1000:.1f} mm' + 'Dot ray intersections mean intersection error: ' f'{self._dot_intersection_dists.mean() * 1000:.1f} mm' ) + lt.info('Dot ray intersections min intersection error: ' f'{self._dot_intersection_dists.min() * 1000:.1f} mm') + lt.info('Dot ray intersections max intersection error: ' f'{self._dot_intersection_dists.max() * 1000:.1f} mm') lt.info( - 'Dot ray intersections min intersection error: ' - f'{self._dot_intersection_dists.min() * 1000:.1f} mm' - ) - lt.info( - 'Dot ray intersections max intersection error: ' - f'{self._dot_intersection_dists.max() * 1000:.1f} mm' - ) - lt.info( - 'Dot ray intersections STDEV of intersection error: ' - f'{self._dot_intersection_dists.std() * 1000:.1f} mm' + 'Dot ray 
intersections STDEV of intersection error: ' f'{self._dot_intersection_dists.std() * 1000:.1f} mm' ) def _plot_common_dots(self) -> None: @@ -254,9 +227,7 @@ def _plot_common_dots(self) -> None: for idx_image in range(self._num_images): fig = plt.figure(f'image_{idx_image:d}_annotated_dots') plt.imshow(self._images[idx_image].image, cmap='gray') - plt.scatter( - *self._dot_image_points_xy[idx_image].data, marker='.', color='red' - ) + plt.scatter(*self._dot_image_points_xy[idx_image].data, marker='.', color='red') self.figures.append(fig) def _plot_marker_corners(self) -> None: @@ -272,9 +243,7 @@ def _plot_located_cameras_and_points(self) -> None: """Plots all input xyz points and located cameras""" fig = plt.figure('cameras_and_points') ax = fig.add_subplot(111, projection='3d') - ph.plot_pts_3d( - ax, self._pts_xyz_corners.data.T, self._rots_cams, self._vecs_cams - ) + ph.plot_pts_3d(ax, self._pts_xyz_corners.data.T, self._rots_cams, self._vecs_cams) ax.set_xlabel('x (meter)') ax.set_ylabel('y (meter)') ax.set_zlabel('z (meter)') @@ -310,12 +279,7 @@ def _plot_xyz_indices(self) -> None: fig = plt.figure('dot_index_map') plt.imshow( self._dot_points_xyz_mat[..., 2], - extent=( - self._x_min - 0.5, - self._x_max + 0.5, - self._y_min - 0.5, - self._y_max + 0.5, - ), + extent=(self._x_min - 0.5, self._x_max + 0.5, self._y_min - 0.5, self._y_max + 0.5), origin='lower', ) cb = plt.colorbar() @@ -337,18 +301,12 @@ def get_data(self) -> tuple[ndarray, ndarray, ndarray]: ndarray (N, M, 3) array of dot xyz locations """ - return ( - self._dot_image_points_indices_x, - self._dot_image_points_indices_y, - self._dot_points_xyz_mat, - ) + return (self._dot_image_points_indices_x, self._dot_image_points_indices_y, self._dot_points_xyz_mat) def get_dot_location_object(self) -> DotLocationsFixedPattern: """Returns DotLocationsFixedPattern object with calibrated data""" return DotLocationsFixedPattern( - self._dot_image_points_indices_x, - self._dot_image_points_indices_y, - self._dot_points_xyz_mat, + self._dot_image_points_indices_x, self._dot_image_points_indices_y, self._dot_points_xyz_mat ) def save_figures(self, dir_save: str) -> None: diff --git a/opencsp/app/sofast/lib/DefinitionEnsemble.py b/opencsp/app/sofast/lib/DefinitionEnsemble.py index c8b1a0b1..eea2848b 100644 --- a/opencsp/app/sofast/lib/DefinitionEnsemble.py +++ b/opencsp/app/sofast/lib/DefinitionEnsemble.py @@ -79,10 +79,7 @@ def load_from_json(cls, file: str) -> 'DefinitionEnsemble': data_json = json.load(f) ensemble_perimeter = np.array( - ( - data_json['ensemble_perimeter']['facet_indices'], - data_json['ensemble_perimeter']['corner_indices'], - ) + (data_json['ensemble_perimeter']['facet_indices'], data_json['ensemble_perimeter']['corner_indices']) ).T # Nx2 ndarray # Put data in dictionary @@ -107,9 +104,7 @@ def save_to_json(self, file: str) -> None: data_dict = { 'v_facet_locations': _Vxyz_to_dict(self.v_facet_locations), # Vxyz - 'r_facet_ensemble': _rot_list_to_dict( - self.r_facet_ensemble - ), # list[Rotation] + 'r_facet_ensemble': _rot_list_to_dict(self.r_facet_ensemble), # list[Rotation] 'ensemble_perimeter': { 'facet_indices': ensemble_perimeter[:, 0].tolist(), # list 'corner_indices': ensemble_perimeter[:, 1].tolist(), # list @@ -167,9 +162,7 @@ def load_from_hdf(cls, file: str, prefix: str) -> 'DefinitionEnsemble': r_facet_ensemble = [Rotation.from_rotvec(r) for r in data['r_facet_ensemble']] ensemble_perimeter = data['ensemble_perimeter'] v_centroid_ensemble = Vxyz(data['v_centroid_ensemble']) - return cls( - 
v_facet_locations, r_facet_ensemble, ensemble_perimeter, v_centroid_ensemble - ) + return cls(v_facet_locations, r_facet_ensemble, ensemble_perimeter, v_centroid_ensemble) def _Vxyz_to_dict(V: Vxyz) -> dict: diff --git a/opencsp/app/sofast/lib/DefinitionFacet.py b/opencsp/app/sofast/lib/DefinitionFacet.py index 9337c16b..8f2792bb 100644 --- a/opencsp/app/sofast/lib/DefinitionFacet.py +++ b/opencsp/app/sofast/lib/DefinitionFacet.py @@ -28,9 +28,7 @@ def __init__(self, v_facet_corners: Vxyz, v_facet_centroid: Vxyz): def copy(self) -> 'DefinitionFacet': """Returns copy of class""" - return DefinitionFacet( - self.v_facet_corners.copy(), self.v_facet_centroid.copy() - ) + return DefinitionFacet(self.v_facet_corners.copy(), self.v_facet_centroid.copy()) @classmethod def load_from_json(cls, file: str) -> 'DefinitionFacet': @@ -85,10 +83,7 @@ def save_to_hdf(self, file: str, prefix: str = '') -> None: Prefix to append to folder path within HDF file (folders must be separated by "/") """ data = [self.v_facet_corners.data, self.v_facet_centroid.data] - datasets = [ - prefix + 'DefinitionFacet/v_facet_corners', - prefix + 'DefinitionFacet/v_facet_centroid', - ] + datasets = [prefix + 'DefinitionFacet/v_facet_corners', prefix + 'DefinitionFacet/v_facet_centroid'] hdf5_tools.save_hdf5_datasets(data, datasets, file) @classmethod @@ -102,10 +97,7 @@ def load_from_hdf(cls, file: str, prefix: str) -> 'DefinitionFacet': prefix : str Prefix appended to folder path within HDF file (folders must be separated by "/") """ - datasets = [ - prefix + 'DefinitionFacet/v_facet_corners', - prefix + 'DefinitionFacet/v_facet_centroid', - ] + datasets = [prefix + 'DefinitionFacet/v_facet_corners', prefix + 'DefinitionFacet/v_facet_centroid'] data = hdf5_tools.load_hdf5_datasets(datasets, file) v_facet_corners = Vxyz(data['v_facet_corners']) v_facet_centroid = Vxyz(data['v_facet_centroid']) diff --git a/opencsp/app/sofast/lib/DisplayShape.py b/opencsp/app/sofast/lib/DisplayShape.py index 7fb79fe8..50329338 100644 --- a/opencsp/app/sofast/lib/DisplayShape.py +++ b/opencsp/app/sofast/lib/DisplayShape.py @@ -11,11 +11,7 @@ class DisplayShape: """Representation of a screen/projector for deflectometry.""" def __init__( - self, - v_cam_screen_screen: Vxyz, - r_screen_cam: Rotation, - grid_data: dict, - name: str = '', + self, v_cam_screen_screen: Vxyz, r_screen_cam: Rotation, grid_data: dict, name: str = '' ) -> 'DisplayShape': """ Instantiates deflectometry display representation. 
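# Minimal construction sketch for DisplayShape (synthetic values; the Vxy/Vxyz import
# paths are assumed from the common OpenCSP geometry package). The grid_data keys follow
# the 'distorted3D' model handled by load_from_hdf/save_to_hdf below.
import numpy as np
from scipy.spatial.transform import Rotation
from opencsp.common.lib.geometry.Vxy import Vxy
from opencsp.common.lib.geometry.Vxyz import Vxyz
from opencsp.app.sofast.lib.DisplayShape import DisplayShape

grid_data = {
    'screen_model': 'distorted3D',
    'xy_screen_fraction': Vxy(np.random.rand(2, 100)),  # hypothetical sample points
    'xyz_screen_coords': Vxyz(np.random.rand(3, 100)),  # hypothetical xyz coordinates, meters
}
display = DisplayShape(
    v_cam_screen_screen=Vxyz((0.0, 0.0, 2.0)),  # hypothetical camera-to-screen vector
    r_screen_cam=Rotation.identity(),
    grid_data=grid_data,
    name='example display',
)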
@@ -207,20 +203,14 @@ def load_from_hdf(cls, file: str): # Distorted 2D elif grid_data['screen_model'] == 'distorted2D': - datasets = [ - 'DisplayShape/xy_screen_fraction', - 'DisplayShape/xy_screen_coords', - ] + datasets = ['DisplayShape/xy_screen_fraction', 'DisplayShape/xy_screen_coords'] grid_data.update(hdf5_tools.load_hdf5_datasets(datasets, file)) grid_data['xy_screen_fraction'] = Vxy(grid_data['xy_screen_fraction']) grid_data['xy_screen_coords'] = Vxy(grid_data['xy_screen_coords']) # Distorted 3D elif grid_data['screen_model'] == 'distorted3D': - datasets = [ - 'DisplayShape/xy_screen_fraction', - 'DisplayShape/xyz_screen_coords', - ] + datasets = ['DisplayShape/xy_screen_fraction', 'DisplayShape/xyz_screen_coords'] grid_data.update(hdf5_tools.load_hdf5_datasets(datasets, file)) grid_data['xy_screen_fraction'] = Vxy(grid_data['xy_screen_fraction']) grid_data['xyz_screen_coords'] = Vxyz(grid_data['xyz_screen_coords']) @@ -229,11 +219,7 @@ def load_from_hdf(cls, file: str): raise ValueError(f'Model, {grid_data["screen_model"]}, not supported.') # Load display parameters - datasets = [ - 'DisplayShape/rvec_screen_cam', - 'DisplayShape/tvec_cam_screen_screen', - 'DisplayShape/name', - ] + datasets = ['DisplayShape/rvec_screen_cam', 'DisplayShape/tvec_cam_screen_screen', 'DisplayShape/name'] data = hdf5_tools.load_hdf5_datasets(datasets, file) # Return display object @@ -261,24 +247,14 @@ def save_to_hdf(self, file: str): data = [] for dataset in self.grid_data.keys(): datasets.append('DisplayShape/' + dataset) - if isinstance(self.grid_data[dataset], Vxy) or isinstance( - self.grid_data[dataset], Vxyz - ): + if isinstance(self.grid_data[dataset], Vxy) or isinstance(self.grid_data[dataset], Vxyz): data.append(self.grid_data[dataset].data) else: data.append(self.grid_data[dataset]) # Screen data - datasets += [ - 'DisplayShape/rvec_screen_cam', - 'DisplayShape/tvec_cam_screen_screen', - 'DisplayShape/name', - ] - data += [ - self.r_screen_cam.as_rotvec(), - self.v_cam_screen_screen.data, - self.name, - ] + datasets += ['DisplayShape/rvec_screen_cam', 'DisplayShape/tvec_cam_screen_screen', 'DisplayShape/name'] + data += [self.r_screen_cam.as_rotvec(), self.v_cam_screen_screen.data, self.name] # Save data hdf5_tools.save_hdf5_datasets(data, datasets, file) diff --git a/opencsp/app/sofast/lib/DotLocationsFixedPattern.py b/opencsp/app/sofast/lib/DotLocationsFixedPattern.py index 580f05cb..a629bb08 100644 --- a/opencsp/app/sofast/lib/DotLocationsFixedPattern.py +++ b/opencsp/app/sofast/lib/DotLocationsFixedPattern.py @@ -11,9 +11,7 @@ class DotLocationsFixedPattern: """Class that holds locations of dots for fixed pattern deflectometry.""" - def __init__( - self, x_dot_index: ndarray, y_dot_index: ndarray, xyz_dot_loc: ndarray - ) -> 'DotLocationsFixedPattern': + def __init__(self, x_dot_index: ndarray, y_dot_index: ndarray, xyz_dot_loc: ndarray) -> 'DotLocationsFixedPattern': """Instantiates class with xy indices and xyz points. 
Parameters @@ -26,13 +24,9 @@ def __init__( Shape (N, M, 3) array holding xyz locations of dots in screen coordinates """ if x_dot_index.size != xyz_dot_loc.shape[1]: - raise ValueError( - f'X dimensions do not match: {x_dot_index.size} and {xyz_dot_loc.shape}' - ) + raise ValueError(f'X dimensions do not match: {x_dot_index.size} and {xyz_dot_loc.shape}') if y_dot_index.size != xyz_dot_loc.shape[0]: - raise ValueError( - f'Y dimensions do not match: {y_dot_index.size} and {xyz_dot_loc.shape}' - ) + raise ValueError(f'Y dimensions do not match: {y_dot_index.size} and {xyz_dot_loc.shape}') # Store data self.x_dot_index = x_dot_index @@ -43,12 +37,7 @@ def __init__( # Calculate extents self.nx = x_dot_index.size self.ny = y_dot_index.size - self.dot_extent = ( - x_dot_index.min(), - x_dot_index.max(), - y_dot_index.min(), - y_dot_index.max(), - ) + self.dot_extent = (x_dot_index.min(), x_dot_index.max(), y_dot_index.min(), y_dot_index.max()) self.x_min = self.x_dot_index.min() self.x_offset = -self.x_min @@ -73,22 +62,12 @@ def from_projection_and_display( # Calculate xyz locations xyz_dot_loc = display.interp_func(xy_pts_frac) - x = xyz_dot_loc.x.reshape( - (fixed_pattern_projection.ny, fixed_pattern_projection.nx, 1) - ) - y = xyz_dot_loc.y.reshape( - (fixed_pattern_projection.ny, fixed_pattern_projection.nx, 1) - ) - z = xyz_dot_loc.z.reshape( - (fixed_pattern_projection.ny, fixed_pattern_projection.nx, 1) - ) + x = xyz_dot_loc.x.reshape((fixed_pattern_projection.ny, fixed_pattern_projection.nx, 1)) + y = xyz_dot_loc.y.reshape((fixed_pattern_projection.ny, fixed_pattern_projection.nx, 1)) + z = xyz_dot_loc.z.reshape((fixed_pattern_projection.ny, fixed_pattern_projection.nx, 1)) xyz_dot_loc_mat = np.concatenate((x, y, z), axis=2) - return cls( - fixed_pattern_projection.x_indices, - fixed_pattern_projection.y_indices, - xyz_dot_loc_mat, - ) + return cls(fixed_pattern_projection.x_indices, fixed_pattern_projection.y_indices, xyz_dot_loc_mat) def xy_indices_to_screen_coordinates(self, pts_idxs: Vxy) -> Vxyz: """Convertes xy point indices to xyz screen coordinates. diff --git a/opencsp/app/sofast/lib/Fringes.py b/opencsp/app/sofast/lib/Fringes.py index d44beba0..1d88afd0 100644 --- a/opencsp/app/sofast/lib/Fringes.py +++ b/opencsp/app/sofast/lib/Fringes.py @@ -25,9 +25,7 @@ def __init__(self, periods_x: list, periods_y: list): self.num_y_images = np.size(periods_y) * self.phase_shifts_y self.num_images = self.num_y_images + self.num_x_images - def get_frames( - self, x: int, y: int, dtype: str, range_: list[float, float] - ) -> np.ndarray: + def get_frames(self, x: int, y: int, dtype: str, range_: list[float, float]) -> np.ndarray: """ Returns 3D ndarray of scaled, monochrome fringe images. 
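# Minimal usage sketch for Fringes (period values and display size are hypothetical):
# construct a fringe set and render the full image stack scaled to an 8-bit range.
from opencsp.app.sofast.lib.Fringes import Fringes

fringes = Fringes(periods_x=[4.0, 16.0, 64.0], periods_y=[4.0, 16.0, 64.0])
frames = fringes.get_frames(x=1920, y=1080, dtype='uint8', range_=[0, 255])
print(frames.shape)  # (1080, 1920, fringes.num_images)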
@@ -50,12 +48,8 @@ def get_frames( images = np.ones((y, x, self.num_images)) # float # Create sinusoids [-1, 1] - y_sinusoids = self.get_sinusoids( - y, self.periods_y, self.phase_shifts_y - ) # float - x_sinusoids = self.get_sinusoids( - x, self.periods_x, self.phase_shifts_x - ) # float + y_sinusoids = self.get_sinusoids(y, self.periods_y, self.phase_shifts_y) # float + x_sinusoids = self.get_sinusoids(x, self.periods_x, self.phase_shifts_x) # float # Create y fringes [-1, 1] for idx, sinusoid in enumerate(y_sinusoids): @@ -78,9 +72,7 @@ def get_frames( return images @staticmethod - def get_sinusoids( - length: int, periods: list[float], phase_shifts: int - ) -> list[np.ndarray]: + def get_sinusoids(length: int, periods: list[float], phase_shifts: int) -> list[np.ndarray]: """ Creates list of phase shifted sinusoids for given periods ranging from -1 to 1. diff --git a/opencsp/app/sofast/lib/ImageCalibrationAbstract.py b/opencsp/app/sofast/lib/ImageCalibrationAbstract.py index d5c042bd..20ca6e85 100644 --- a/opencsp/app/sofast/lib/ImageCalibrationAbstract.py +++ b/opencsp/app/sofast/lib/ImageCalibrationAbstract.py @@ -54,19 +54,12 @@ def _create_response_function(self) -> None: # Create interpolation function self.response_function = interpolate.interp1d( - camera_values_clip, - display_values_clip, - bounds_error=False, - fill_value=(display_min, display_max), + camera_values_clip, display_values_clip, bounds_error=False, fill_value=(display_min, display_max) ) @classmethod def from_data( - cls, - images_cal: ndarray, - display_values: ndarray, - mask: ndarray | None = None, - num_samps: int = 1000, + cls, images_cal: ndarray, display_values: ndarray, mask: ndarray | None = None, num_samps: int = 1000 ) -> 'ImageCalibrationAbstract': """ Calculates camera values from calibration images. Returns @@ -104,10 +97,7 @@ def from_data( idx_0 = idx_1 - num_samps if idx_0 < 0: idx_0 = 0 - warn( - f'Number of samples smaller than n_samps. Using {idx_1:d} samples instead.', - stacklevel=2, - ) + warn(f'Number of samples smaller than n_samps. Using {idx_1:d} samples instead.', stacklevel=2) # Get brightness values corresponding to indices vals_sort = np.sort(im_1.flatten()) @@ -125,9 +115,7 @@ def from_data( return cls(camera_values.astype(float), display_values.astype(float)) - def calculate_min_display_camera_values( - self, derivative_thresh: float = 0.4 - ) -> tuple[float, float]: + def calculate_min_display_camera_values(self, derivative_thresh: float = 0.4) -> tuple[float, float]: """ Calculates the minimum display and camera brightness values to be used in a valid calibration. 
Values lower than these values are too close to @@ -146,12 +134,8 @@ def calculate_min_display_camera_values( """ # Calculate normalized differential - camera_values_norm = ( - self.camera_values.astype(float) / self.camera_values.astype(float).max() - ) - display_values_norm = ( - self.display_values.astype(float) / self.display_values.astype(float).max() - ) + camera_values_norm = self.camera_values.astype(float) / self.camera_values.astype(float).max() + display_values_norm = self.display_values.astype(float) / self.display_values.astype(float).max() dy_dx = np.diff(camera_values_norm) / np.diff(display_values_norm) # Calculate data points that are below threshold @@ -183,9 +167,7 @@ def load_from_hdf(cls, file) -> 'ImageCalibrationAbstract': calibration_name = cls.get_calibration_name() if data['calibration_type'] != calibration_name: - raise ValueError( - f'ImageCalibration file is not of type {calibration_name:s}' - ) + raise ValueError(f'ImageCalibration file is not of type {calibration_name:s}') # Load grid data datasets = ['ImageCalibration/camera_values', 'ImageCalibration/display_values'] diff --git a/opencsp/app/sofast/lib/ImageCalibrationGlobal.py b/opencsp/app/sofast/lib/ImageCalibrationGlobal.py index 63eac4bf..2406e66a 100644 --- a/opencsp/app/sofast/lib/ImageCalibrationGlobal.py +++ b/opencsp/app/sofast/lib/ImageCalibrationGlobal.py @@ -1,9 +1,7 @@ from numpy import ndarray from opencsp.app.sofast.lib.ImageCalibrationAbstract import ImageCalibrationAbstract -from opencsp.app.sofast.lib.MeasurementSofastFringe import ( - MeasurementSofastFringe as Measurement, -) +from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement class ImageCalibrationGlobal(ImageCalibrationAbstract): diff --git a/opencsp/app/sofast/lib/ImageCalibrationScaling.py b/opencsp/app/sofast/lib/ImageCalibrationScaling.py index 86268a3e..252d8119 100644 --- a/opencsp/app/sofast/lib/ImageCalibrationScaling.py +++ b/opencsp/app/sofast/lib/ImageCalibrationScaling.py @@ -2,9 +2,7 @@ import numpy as np from opencsp.app.sofast.lib.ImageCalibrationAbstract import ImageCalibrationAbstract -from opencsp.app.sofast.lib.MeasurementSofastFringe import ( - MeasurementSofastFringe as Measurement, -) +from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement class ImageCalibrationScaling(ImageCalibrationAbstract): @@ -34,15 +32,9 @@ def apply_to_images(self, measurement: Measurement) -> ndarray: """ # Convert camera images to observed display values - im_dark_disp = self.response_function( - measurement.mask_images[..., 0:1] - ) # M x N x 1 - im_light_disp = self.response_function( - measurement.mask_images[..., 1:2] - ) # M x N x 1 - fringe_images_disp = self.response_function( - measurement.fringe_images - ) # M x N x n + im_dark_disp = self.response_function(measurement.mask_images[..., 0:1]) # M x N x 1 + im_light_disp = self.response_function(measurement.mask_images[..., 1:2]) # M x N x 1 + fringe_images_disp = self.response_function(measurement.fringe_images) # M x N x n # Calculate delta image im_delta_disp = im_light_disp - im_dark_disp # M x N x 1 diff --git a/opencsp/app/sofast/lib/MeasurementSofastFringe.py b/opencsp/app/sofast/lib/MeasurementSofastFringe.py index e7ecfbba..5db28636 100644 --- a/opencsp/app/sofast/lib/MeasurementSofastFringe.py +++ b/opencsp/app/sofast/lib/MeasurementSofastFringe.py @@ -50,9 +50,7 @@ def __init__( """ # Check mask image size if mask_images.shape[2] != 2 or np.ndim(mask_images) != 3: - raise 
ValueError( - f'Two mask images needed, but {mask_images.shape[2]} given.' - ) + raise ValueError(f'Two mask images needed, but {mask_images.shape[2]} given.') # Save input measurement data self.mask_images = mask_images @@ -72,12 +70,8 @@ def __init__( self.num_x_ims = self.fringe_periods_x.size * self.phase_shifts self.num_fringe_ims = self.fringe_images.shape[2] # Check number of input fringes - if (self.num_y_ims + self.num_x_ims) != self.num_fringe_ims or np.ndim( - fringe_images - ) != 3: - raise ValueError( - f'Incorrect number of fringe images given. Fringe images shape = {fringe_images.shape}.' - ) + if (self.num_y_ims + self.num_x_ims) != self.num_fringe_ims or np.ndim(fringe_images) != 3: + raise ValueError(f'Incorrect number of fringe images given. Fringe images shape = {fringe_images.shape}.') # Instantiate calibration objected fringes self._fringe_images_calibrated = None @@ -113,9 +107,7 @@ def fringe_images_x_calibrated(self) -> np.ndarray: """Returns calibrated x-only fringes""" return self.fringe_images_calibrated[..., self.num_y_ims :] - def calibrate_fringe_images( - self, calibration: ImageCalibrationAbstract, **kwargs - ) -> None: + def calibrate_fringe_images(self, calibration: ImageCalibrationAbstract, **kwargs) -> None: """ Performs brightness level calibration on the raw captured fringes. @@ -129,9 +121,7 @@ def calibrate_fringe_images( """ if not isinstance(calibration, ImageCalibrationAbstract): - raise ValueError( - 'Input calibration must be instance of ImageCalibrationAbstract.' - ) + raise ValueError('Input calibration must be instance of ImageCalibrationAbstract.') self._fringe_images_calibrated = calibration.apply_to_images(self, **kwargs) diff --git a/opencsp/app/sofast/lib/ParamsSofastFringe.py b/opencsp/app/sofast/lib/ParamsSofastFringe.py index 4015b1e0..dcf82c37 100644 --- a/opencsp/app/sofast/lib/ParamsSofastFringe.py +++ b/opencsp/app/sofast/lib/ParamsSofastFringe.py @@ -22,12 +22,8 @@ class ParamsSofastFringe: geometry_params: ParamsOpticGeometry = field(default_factory=ParamsOpticGeometry) # Debug objects - slope_solver_data_debug: SlopeSolverDataDebug = field( - default_factory=SlopeSolverDataDebug - ) - geometry_data_debug: DebugOpticsGeometry = field( - default_factory=DebugOpticsGeometry - ) + slope_solver_data_debug: SlopeSolverDataDebug = field(default_factory=SlopeSolverDataDebug) + geometry_data_debug: DebugOpticsGeometry = field(default_factory=DebugOpticsGeometry) def save_to_hdf(self, file: str, prefix: str = ''): """Saves data to given HDF5 file. Data is stored in PREFIX + ParamsSofastFringe/... 
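# Minimal flow sketch (helper function and file name are hypothetical; method names as
# defined in ImageCalibrationGlobal and MeasurementSofastFringe): load a saved
# projector-camera intensity calibration and apply it to an existing measurement.
from opencsp.app.sofast.lib.ImageCalibrationGlobal import ImageCalibrationGlobal
from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe

def apply_intensity_calibration(measurement: MeasurementSofastFringe, cal_file: str):
    calibration = ImageCalibrationGlobal.load_from_hdf(cal_file)
    measurement.calibrate_fringe_images(calibration)  # brightness-level calibration of raw fringes
    return measurement.fringe_images_calibrated       # calibrated fringe image stack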
diff --git a/opencsp/app/sofast/lib/ProcessSofastFixed.py b/opencsp/app/sofast/lib/ProcessSofastFixed.py index e881f8df..947b87f5 100644 --- a/opencsp/app/sofast/lib/ProcessSofastFixed.py +++ b/opencsp/app/sofast/lib/ProcessSofastFixed.py @@ -58,9 +58,7 @@ def __init__( self.measurement: MeasurementSofastFixed # Define blob detector - self.blob_detector: cv.SimpleBlobDetector_Params = ( - cv.SimpleBlobDetector_Params() - ) + self.blob_detector: cv.SimpleBlobDetector_Params = cv.SimpleBlobDetector_Params() self.blob_detector.minDistBetweenBlobs = 2 self.blob_detector.filterByArea = True self.blob_detector.minArea = 3 @@ -99,9 +97,7 @@ def calculate_mask(self) -> ndarray: """ # Calculate mask im_dark = self.measurement.image * 0 - images = np.concatenate( - (im_dark[..., None], self.measurement.image[..., None]), axis=2 - ) + images = np.concatenate((im_dark[..., None], self.measurement.image[..., None]), axis=2) params = [ self.params.mask_hist_thresh, self.params.mask_filt_width, @@ -160,9 +156,7 @@ def generate_geometry(self, blob_index: BlobIndex, mask_raw: np.ndarray) -> dict # Define optic orientation w.r.t. camera rot_optic_cam = self.data_geometry_general.r_optic_cam_refine_1 v_cam_optic_cam = self.data_geometry_general.v_cam_optic_cam_refine_2 - u_cam_measure_point_facet = self.data_geometry_facet[ - 0 - ].u_cam_measure_point_facet + u_cam_measure_point_facet = self.data_geometry_facet[0].u_cam_measure_point_facet # Get screen-camera pose rot_cam_optic = rot_optic_cam.inv() @@ -174,12 +168,8 @@ def generate_geometry(self, blob_index: BlobIndex, mask_raw: np.ndarray) -> dict v_optic_screen_optic = v_optic_cam_optic + v_cam_screen_optic # Calculate xyz screen points - v_screen_points_screen = ( - self.fixed_pattern_dot_locs.xy_indices_to_screen_coordinates(pts_index_xy) - ) - v_screen_points_facet = v_optic_screen_optic + v_screen_points_screen.rotate( - rot_screen_optic - ) + v_screen_points_screen = self.fixed_pattern_dot_locs.xy_indices_to_screen_coordinates(pts_index_xy) + v_screen_points_facet = v_optic_screen_optic + v_screen_points_screen.rotate(rot_screen_optic) # Calculate active pixel pointing u_pixel_pointing_cam = self.camera.vector_from_pixel(pts_image) @@ -241,24 +231,15 @@ def save_to_hdf(self, file: str, prefix: str = ''): """ self.data_slope_solver.save_to_hdf(file, 'CalculationsFixedPattern/Facet_000/') self.data_geometry_general.save_to_hdf(file, 'CalculationsFixedPattern/') - self.data_image_proccessing_general.save_to_hdf( - file, 'CalculationsFixedPattern/' - ) - self.data_geometry_facet[0].save_to_hdf( - file, 'CalculationsFixedPattern/Facet_000/' - ) - self.data_image_processing_facet[0].save_to_hdf( - file, 'CalculationsFixedPattern/Facet_000/' - ) + self.data_image_proccessing_general.save_to_hdf(file, 'CalculationsFixedPattern/') + self.data_geometry_facet[0].save_to_hdf(file, 'CalculationsFixedPattern/Facet_000/') + self.data_image_processing_facet[0].save_to_hdf(file, 'CalculationsFixedPattern/Facet_000/') self.data_error.save_to_hdf(file, 'CalculationsFixedPattern/') lt.info(f'SofastFixed data saved to: {file:s} with prefix: {prefix:s}') def get_mirror( - self, - interpolation_type: Literal[ - 'given', 'bilinear', 'clough_tocher', 'nearest' - ] = 'nearest', + self, interpolation_type: Literal['given', 'bilinear', 'clough_tocher', 'nearest'] = 'nearest' ) -> MirrorPoint: """Returns mirror object with slope data""" v_surf_pts = self.data_slope_solver.v_surf_points_facet diff --git a/opencsp/app/sofast/lib/ProcessSofastFringe.py 
b/opencsp/app/sofast/lib/ProcessSofastFringe.py index 547de91f..271f3d2e 100644 --- a/opencsp/app/sofast/lib/ProcessSofastFringe.py +++ b/opencsp/app/sofast/lib/ProcessSofastFringe.py @@ -158,9 +158,7 @@ class ProcessSofastFringe(HDF5_SaveAbstract): - v_mask_centroid_image """ - def __init__( - self, measurement: Measurement, camera: Camera, display: Display - ) -> 'ProcessSofastFringe': + def __init__(self, measurement: Measurement, camera: Camera, display: Display) -> 'ProcessSofastFringe': """ SOFAST processing class. @@ -178,9 +176,7 @@ def __init__( self.measurement = measurement self.display = display self.camera = camera - self.orientation = SpatialOrientation( - display.r_cam_screen, display.v_cam_screen_cam - ) + self.orientation = SpatialOrientation(display.r_cam_screen, display.v_cam_screen_cam) # Define default calculation parameters self.params = ParamsSofastFringe() @@ -196,9 +192,7 @@ def __init__( self.data_geometry_general: cdc.CalculationDataGeometryGeneral = None self.data_image_processing_general: cdc.CalculationImageProcessingGeneral = None self.data_geometry_facet: list[cdc.CalculationDataGeometryFacet] = None - self.data_image_processing_facet: list[cdc.CalculationImageProcessingFacet] = ( - None - ) + self.data_image_processing_facet: list[cdc.CalculationImageProcessingFacet] = None self.data_error: cdc.CalculationError = None self.data_characterization_facet: list[SlopeSolverData] = None @@ -227,9 +221,7 @@ def process_optic_undefined(self, surface: Surface2DAbstract) -> None: # Solve slopes self._solve_slopes([surface]) - def process_optic_singlefacet(self, - facet_data: DefinitionFacet, - surface: Surface2DAbstract) -> None: + def process_optic_singlefacet(self, facet_data: DefinitionFacet, surface: Surface2DAbstract) -> None: """ Processes optic geometry, screen intersection points, and solves for slops for single facet optic. @@ -250,10 +242,9 @@ def process_optic_singlefacet(self, # Solve slopes self._solve_slopes([surface]) - def process_optic_multifacet(self, - facet_data: list[DefinitionFacet], - ensemble_data: DefinitionEnsemble, - surfaces: list[Surface2DAbstract]) -> None: + def process_optic_multifacet( + self, facet_data: list[DefinitionFacet], ensemble_data: DefinitionEnsemble, surfaces: list[Surface2DAbstract] + ) -> None: """ Processes optic geometry, screen intersection points, and solves for slops for multi-facet optic. @@ -273,7 +264,7 @@ def process_optic_multifacet(self, lt.error_and_raise( ValueError, 'Length of facet_data does not equal length of surfaces' - f'facet_data={len(facet_data)}, surface_data={len(surfaces)}' + f'facet_data={len(facet_data)}, surface_data={len(surfaces)}', ) # Process optic/setup geometry @@ -401,7 +392,7 @@ def _process_optic_multifacet_geometry( lt.error_and_raise( ValueError, f'Given length of facet data is {len(facet_data):d}' - f'but ensemble_data expects {ensemble_data.num_facets:d} facets.' + f'but ensemble_data expects {ensemble_data.num_facets:d} facets.', ) # Calculate mask @@ -416,7 +407,7 @@ def _process_optic_multifacet_geometry( if self.params.mask_keep_largest_area: lt.warn( '"keep_largest_area" mask processing option cannot be used ' - 'for multifacet ensembles. This will be turned off.', + 'for multifacet ensembles. This will be turned off.' 
) self.params.mask_keep_largest_area = False @@ -465,17 +456,13 @@ def _process_display(self) -> None: screen_ys = 1.0 - screen_ys # Store screen points in Vxy v_screen_points_fractional_screens = Vxy((screen_xs, screen_ys)) - self.data_geometry_facet[idx_facet].v_screen_points_fractional_screens = ( - v_screen_points_fractional_screens - ) + self.data_geometry_facet[idx_facet].v_screen_points_fractional_screens = v_screen_points_fractional_screens # Undistort screen points (display coordinates) v_screen_points_screen = self.display.interp_func( v_screen_points_fractional_screens ) # meters, display coordinates - self.data_geometry_facet[idx_facet].v_screen_points_screen = ( - v_screen_points_screen - ) + self.data_geometry_facet[idx_facet].v_screen_points_screen = v_screen_points_screen # Check for nans returning from screen point calculation nan_mask = np.isnan(v_screen_points_screen.data).sum(0).astype(bool) @@ -483,43 +470,33 @@ def _process_display(self) -> None: if np.any(nan_mask): lt.warn( f'{nan_mask.sum():d} / {nan_mask.size:d} points are NANs in calculated ' - 'screen points for facet {idx_facet:d}. These data points will be removed.', + 'screen points for facet {idx_facet:d}. These data points will be removed.' ) # Make mask of NANs mask_bad_pixels[mask_processed] = nan_mask # Update processed mask mask_processed[mask_bad_pixels] = False # Remove nan data points from screen points - self.data_geometry_facet[ - idx_facet - ].v_screen_points_fractional_screens = v_screen_points_fractional_screens[ + self.data_geometry_facet[idx_facet].v_screen_points_fractional_screens = ( + v_screen_points_fractional_screens[np.logical_not(nan_mask)] + ) + self.data_geometry_facet[idx_facet].v_screen_points_screen = v_screen_points_screen[ np.logical_not(nan_mask) ] - self.data_geometry_facet[idx_facet].v_screen_points_screen = ( - v_screen_points_screen[np.logical_not(nan_mask)] - ) # Save bad pixel mask - self.data_image_processing_facet[idx_facet].mask_bad_pixels = ( - mask_bad_pixels - ) + self.data_image_processing_facet[idx_facet].mask_bad_pixels = mask_bad_pixels # Calculate pixel pointing directions (camera coordinates) - u_pixel_pointing_cam = ip.calculate_active_pixels_vectors( - mask_processed, self.camera - ) + u_pixel_pointing_cam = ip.calculate_active_pixels_vectors(mask_processed, self.camera) # Convert to optic coordinates u_pixel_pointing_facet = u_pixel_pointing_cam.rotate(ori.r_cam_optic) - self.data_geometry_facet[idx_facet].u_pixel_pointing_facet = ( - u_pixel_pointing_facet - ) + self.data_geometry_facet[idx_facet].u_pixel_pointing_facet = u_pixel_pointing_facet # Convert to optic coordinates v_screen_points_facet = ori.trans_screen_optic.apply( self.data_geometry_facet[idx_facet].v_screen_points_screen ) - self.data_geometry_facet[idx_facet].v_screen_points_facet = ( - v_screen_points_facet - ) + self.data_geometry_facet[idx_facet].v_screen_points_facet = v_screen_points_facet def _solve_slopes(self, surfaces: list[Surface2DAbstract]) -> None: """ @@ -532,43 +509,24 @@ def _solve_slopes(self, surfaces: list[Surface2DAbstract]) -> None: """ # Check inputs if self.data_geometry_facet is None: - lt.error_and_raise( - ValueError, - 'Not all facets geometrically processed; cannot solve slopes.' 
- ) + lt.error_and_raise(ValueError, 'Not all facets geometrically processed; cannot solve slopes.') # Loop through all input facets and solve slopes self.data_characterization_facet = [] for facet_idx in range(self.num_facets): # Check debug status if self.params.slope_solver_data_debug.debug_active: - self.params.slope_solver_data_debug.optic_data = self.data_facet_def[ - facet_idx - ] + self.params.slope_solver_data_debug.optic_data = self.data_facet_def[facet_idx] # Instantiate slope solver object kwargs = { - 'v_optic_cam_optic': self.data_geometry_facet[ - facet_idx - ].spatial_orientation.v_optic_cam_optic, - 'u_active_pixel_pointing_optic': self.data_geometry_facet[ - facet_idx - ].u_pixel_pointing_facet, - 'u_measure_pixel_pointing_optic': self.data_geometry_facet[ - facet_idx - ].u_cam_measure_point_facet, - 'v_screen_points_facet': self.data_geometry_facet[ - facet_idx - ].v_screen_points_facet, - 'v_optic_screen_optic': self.data_geometry_facet[ - facet_idx - ].spatial_orientation.v_optic_screen_optic, - 'v_align_point_optic': self.data_geometry_facet[ - facet_idx - ].v_align_point_facet, - 'dist_optic_screen': self.data_geometry_facet[ - facet_idx - ].measure_point_screen_distance, + 'v_optic_cam_optic': self.data_geometry_facet[facet_idx].spatial_orientation.v_optic_cam_optic, + 'u_active_pixel_pointing_optic': self.data_geometry_facet[facet_idx].u_pixel_pointing_facet, + 'u_measure_pixel_pointing_optic': self.data_geometry_facet[facet_idx].u_cam_measure_point_facet, + 'v_screen_points_facet': self.data_geometry_facet[facet_idx].v_screen_points_facet, + 'v_optic_screen_optic': self.data_geometry_facet[facet_idx].spatial_orientation.v_optic_screen_optic, + 'v_align_point_optic': self.data_geometry_facet[facet_idx].v_align_point_facet, + 'dist_optic_screen': self.data_geometry_facet[facet_idx].measure_point_screen_distance, 'surface': surfaces[facet_idx], 'debug': self.params.slope_solver_data_debug, } @@ -588,9 +546,7 @@ def _solve_slopes(self, surfaces: list[Surface2DAbstract]) -> None: # Save input surface parameters data self.data_surfaces = surfaces - def _calculate_facet_pointing( - self, reference: Literal['average'] | int = 'average' - ) -> None: + def _calculate_facet_pointing(self, reference: Literal['average'] | int = 'average') -> None: """ Calculates facet pointing relative to the given reference. @@ -607,8 +563,7 @@ def _calculate_facet_pointing( lt.error_and_raise(ValueError, 'Given reference must be int or "average".') if isinstance(reference, int) and reference >= self.num_facets: lt.error_and_raise( - ValueError, - f'Given facet index, {reference:d}, is out of range of 0-{self.num_facets - 1:d}.' + ValueError, f'Given facet index, {reference:d}, is out of range of 0-{self.num_facets - 1:d}.' 
) # Instantiate data list @@ -619,14 +574,11 @@ def _calculate_facet_pointing( for idx in range(self.num_facets): # Get transformation from user-input and slope solving trans_1 = TransformXYZ.from_R_V( - self.data_ensemble_def.r_facet_ensemble[idx], - self.data_ensemble_def.v_facet_locations[idx], + self.data_ensemble_def.r_facet_ensemble[idx], self.data_ensemble_def.v_facet_locations[idx] ) trans_2 = self.data_characterization_facet[idx].trans_alignment # Calculate inverse of slope solving transform - trans_2 = TransformXYZ.from_V(-trans_2.V) * TransformXYZ.from_R( - trans_2.R.inv() - ) + trans_2 = TransformXYZ.from_V(-trans_2.V) * TransformXYZ.from_R(trans_2.R.inv()) # Create local to global transformation trans_facet_ensemble_list.append(trans_2 * trans_1) @@ -644,16 +596,12 @@ def _calculate_facet_pointing( trans_align_pointing = TransformXYZ.from_R(r_align_pointing) # Apply alignment rotation to total transformation - trans_facet_ensemble_list = [ - trans_align_pointing * t for t in trans_facet_ensemble_list - ] + trans_facet_ensemble_list = [trans_align_pointing * t for t in trans_facet_ensemble_list] # Calculate global slope and surface points for idx in range(self.num_facets): # Get slope data - slopes = self.data_characterization_facet[ - idx - ].slopes_facet_xy # facet coordinats + slopes = self.data_characterization_facet[idx].slopes_facet_xy # facet coordinats # Calculate surface normals in local (facet) coordinates u_surf_norms = np.ones((3, slopes.shape[1])) @@ -663,9 +611,7 @@ def _calculate_facet_pointing( # Apply rotation to normal vectors u_surf_norms_global = u_surf_norms.rotate(trans_facet_ensemble_list[idx].R) # Convert normal vectors to global (ensemble) slopes - slopes_ensemble_xy = ( - -u_surf_norms_global.data[:2] / u_surf_norms_global.data[2:] - ) + slopes_ensemble_xy = -u_surf_norms_global.data[:2] / u_surf_norms_global.data[2:] # Convert surface points to global (ensemble) coordinates v_surf_points_ensemble = trans_facet_ensemble_list[idx].apply( @@ -673,15 +619,10 @@ def _calculate_facet_pointing( ) # Calculate pointing vectors in ensemble coordinates - v_facet_pointing_ensemble = Vxyz((0, 0, 1)).rotate( - trans_facet_ensemble_list[idx].R - ) + v_facet_pointing_ensemble = Vxyz((0, 0, 1)).rotate(trans_facet_ensemble_list[idx].R) data = cdc.CalculationFacetEnsemble( - trans_facet_ensemble_list[idx], - slopes_ensemble_xy, - v_surf_points_ensemble, - v_facet_pointing_ensemble, + trans_facet_ensemble_list[idx], slopes_ensemble_xy, v_surf_points_ensemble, v_facet_pointing_ensemble ) self.data_characterization_ensemble.append(data) @@ -731,9 +672,7 @@ def get_optic( facet = Facet(mirror) # Locate facet if self.optic_type == 'multi': - trans: TransformXYZ = self.data_characterization_ensemble[ - idx_mirror - ].trans_facet_ensemble + trans: TransformXYZ = self.data_characterization_ensemble[idx_mirror].trans_facet_ensemble facet.set_position_in_space(trans.V, trans.R) # Save facets facets.append(facet) @@ -761,8 +700,7 @@ def save_to_hdf(self, file: str, prefix: str = ''): if self.data_error is not None: self.data_error.save_to_hdf(file, f'{prefix:s}DataSofastCalculation/general/') self.data_geometry_general.save_to_hdf(file, f'{prefix:s}DataSofastCalculation/general/') - self.data_image_processing_general.save_to_hdf( - file, f'{prefix:s}DataSofastCalculation/general/') + self.data_image_processing_general.save_to_hdf(file, f'{prefix:s}DataSofastCalculation/general/') # Sofast parameters self.params.save_to_hdf(file, f'{prefix:s}DataSofastInput/') @@ -770,8 +708,7 @@ 
def save_to_hdf(self, file: str, prefix: str = ''): # Facet definition if self.data_facet_def is not None: for idx_facet, facet_data in enumerate(self.data_facet_def): - facet_data.save_to_hdf( - file, f'{prefix:s}DataSofastInput/optic_definition/facet_{idx_facet:03d}/') + facet_data.save_to_hdf(file, f'{prefix:s}DataSofastInput/optic_definition/facet_{idx_facet:03d}/') # Ensemble definition if self.data_ensemble_def is not None: @@ -779,23 +716,25 @@ def save_to_hdf(self, file: str, prefix: str = ''): # Surface definition for idx_facet, surface in enumerate(self.data_surfaces): - surface.save_to_hdf( - file, - f'{prefix:s}DataSofastInput/optic_definition/facet_{idx_facet:03d}/') + surface.save_to_hdf(file, f'{prefix:s}DataSofastInput/optic_definition/facet_{idx_facet:03d}/') # Calculations, one per facet for idx_facet in range(self.num_facets): # Save facet slope data self.data_characterization_facet[idx_facet].save_to_hdf( - file, f'{prefix:s}DataSofastCalculation/facet/facet_{idx_facet:03d}/') + file, f'{prefix:s}DataSofastCalculation/facet/facet_{idx_facet:03d}/' + ) # Save facet geometry data self.data_geometry_facet[idx_facet].save_to_hdf( - file, f'{prefix:s}DataSofastCalculation/facet/facet_{idx_facet:03d}/') + file, f'{prefix:s}DataSofastCalculation/facet/facet_{idx_facet:03d}/' + ) # Save facet image processing data self.data_image_processing_facet[idx_facet].save_to_hdf( - file, f'{prefix:s}DataSofastCalculation/facet/facet_{idx_facet:03d}/') + file, f'{prefix:s}DataSofastCalculation/facet/facet_{idx_facet:03d}/' + ) if self.data_characterization_ensemble: # Save ensemle data self.data_characterization_ensemble[idx_facet].save_to_hdf( - file, f'{prefix:s}DataSofastCalculation/facet/facet_{idx_facet:03d}/') + file, f'{prefix:s}DataSofastCalculation/facet/facet_{idx_facet:03d}/' + ) diff --git a/opencsp/app/sofast/lib/SpatialOrientation.py b/opencsp/app/sofast/lib/SpatialOrientation.py index 1020b545..373d70c1 100644 --- a/opencsp/app/sofast/lib/SpatialOrientation.py +++ b/opencsp/app/sofast/lib/SpatialOrientation.py @@ -8,9 +8,7 @@ class SpatialOrientation: """Holds relative orientations of camera, screen, and optic for deflectometry systems""" - def __init__( - self, r_cam_screen: Rotation, v_cam_screen_cam: Vxyz - ) -> 'SpatialOrientation': + def __init__(self, r_cam_screen: Rotation, v_cam_screen_cam: Vxyz) -> 'SpatialOrientation': """Instantiates Spatial Orienatation object Parameters @@ -71,9 +69,7 @@ def __copy__(self) -> 'SpatialOrientation': return ori - def _orient_screen_cam( - self, r_cam_screen: Rotation, v_cam_screen_cam: Vxyz - ) -> None: + def _orient_screen_cam(self, r_cam_screen: Rotation, v_cam_screen_cam: Vxyz) -> None: """Orients the screen and camera Parameters @@ -93,9 +89,7 @@ def _orient_screen_cam( self.v_cam_screen_screen = v_cam_screen_cam.rotate(r_cam_screen) self.v_screen_cam_screen = -self.v_cam_screen_screen - self.trans_screen_cam = TransformXYZ.from_R_V( - self.r_screen_cam, self.v_cam_screen_cam - ) + self.trans_screen_cam = TransformXYZ.from_R_V(self.r_screen_cam, self.v_cam_screen_cam) def orient_optic_cam(self, r_cam_optic: Rotation, v_cam_optic_cam: Vxyz) -> None: """Orients the optic and camera, and thus the optic and screen @@ -118,9 +112,7 @@ def orient_optic_cam(self, r_cam_optic: Rotation, v_cam_optic_cam: Vxyz) -> None self.v_cam_optic_optic = v_cam_optic_cam.rotate(r_cam_optic) self.v_optic_cam_optic = -self.v_cam_optic_optic - self.trans_cam_optic = TransformXYZ.from_R_V( - self.r_cam_optic, self.v_optic_cam_optic - ) + 
self.trans_cam_optic = TransformXYZ.from_R_V(self.r_cam_optic, self.v_optic_cam_optic) self._orient_optic_screen() @@ -130,19 +122,13 @@ def _orient_optic_screen(self) -> None: self.r_optic_screen = self.r_cam_screen * self.r_optic_cam self.r_screen_optic = self.r_optic_screen.inv() - self.v_optic_screen_optic = ( - self.v_optic_cam_optic + self.v_cam_screen_cam.rotate(self.r_cam_optic) - ) + self.v_optic_screen_optic = self.v_optic_cam_optic + self.v_cam_screen_cam.rotate(self.r_cam_optic) self.v_screen_optic_optic = -self.v_optic_screen_optic - self.v_optic_screen_screen = self.v_optic_screen_optic.rotate( - self.r_optic_screen - ) + self.v_optic_screen_screen = self.v_optic_screen_optic.rotate(self.r_optic_screen) self.v_screen_optic_screen = -self.v_optic_screen_screen - self.trans_screen_optic = TransformXYZ.from_R_V( - self.r_screen_optic, self.v_optic_screen_optic - ) + self.trans_screen_optic = TransformXYZ.from_R_V(self.r_screen_optic, self.v_optic_screen_optic) def save_to_hdf(self, file: str, prefix: str = '') -> None: """Saves only camera-screen orientation data to HDF file. Data is stored as prefix + SpatialOrientation/... @@ -155,10 +141,7 @@ def save_to_hdf(self, file: str, prefix: str = '') -> None: Prefix to append to folder path within HDF file (folders must be separated by "/") """ - datasets = [ - prefix + 'SpatialOrientation/r_cam_screen', - prefix + 'SpatialOrientation/v_cam_screen_cam', - ] + datasets = [prefix + 'SpatialOrientation/r_cam_screen', prefix + 'SpatialOrientation/v_cam_screen_cam'] data = [self.r_cam_screen.as_rotvec(), self.v_cam_screen_cam.data] @@ -194,10 +177,7 @@ def save_all_to_hdf(self, file: str, prefix: str = '') -> None: @classmethod def load_from_hdf(cls, file: str) -> 'SpatialOrientation': """Loads camera-screen orientation data from HDF file""" - datasets = [ - 'SpatialOrientation/r_cam_screen', - 'SpatialOrientation/v_cam_screen_cam', - ] + datasets = ['SpatialOrientation/r_cam_screen', 'SpatialOrientation/v_cam_screen_cam'] data = hdf5_tools.load_hdf5_datasets(datasets, file) r_cam_screen = Rotation.from_rotvec(data['r_cam_screen']) v_cam_screen_cam = Vxyz(data['v_cam_screen_cam']) diff --git a/opencsp/app/sofast/lib/SystemSofastFixed.py b/opencsp/app/sofast/lib/SystemSofastFixed.py index cb6e6fdc..c46fb59d 100644 --- a/opencsp/app/sofast/lib/SystemSofastFixed.py +++ b/opencsp/app/sofast/lib/SystemSofastFixed.py @@ -10,9 +10,7 @@ class SystemSofastFixed: in fixed pattern deflectometry. """ - def __init__( - self, size_x: int, size_y: int, width_pattern: int, spacing_pattern: int - ) -> 'SystemSofastFixed': + def __init__(self, size_x: int, size_y: int, width_pattern: int, spacing_pattern: int) -> 'SystemSofastFixed': """Instantiates SystemSofastFixed class from screen geometry parameters Parameters @@ -80,28 +78,18 @@ def _get_dot_image(self, dot_shape: str) -> ndarray[float]: and inactive area is 0, dtype float. 
""" if dot_shape not in ['circle', 'square']: - raise ValueError( - f'pattern_type must be one of ["circle", "square"], not {dot_shape:s}' - ) + raise ValueError(f'pattern_type must be one of ["circle", "square"], not {dot_shape:s}') if dot_shape == 'square': return np.ones((self.width_pattern, self.width_pattern), dtype=float) elif dot_shape == 'circle': - x, y = np.meshgrid( - np.arange(self.width_pattern, dtype=float), - np.arange(self.width_pattern, dtype=float), - ) + x, y = np.meshgrid(np.arange(self.width_pattern, dtype=float), np.arange(self.width_pattern, dtype=float)) x -= x.mean() y -= y.mean() r = np.sqrt(x**2 + y**2) return (r < float(self.width_pattern) / 2).astype(float) - def get_image( - self, - dtype: str, - max_int: int, - dot_shape: Literal['circle', 'square'] = 'circle', - ) -> ndarray: + def get_image(self, dtype: str, max_int: int, dot_shape: Literal['circle', 'square'] = 'circle') -> ndarray: """Creates a NxMx3 fixed pattern image Parameters diff --git a/opencsp/app/sofast/lib/SystemSofastFringe.py b/opencsp/app/sofast/lib/SystemSofastFringe.py index 91dbe4a0..77088880 100644 --- a/opencsp/app/sofast/lib/SystemSofastFringe.py +++ b/opencsp/app/sofast/lib/SystemSofastFringe.py @@ -9,9 +9,7 @@ import numpy as np from opencsp.app.sofast.lib.Fringes import Fringes -from opencsp.app.sofast.lib.MeasurementSofastFringe import ( - MeasurementSofastFringe as Measurement, -) +from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement from opencsp.common.lib.camera.ImageAcquisitionAbstract import ImageAcquisitionAbstract from opencsp.common.lib.deflectometry.ImageProjection import ImageProjection from opencsp.common.lib.geometry.Vxyz import Vxyz @@ -38,16 +36,12 @@ def __init__( self.root = image_projection.root self.image_projection = image_projection - if isinstance(image_acquisition, list) and isinstance( - image_acquisition[0], ImageAcquisitionAbstract - ): + if isinstance(image_acquisition, list) and isinstance(image_acquisition[0], ImageAcquisitionAbstract): self.image_acquisition = image_acquisition elif isinstance(image_acquisition, ImageAcquisitionAbstract): self.image_acquisition = [image_acquisition] else: - raise TypeError( - f'ImageAcquisition must be instance or list of type {ImageAcquisitionAbstract}.' - ) + raise TypeError(f'ImageAcquisition must be instance or list of type {ImageAcquisitionAbstract}.') # Show crosshairs self.image_projection.show_crosshairs() @@ -121,10 +115,7 @@ def _create_mask_images_to_display(self) -> None: self.mask_images_to_display.append(array) def _measure_sequence_display( - self, - im_disp_list: list, - im_cap_list: list[list[ndarray]], - run_next: Callable | None = None, + self, im_disp_list: list, im_cap_list: list[list[ndarray]], run_next: Callable | None = None ) -> None: """ Displays next image in sequence, waits, then captures frame from camera @@ -150,10 +141,7 @@ def _measure_sequence_display( ) def _measure_sequence_capture( - self, - im_disp_list: list, - im_cap_list: list[list], - run_next: Callable | None = None, + self, im_disp_list: list, im_cap_list: list[list], run_next: Callable | None = None ) -> None: """ Captures image from camera. 
If more images to display, loops to @@ -185,12 +173,7 @@ def _measure_sequence_capture( if len(im_cap_list[0]) < len(im_disp_list): # Display next image if not finished - self.root.after( - 10, - lambda: self._measure_sequence_display( - im_disp_list, im_cap_list, run_next - ), - ) + self.root.after(10, lambda: self._measure_sequence_display(im_disp_list, im_cap_list, run_next)) elif run_next is not None: # Run next operation if finished run_next() @@ -211,10 +194,7 @@ def load_fringes(self, fringes: Fringes, min_display_value: int) -> None: self.fringes = fringes # Get fringe range - fringe_range = ( - min_display_value, - self.image_projection.display_data['projector_max_int'], - ) + fringe_range = (min_display_value, self.image_projection.display_data['projector_max_int']) # Get fringe base images fringe_images_base = fringes.get_frames( @@ -228,13 +208,9 @@ def load_fringes(self, fringes: Fringes, min_display_value: int) -> None: self.fringe_images_to_display = [] for idx in range(fringe_images_base.shape[2]): # Create image - self.fringe_images_to_display.append( - np.concatenate([fringe_images_base[:, :, idx : idx + 1]] * 3, axis=2) - ) + self.fringe_images_to_display.append(np.concatenate([fringe_images_base[:, :, idx : idx + 1]] * 3, axis=2)) - def check_saturation( - self, image: ndarray, camera_max_int: int, thresh: float = 0.005 - ) -> None: + def check_saturation(self, image: ndarray, camera_max_int: int, thresh: float = 0.005) -> None: """ Checks if input image is saturated. Gives warning if image is saturated above given threshold. @@ -275,9 +251,7 @@ def capture_mask_images(self, run_next: Callable | None = None) -> None: self.mask_images_captured.append([]) # Start capturing images - self._measure_sequence_display( - self.mask_images_to_display, self.mask_images_captured, run_next - ) + self._measure_sequence_display(self.mask_images_to_display, self.mask_images_captured, run_next) def capture_fringe_images(self, run_next: Callable | None = None) -> None: """ @@ -301,9 +275,7 @@ def capture_fringe_images(self, run_next: Callable | None = None) -> None: self.fringe_images_captured.append([]) # Start capturing images - self._measure_sequence_display( - self.fringe_images_to_display, self.fringe_images_captured, run_next - ) + self._measure_sequence_display(self.fringe_images_to_display, self.fringe_images_captured, run_next) def capture_mask_and_fringe_images(self, run_next: Callable | None = None) -> None: """ @@ -330,9 +302,7 @@ def run_after_capture(): # Capture mask images, then capture fringe images, then run_next self.capture_mask_images(run_after_capture) - def run_display_camera_response_calibration( - self, res: int = 10, run_next: Callable | None = None - ) -> None: + def run_display_camera_response_calibration(self, res: int = 10, run_next: Callable | None = None) -> None: """ Calculates camera-projector response data. Data is saved in calibration_display_values and calibration_images. 
@@ -348,10 +318,7 @@ def run_display_camera_response_calibration( """ # Generate grayscale values self.calibration_display_values = np.arange( - 0, - self.image_projection.max_int + 1, - res, - dtype=self.image_projection.display_data['projector_data_type'], + 0, self.image_projection.max_int + 1, res, dtype=self.image_projection.display_data['projector_data_type'] ) if self.calibration_display_values[-1] != self.image_projection.max_int: self.calibration_display_values = np.concatenate( @@ -375,9 +342,7 @@ def run_display_camera_response_calibration( self.calibration_images = [] for _ in range(len(self.image_acquisition)): self.calibration_images.append([]) - self._measure_sequence_display( - cal_images_display, self.calibration_images, run_next - ) + self._measure_sequence_display(cal_images_display, self.calibration_images, run_next) def run_camera_exposure_calibration(self, run_next: Callable | None = None) -> None: """ @@ -401,9 +366,7 @@ def run_cal(): run_next() # Set displayed image to white and calibrate exposure - self.image_projection.display_image_in_active_area( - self.mask_images_to_display[1] - ) + self.image_projection.display_image_in_active_area(self.mask_images_to_display[1]) self.root.after(100, run_cal) def get_calibration_images(self) -> list[ndarray]: @@ -424,9 +387,7 @@ def get_calibration_images(self) -> list[ndarray]: images.append(np.concatenate(ims, axis=2)) return images - def get_measurements( - self, v_measure_point: Vxyz, optic_screen_dist: float, name: str - ) -> list[Measurement]: + def get_measurements(self, v_measure_point: Vxyz, optic_screen_dist: float, name: str) -> list[Measurement]: """ Returns measurement object once mask and fringe images have been captured. @@ -453,9 +414,7 @@ def get_measurements( raise ValueError('Mask images have not been captured.') measurements = [] - for fringe_images, mask_images in zip( - self.fringe_images_captured, self.mask_images_captured - ): + for fringe_images, mask_images in zip(self.fringe_images_captured, self.mask_images_captured): # Create measurement object kwargs = dict( fringe_periods_x=np.array(self.fringes.periods_x), diff --git a/opencsp/app/sofast/lib/calculation_data_classes.py b/opencsp/app/sofast/lib/calculation_data_classes.py index 37ce4b02..f657f443 100644 --- a/opencsp/app/sofast/lib/calculation_data_classes.py +++ b/opencsp/app/sofast/lib/calculation_data_classes.py @@ -105,9 +105,7 @@ def save_to_hdf(self, file: str, prefix: str = ''): prefix + 'CalculationDataGeometryFacet/u_pixel_pointing_facet', prefix + 'CalculationDataGeometryFacet/v_screen_points_facet', ] - self.spatial_orientation.save_all_to_hdf( - file, prefix + 'CalculationDataGeometryFacet/' - ) + self.spatial_orientation.save_all_to_hdf(file, prefix + 'CalculationDataGeometryFacet/') _save_data_in_file(data, datasets, file) @@ -286,16 +284,10 @@ def _save_data_in_file(data_in: list, datasets_in: list, file: str) -> None: elif isinstance(d, float) or isinstance(d, int) or isinstance(d, ndarray): data.append(d) else: - raise ValueError( - f'Unrecognized data type {type(d)} could not be saved.' 
- ) + raise ValueError(f'Unrecognized data type {type(d)} could not be saved.') datasets.append(ds) if len(data) > 0: hdf5_tools.save_hdf5_datasets(data, datasets, file) else: - warn( - f'Length 0 dataset was not saved to file "{file:s}"', - UserWarning, - stacklevel=2, - ) + warn(f'Length 0 dataset was not saved to file "{file:s}"', UserWarning, stacklevel=2) diff --git a/opencsp/app/sofast/lib/image_processing.py b/opencsp/app/sofast/lib/image_processing.py index b084257f..e63f491e 100644 --- a/opencsp/app/sofast/lib/image_processing.py +++ b/opencsp/app/sofast/lib/image_processing.py @@ -43,14 +43,10 @@ def calc_mask_raw( # Define constants N_BINS_IMAGE = 100 # Number of bins to create histogram of image pixels N_PEAK_STEP = 10 # Width of steps to take when finding dark and light peaks in image histogram - HIST_PEAK_THRESH = ( - 0.002 # Min height of difference image histogram to consider a peak. - ) + HIST_PEAK_THRESH = 0.002 # Min height of difference image histogram to consider a peak. # Create delta image - delta = mask_images[..., 1].astype(np.float32) - mask_images[..., 0].astype( - np.float32 - ) + delta = mask_images[..., 1].astype(np.float32) - mask_images[..., 0].astype(np.float32) # Check if only two values exist (light and dark regions) if np.unique(delta).size == 2: @@ -65,12 +61,10 @@ def calc_mask_raw( if len(peaks) == 2: break if len(peaks) != 2: - raise ValueError( - 'Not enough distinction between dark and light pixels in mask images.' - ) + raise ValueError('Not enough distinction between dark and light pixels in mask images.') # Calculate minimum between two peaks - idx_hist_min = np.argmin(hist[peaks[0]: peaks[1]]) + peaks[0] + idx_hist_min = np.argmin(hist[peaks[0] : peaks[1]]) + peaks[0] # Find index of histogram that is "hist_thresh" the way between the min and max thresh_hist_min = edges[idx_hist_min + 1] @@ -82,9 +76,7 @@ def calc_mask_raw( # Filter to remove small active areas outside of main mask area k = np.ones((filt_width, filt_width), dtype=np.float32) / float(filt_width**2) - mask_filt = cv.filter2D(mask_thresh.astype(np.float32), -1, k) > float( - filt_thresh / (filt_width**2) - ) + mask_filt = cv.filter2D(mask_thresh.astype(np.float32), -1, k) > float(filt_thresh / (filt_width**2)) # Combine both masks mask_raw = np.logical_and(mask_filt, mask_thresh) @@ -92,8 +84,7 @@ def calc_mask_raw( # Check for enough active pixels thresh_active_pixels = int(mask_raw.size * thresh_active_pixels) if mask_raw.sum() < thresh_active_pixels: - lt.error_and_raise( - ValueError, f'Mask contains less than {thresh_active_pixels:d} active pixels.') + lt.error_and_raise(ValueError, f'Mask contains less than {thresh_active_pixels:d} active pixels.') # Return raw, unprocessed mask return mask_raw @@ -115,9 +106,7 @@ def keep_largest_mask_area(mask: np.ndarray) -> np.ndarray: """ # Find contours of each cluster in mask - cnts = cv.findContours( - mask.astype(np.uint8), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE - )[0] + cnts = cv.findContours(mask.astype(np.uint8), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)[0] # Find largest contour cnt = max(cnts, key=cv.contourArea) @@ -178,9 +167,7 @@ def edges_from_mask(mask: np.ndarray) -> Vxy: ] # Find edges - mask_edges = [ - (cv.filter2D(mask.astype(np.float32), -1, k) == 1)[..., np.newaxis] for k in ks - ] + mask_edges = [(cv.filter2D(mask.astype(np.float32), -1, k) == 1)[..., np.newaxis] for k in ks] mask_edges = np.concatenate(mask_edges, 2) mask_edge = mask_edges.sum(2).astype(bool) @@ -194,9 +181,7 @@ def edges_from_mask(mask: np.ndarray) 
-> Vxy: return Puv_edges -def refine_mask_perimeter( - loop_outline_exp: LoopXY, Puv_edges: Vxy, d_ax: float, d_perp: float -) -> LoopXY: +def refine_mask_perimeter(loop_outline_exp: LoopXY, Puv_edges: Vxy, d_ax: float, d_perp: float) -> LoopXY: """ Given mask edge points and an expected 2D PERIMETER region, this function refines the perimeter region. @@ -235,22 +220,14 @@ def refine_mask_perimeter( # Find points in loop pts_mask = loop.is_inside(Puv_edges) - lines.append( - LineXY.fit_from_points(Puv_edges[pts_mask], neighbor_dist=neighbor_dist) - ) + lines.append(LineXY.fit_from_points(Puv_edges[pts_mask], neighbor_dist=neighbor_dist)) # Create updated region return LoopXY.from_lines(lines) def keep_closest_points( - p1: Vxy, - p2: Vxy, - Puv_edge: Vxy, - Puv_cent: Vxy, - step: float, - d_perp: float, - frac_keep: float, + p1: Vxy, p2: Vxy, Puv_edge: Vxy, Puv_cent: Vxy, step: float, d_perp: float, frac_keep: float ) -> Vxy: """ Keeps points closest to centroid along direction perpendicular to line @@ -316,12 +293,7 @@ def keep_closest_points( def refine_facet_corners( - Puv_facet_corns_exp: Vxy, - Puv_cent: Vxy, - Puv_edges: Vxy, - step: float, - d_perp: float, - frac_keep: float, + Puv_facet_corns_exp: Vxy, Puv_cent: Vxy, Puv_edges: Vxy, step: float, d_perp: float, frac_keep: float ) -> LoopXY: """ Refines the locations of the facet corners using only points closest to the @@ -353,9 +325,7 @@ def refine_facet_corners( p2 = Puv_facet_corns_exp[np.mod(idx + 1, num_corns)] # Keep points closest to centroid, stepping axially between points - Puv_active = keep_closest_points( - p1, p2, Puv_edges, Puv_cent, step, d_perp, frac_keep - ) + Puv_active = keep_closest_points(p1, p2, Puv_edges, Puv_cent, step, d_perp, frac_keep) # Fit active points to line lines.append(LineXY.fit_from_points(Puv_active)) @@ -429,9 +399,7 @@ def calculate_active_pixels_vectors(mask: np.ndarray, camera: Camera) -> Uxyz: return u_active_pixel_pointing_cam # camera coordinates -def rectangle_loop_from_two_points( - p1: Vxy, p2: Vxy, d_ax: float, d_perp: float -) -> LoopXY: +def rectangle_loop_from_two_points(p1: Vxy, p2: Vxy, d_ax: float, d_perp: float) -> LoopXY: """ Creates a rectangular loop from two points, and two distances. 
@@ -494,9 +462,7 @@ def detect_blobs(image: np.ndarray, params: cv.SimpleBlobDetector_Params) -> Vxy return Vxy(np.array(pts).T) -def detect_blobs_annotate( - image: np.ndarray, params: cv.SimpleBlobDetector_Params -) -> np.ndarray: +def detect_blobs_annotate(image: np.ndarray, params: cv.SimpleBlobDetector_Params) -> np.ndarray: """Detects blobs in image Parameters @@ -512,18 +478,10 @@ def detect_blobs_annotate( Annotated image of blobs """ keypoints = _detect_blobs_keypoints(image, params) - return cv.drawKeypoints( - image, - keypoints, - np.array([]), - (0, 0, 255), - cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, - ) - - -def _detect_blobs_keypoints( - image: np.ndarray, params: cv.SimpleBlobDetector_Params -) -> list[cv.KeyPoint]: + return cv.drawKeypoints(image, keypoints, np.array([]), (0, 0, 255), cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) + + +def _detect_blobs_keypoints(image: np.ndarray, params: cv.SimpleBlobDetector_Params) -> list[cv.KeyPoint]: """Detects blobs in image Parameters diff --git a/opencsp/app/sofast/lib/process_optics_geometry.py b/opencsp/app/sofast/lib/process_optics_geometry.py index 98520b1a..ede1c740 100644 --- a/opencsp/app/sofast/lib/process_optics_geometry.py +++ b/opencsp/app/sofast/lib/process_optics_geometry.py @@ -91,12 +91,8 @@ def process_singlefacet_geometry( ori = copy(orientation) # Get optic data - v_facet_corners: Vxyz = ( - facet_data.v_facet_corners - ) # Corners of facet in facet coordinates - v_centroid_facet: Vxyz = ( - facet_data.v_facet_centroid - ) # Centroid of facet in facet coordinates + v_facet_corners: Vxyz = facet_data.v_facet_corners # Corners of facet in facet coordinates + v_centroid_facet: Vxyz = facet_data.v_facet_centroid # Centroid of facet in facet coordinates # Save mask raw data_image_processing_general.mask_raw = mask_raw @@ -136,29 +132,20 @@ def process_singlefacet_geometry( debug.figures.append(fig) plt.imshow(mask_raw) plt.scatter( - *camera.project( - v_cam_optic_centroid_cam_exp, Rotation.identity(), Vxyz((0, 0, 0)) - ).data, - marker='.' + *camera.project(v_cam_optic_centroid_cam_exp, Rotation.identity(), Vxyz((0, 0, 0))).data, marker='.' 
) plt.title('Expected Optic Centroid') # Find expected orientation of optic - r_cam_optic_exp = sp.r_from_position( - v_cam_optic_centroid_cam_exp, ori.v_cam_screen_cam - ) + r_cam_optic_exp = sp.r_from_position(v_cam_optic_centroid_cam_exp, ori.v_cam_screen_cam) data_geometry_general.r_optic_cam_exp = r_cam_optic_exp.inv() # Find expected position of optic origin - v_cam_optic_cam_exp = v_cam_optic_centroid_cam_exp - v_centroid_facet.rotate( - r_cam_optic_exp.inv() - ) + v_cam_optic_cam_exp = v_cam_optic_centroid_cam_exp - v_centroid_facet.rotate(r_cam_optic_exp.inv()) data_geometry_general.v_cam_optic_cam_exp = v_cam_optic_cam_exp # Find expected optic loop in pixels - v_optic_corners_image_exp = camera.project( - v_facet_corners, r_cam_optic_exp.inv(), v_cam_optic_cam_exp - ) + v_optic_corners_image_exp = camera.project(v_facet_corners, r_cam_optic_exp.inv(), v_cam_optic_cam_exp) loop_optic_image_exp = LoopXY.from_vertices(v_optic_corners_image_exp) data_image_processing_general.loop_optic_image_exp = loop_optic_image_exp @@ -171,13 +158,8 @@ def process_singlefacet_geometry( plt.title('Expected Optic Corners') # Refine locations of optic corners with mask - prs = [ - params.perimeter_refine_axial_search_dist, - params.perimeter_refine_perpendicular_search_dist, - ] - loop_facet_image_refine = ip.refine_mask_perimeter( - loop_optic_image_exp, v_edges_image, *prs - ) + prs = [params.perimeter_refine_axial_search_dist, params.perimeter_refine_perpendicular_search_dist] + loop_facet_image_refine = ip.refine_mask_perimeter(loop_optic_image_exp, v_edges_image, *prs) data_image_processing_facet.loop_facet_image_refine = loop_facet_image_refine # Plot refined optic corners @@ -211,25 +193,16 @@ def process_singlefacet_geometry( fig = plt.figure() debug.figures.append(fig) plt.imshow(mask_raw) - pts_reproj = camera.project( - facet_data.v_facet_corners, - r_cam_optic_refine_1.inv(), - v_cam_optic_cam_refine_1, - ) + pts_reproj = camera.project(facet_data.v_facet_corners, r_cam_optic_refine_1.inv(), v_cam_optic_cam_refine_1) _plot_labeled_points(pts_reproj) plt.title('Reprojected Points 1') # Calculate refined measure point vector in optic coordinates - v_measure_point_optic_cam_refine_1 = v_measure_point_facet.rotate( - r_optic_cam_refine_1 - ) + v_measure_point_optic_cam_refine_1 = v_measure_point_facet.rotate(r_optic_cam_refine_1) # Refine V with measured optic to display distance v_cam_optic_cam_refine_2 = sp.refine_v_distance( - v_cam_optic_cam_refine_1, - optic_screen_dist, - ori.v_cam_screen_cam, - v_measure_point_optic_cam_refine_1, + v_cam_optic_cam_refine_1, optic_screen_dist, ori.v_cam_screen_cam, v_measure_point_optic_cam_refine_1 ) data_geometry_general.v_cam_optic_cam_refine_2 = v_cam_optic_cam_refine_2 @@ -238,11 +211,7 @@ def process_singlefacet_geometry( fig = plt.figure() debug.figures.append(fig) plt.imshow(mask_raw) - pts_reproj = camera.project( - facet_data.v_facet_corners, - r_cam_optic_refine_1.inv(), - v_cam_optic_cam_refine_2, - ) + pts_reproj = camera.project(facet_data.v_facet_corners, r_cam_optic_refine_1.inv(), v_cam_optic_cam_refine_2) _plot_labeled_points(pts_reproj) plt.title('Reprojected Points 2') @@ -250,40 +219,26 @@ def process_singlefacet_geometry( ori.orient_optic_cam(r_cam_optic_refine_1, v_cam_optic_cam_refine_2) # Calculate measure point pointing direction - u_cam_measure_point_facet = Uxyz( - (ori.v_cam_optic_optic + v_measure_point_facet).data - ) + u_cam_measure_point_facet = Uxyz((ori.v_cam_optic_optic + v_measure_point_facet).data) 
data_geometry_facet.u_cam_measure_point_facet = u_cam_measure_point_facet # Calculate errors from using only facet corners error_optic_screen_dist_1 = sp.distance_error( - ori.v_cam_screen_cam, - v_cam_optic_cam_refine_1 + v_measure_point_optic_cam_refine_1, - optic_screen_dist, + ori.v_cam_screen_cam, v_cam_optic_cam_refine_1 + v_measure_point_optic_cam_refine_1, optic_screen_dist ) data_error.error_optic_screen_dist_1 = error_optic_screen_dist_1 error_reprojection_1 = sp.reprojection_error( - camera, - v_facet_corners, - loop_facet_image_refine.vertices, - r_optic_cam_refine_1, - v_cam_optic_cam_refine_1, + camera, v_facet_corners, loop_facet_image_refine.vertices, r_optic_cam_refine_1, v_cam_optic_cam_refine_1 ) data_error.error_reprojection_1 = error_reprojection_1 # Calculate errors after refining with measured distance error_optic_screen_dist_2 = sp.distance_error( - ori.v_cam_screen_cam, - v_cam_optic_cam_refine_2 + v_measure_point_optic_cam_refine_1, - optic_screen_dist, + ori.v_cam_screen_cam, v_cam_optic_cam_refine_2 + v_measure_point_optic_cam_refine_1, optic_screen_dist ) data_error.error_optic_screen_dist_2 = error_optic_screen_dist_2 error_reprojection_2 = sp.reprojection_error( - camera, - v_facet_corners, - loop_facet_image_refine.vertices, - r_optic_cam_refine_1, - v_cam_optic_cam_refine_2, + camera, v_facet_corners, loop_facet_image_refine.vertices, r_optic_cam_refine_1, v_cam_optic_cam_refine_2 ) data_error.error_reprojection_2 = error_reprojection_2 @@ -348,9 +303,7 @@ def process_undefined_geometry( Geometric/positional errors and reprojection errors associated with solving for facet location. """ if debug.debug_active: - lt.debug( - 'process_optics_geometry debug on, but is not yet supported for undefined mirrors.' - ) + lt.debug('process_optics_geometry debug on, but is not yet supported for undefined mirrors.') # Define data classes data_geometry_general = cdc.CalculationDataGeometryGeneral() @@ -376,9 +329,7 @@ def process_undefined_geometry( data_image_processing_general.v_mask_centroid_image = v_mask_centroid_image # Find position of optic centroid in space - v_cam_optic_cam = sp.t_from_distance( - v_mask_centroid_image, optic_screen_dist, camera, orientation.v_cam_screen_cam - ) + v_cam_optic_cam = sp.t_from_distance(v_mask_centroid_image, optic_screen_dist, camera, orientation.v_cam_screen_cam) data_geometry_general.v_cam_optic_cam_exp = v_cam_optic_cam # Find orientation of optic @@ -386,9 +337,7 @@ def process_undefined_geometry( data_geometry_general.r_optic_cam_exp = r_cam_optic.inv() # Orient optic - spatial_orientation = SpatialOrientation( - orientation.r_cam_screen, orientation.v_cam_screen_cam - ) + spatial_orientation = SpatialOrientation(orientation.r_cam_screen, orientation.v_cam_screen_cam) spatial_orientation.orient_optic_cam(r_cam_optic, v_cam_optic_cam) # Calculate measure point pointing direction @@ -480,12 +429,8 @@ def process_multifacet_geometry( ensemble_data.v_facet_locations ) # Locations of facet origins relative to ensemble origin in ensemble coordinates r_facet_ensemble = ensemble_data.r_facet_ensemble # Facet to ensemble rotation - ensemble_corns_indices = ( - ensemble_data.ensemble_perimeter - ) # [(facet_idx, facet_corner_idx), ...], integers - v_centroid_ensemble = ( - ensemble_data.v_centroid_ensemble - ) # Centroid of ensemble in ensemble coordinates + ensemble_corns_indices = ensemble_data.ensemble_perimeter # [(facet_idx, facet_corner_idx), ...], integers + v_centroid_ensemble = ensemble_data.v_centroid_ensemble # Centroid of 
ensemble in ensemble coordinates # Get number of facets num_facets = len(v_facet_locs_ensemble) @@ -493,41 +438,30 @@ def process_multifacet_geometry( # Define data classes data_geometry_general = cdc.CalculationDataGeometryGeneral() data_image_processing_general = cdc.CalculationImageProcessingGeneral() - data_geometry_facet = [ - cdc.CalculationDataGeometryFacet() for _ in range(num_facets) - ] - data_image_processing_facet = [ - cdc.CalculationImageProcessingFacet() for _ in range(num_facets) - ] + data_geometry_facet = [cdc.CalculationDataGeometryFacet() for _ in range(num_facets)] + data_image_processing_facet = [cdc.CalculationImageProcessingFacet() for _ in range(num_facets)] data_error = cdc.CalculationError() # Convert facet corners to ensemble coordinates v_ensemble_facet_corns = [] for idx in range(num_facets): v_ensemble_facet_corns.append( - v_facet_locs_ensemble[idx] - + v_facet_corners_facet[idx].rotate(r_facet_ensemble[idx]) + v_facet_locs_ensemble[idx] + v_facet_corners_facet[idx].rotate(r_facet_ensemble[idx]) ) # Calculate ensemble corners in ensemble coordinates v_ensemble_corns_ensemble = [] - for r_facet_ensemble_cur, (idx_facet, idx_corn) in zip( - r_facet_ensemble, ensemble_corns_indices - ): + for r_facet_ensemble_cur, (idx_facet, idx_corn) in zip(r_facet_ensemble, ensemble_corns_indices): v_ensemble_corns_ensemble.append( ( v_facet_locs_ensemble[idx_facet] - + v_facet_corners_facet[idx_facet][idx_corn].rotate( - r_facet_ensemble_cur - ) + + v_facet_corners_facet[idx_facet][idx_corn].rotate(r_facet_ensemble_cur) ).data ) v_ensemble_corns_ensemble = Vxyz(np.concatenate(v_ensemble_corns_ensemble, axis=1)) # Concatenate all facet corners - v_ensemble_facet_corns_all = Vxyz( - np.concatenate([V.data for V in v_ensemble_facet_corns], axis=1) - ) + v_ensemble_facet_corns_all = Vxyz(np.concatenate([V.data for V in v_ensemble_facet_corns], axis=1)) # Calculate raw mask data_image_processing_general.mask_raw = mask_raw @@ -554,15 +488,11 @@ def process_multifacet_geometry( data_geometry_general.v_cam_optic_centroid_cam_exp = v_cam_ensemble_cent_cam_exp # Calculate expected orientation of facet ensemble - r_cam_ensemble_exp = sp.r_from_position( - v_cam_ensemble_cent_cam_exp, orientation.v_cam_screen_cam - ) + r_cam_ensemble_exp = sp.r_from_position(v_cam_ensemble_cent_cam_exp, orientation.v_cam_screen_cam) data_geometry_general.r_optic_cam_exp = r_cam_ensemble_exp.inv() # Calculate expected position of ensemble origin - v_cam_ensemble_cam_exp = v_cam_ensemble_cent_cam_exp - v_centroid_ensemble.rotate( - r_cam_ensemble_exp.inv() - ) + v_cam_ensemble_cam_exp = v_cam_ensemble_cent_cam_exp - v_centroid_ensemble.rotate(r_cam_ensemble_exp.inv()) data_geometry_general.v_cam_optic_cam_exp = v_cam_ensemble_cam_exp # Project perimeter points @@ -581,13 +511,8 @@ def process_multifacet_geometry( plt.title('Expected Perimeter Points') # Refine perimeter points - args = [ - params.perimeter_refine_axial_search_dist, - params.perimeter_refine_perpendicular_search_dist, - ] - loop_ensemble_image_refine = ip.refine_mask_perimeter( - loop_ensemble_exp, v_edges_image, *args - ) + args = [params.perimeter_refine_axial_search_dist, params.perimeter_refine_perpendicular_search_dist] + loop_ensemble_image_refine = ip.refine_mask_perimeter(loop_ensemble_exp, v_edges_image, *args) data_image_processing_general.loop_optic_image_refine = loop_ensemble_image_refine # Plot refined perimeter points @@ -610,19 +535,12 @@ def process_multifacet_geometry( # Calculate expected location of all facet corners 
and centroids v_facet_corners_image_exp = [ - camera.project(P, r_ensemble_cam_refine_1, v_cam_ensemble_cam_refine_1) - for P in v_ensemble_facet_corns + camera.project(P, r_ensemble_cam_refine_1, v_cam_ensemble_cam_refine_1) for P in v_ensemble_facet_corns ] - v_uv_facet_cent_exp = camera.project( - v_facet_locs_ensemble, r_ensemble_cam_refine_1, v_cam_ensemble_cam_refine_1 - ) + v_uv_facet_cent_exp = camera.project(v_facet_locs_ensemble, r_ensemble_cam_refine_1, v_cam_ensemble_cam_refine_1) for idx in range(num_facets): - data_image_processing_facet[idx].v_facet_corners_image_exp = ( - v_facet_corners_image_exp[idx] - ) - data_image_processing_facet[idx].v_facet_centroid_image_exp = ( - v_uv_facet_cent_exp[idx] - ) + data_image_processing_facet[idx].v_facet_corners_image_exp = v_facet_corners_image_exp[idx] + data_image_processing_facet[idx].v_facet_centroid_image_exp = v_uv_facet_cent_exp[idx] # Refine facet corners args = [ @@ -632,12 +550,7 @@ def process_multifacet_geometry( ] loops_facets_refined: list[LoopXY] = [] for idx in range(num_facets): - loop = ip.refine_facet_corners( - v_facet_corners_image_exp[idx], - v_uv_facet_cent_exp[idx], - v_edges_image, - *args - ) + loop = ip.refine_facet_corners(v_facet_corners_image_exp[idx], v_uv_facet_cent_exp[idx], v_edges_image, *args) loops_facets_refined.append(loop) data_image_processing_facet[idx].loop_facet_image_refine = loop @@ -654,9 +567,7 @@ def process_multifacet_geometry( v_facet_corners_all_image_refine = [] for loop in loops_facets_refined: v_facet_corners_all_image_refine.append(loop.vertices.data) - v_facet_corners_all_image_refine = Vxy( - np.concatenate(v_facet_corners_all_image_refine, axis=1) - ) + v_facet_corners_all_image_refine = Vxy(np.concatenate(v_facet_corners_all_image_refine, axis=1)) # Calculate fitted masks mask_fitted = np.zeros(mask_raw.shape + (num_facets,), dtype=bool) @@ -686,18 +597,13 @@ def process_multifacet_geometry( # Refine T with measured distance v_cam_ensemble_cam_refine_3 = sp.refine_v_distance( - v_cam_ensemble_cam_refine_2, - optic_screen_dist, - orientation.v_cam_screen_cam, - v_meas_pt_ensemble_cam_refine_2, + v_cam_ensemble_cam_refine_2, optic_screen_dist, orientation.v_cam_screen_cam, v_meas_pt_ensemble_cam_refine_2 ) data_geometry_general.v_cam_optic_cam_refine_3 = v_cam_ensemble_cam_refine_3 # Calculate error 1 (R/T calculated using only ensemble perimeter points) error_optic_screen_dist_1 = sp.distance_error( - orientation.v_cam_screen_cam, - v_cam_ensemble_cam_refine_1 + v_meas_pt_ensemble_cam_refine_1, - optic_screen_dist, + orientation.v_cam_screen_cam, v_cam_ensemble_cam_refine_1 + v_meas_pt_ensemble_cam_refine_1, optic_screen_dist ) data_error.error_optic_screen_dist_1 = error_optic_screen_dist_1 error_reprojection_1 = sp.reprojection_error( @@ -711,9 +617,7 @@ def process_multifacet_geometry( # Calculate error 2 (R/T calculated using all facet corners) error_optic_screen_dist_2 = sp.distance_error( - orientation.v_cam_screen_cam, - v_cam_ensemble_cam_refine_2 + v_meas_pt_ensemble_cam_refine_2, - optic_screen_dist, + orientation.v_cam_screen_cam, v_cam_ensemble_cam_refine_2 + v_meas_pt_ensemble_cam_refine_2, optic_screen_dist ) data_error.error_optic_screen_dist_2 = error_optic_screen_dist_2 error_reprojection_2 = sp.reprojection_error( @@ -727,9 +631,7 @@ def process_multifacet_geometry( # Calculate error 3 (T refined using measured distance) error_optic_screen_dist_3 = sp.distance_error( - orientation.v_cam_screen_cam, - v_cam_ensemble_cam_refine_3 + 
v_meas_pt_ensemble_cam_refine_2, - optic_screen_dist, + orientation.v_cam_screen_cam, v_cam_ensemble_cam_refine_3 + v_meas_pt_ensemble_cam_refine_2, optic_screen_dist ) data_error.error_optic_screen_dist_3 = error_optic_screen_dist_3 error_reprojection_3 = sp.reprojection_error( @@ -744,14 +646,10 @@ def process_multifacet_geometry( # Spatially orient facets and the setup for idx in range(num_facets): # Calculate ensemble to facet vector in camera coordinates - v_ensemble_facet_cam = v_facet_locs_ensemble[idx].rotate( - r_ensemble_cam_refine_2 - ) + v_ensemble_facet_cam = v_facet_locs_ensemble[idx].rotate(r_ensemble_cam_refine_2) # Instantiate spatial orientation object - facet_ori = SpatialOrientation( - orientation.r_cam_screen, orientation.v_cam_screen_cam - ) + facet_ori = SpatialOrientation(orientation.r_cam_screen, orientation.v_cam_screen_cam) # Orient facet r_cam_facet = r_facet_ensemble[idx].inv() * r_cam_ensemble_refine_2 @@ -765,9 +663,7 @@ def process_multifacet_geometry( v_cam_screen_optic = facet_ori.v_cam_screen_cam.rotate(facet_ori.r_cam_optic) dist = (v_cam_meas_pt_facet - v_cam_screen_optic).magnitude()[0] - data_geometry_facet[idx].u_cam_measure_point_facet = Uxyz( - v_cam_meas_pt_facet.data - ) + data_geometry_facet[idx].u_cam_measure_point_facet = Uxyz(v_cam_meas_pt_facet.data) data_geometry_facet[idx].measure_point_screen_distance = dist data_geometry_facet[idx].spatial_orientation = facet_ori data_geometry_facet[idx].v_align_point_facet = v_facet_centroid_facet[idx] diff --git a/opencsp/app/sofast/lib/save_DisplayShape_file.py b/opencsp/app/sofast/lib/save_DisplayShape_file.py index bca6eb24..509ecfa4 100644 --- a/opencsp/app/sofast/lib/save_DisplayShape_file.py +++ b/opencsp/app/sofast/lib/save_DisplayShape_file.py @@ -10,11 +10,7 @@ def save_DisplayShape_file( - screen_distortion_data: dict, - name: str, - rvec: ndarray, - tvec: ndarray, - file_save: str, + screen_distortion_data: dict, name: str, rvec: ndarray, tvec: ndarray, file_save: str ) -> None: """Constructs and saves DisplayShape file @@ -41,9 +37,7 @@ def save_DisplayShape_file( # Gather display grid data grid_data = dict( - screen_model='distorted3D', - xy_screen_fraction=pts_xy_screen_fraction, - xyz_screen_coords=pts_xyz_screen_coords, + screen_model='distorted3D', xy_screen_fraction=pts_xy_screen_fraction, xyz_screen_coords=pts_xyz_screen_coords ) # Create display object diff --git a/opencsp/app/sofast/lib/spatial_processing.py b/opencsp/app/sofast/lib/spatial_processing.py index 92dbad3e..a7e0484a 100644 --- a/opencsp/app/sofast/lib/spatial_processing.py +++ b/opencsp/app/sofast/lib/spatial_processing.py @@ -9,9 +9,7 @@ import opencsp.common.lib.tool.log_tools as lt -def t_from_distance( - Puv_cam: Vxy, dist: float, camera: Camera, v_cam_screen_cam: Vxyz -) -> Vxyz: +def t_from_distance(Puv_cam: Vxy, dist: float, camera: Camera, v_cam_screen_cam: Vxyz) -> Vxyz: """ Calculates the 3D point given a 2D camera pixel location and a distance from the center of the screen. 
@@ -37,11 +35,7 @@ def t_from_distance( u_cam = camera.vector_from_pixel(Puv_cam).as_Vxyz() # Calculate location of point relative to camera - a = np.sqrt( - u_cam.dot(v_cam_screen_cam) ** 2 - - v_cam_screen_cam.dot(v_cam_screen_cam) - + dist**2 - ) + a = np.sqrt(u_cam.dot(v_cam_screen_cam) ** 2 - v_cam_screen_cam.dot(v_cam_screen_cam) + dist**2) cam_facet_dist = u_cam.dot(v_cam_screen_cam) + a # Calculate position of point relative to camera @@ -87,10 +81,7 @@ def r_from_position(v_cam_optic_cam: Vxyz, v_cam_screen_cam: Vxyz) -> Rotation: def refine_v_distance( - v_cam_optic_cam: Vxyz, - optic_screen_dist: float, - v_cam_screen_cam: Vxyz, - v_meas_pt_optic_cam: Vxyz, + v_cam_optic_cam: Vxyz, optic_screen_dist: float, v_cam_screen_cam: Vxyz, v_meas_pt_optic_cam: Vxyz ) -> Vxyz: """ Refines the camera to optic translation vector so that measured optic @@ -128,9 +119,7 @@ def error_func(scale): return v_cam_optic_cam * out.x -def calc_rt_from_img_pts( - pts_image: Vxy, pts_object: Vxyz, camera: Camera -) -> tuple[Rotation, Vxyz]: +def calc_rt_from_img_pts(pts_image: Vxy, pts_object: Vxyz, camera: Camera) -> tuple[Rotation, Vxyz]: """ Calculates Translation and Rotation given object and image points. @@ -151,12 +140,7 @@ def calc_rt_from_img_pts( Camera-to-object cector in camera coordinates. """ - ret, rvec, tvec = cv.solvePnP( - pts_object.data.T, - pts_image.data.T, - camera.intrinsic_mat, - camera.distortion_coef, - ) + ret, rvec, tvec = cv.solvePnP(pts_object.data.T, pts_image.data.T, camera.intrinsic_mat, camera.distortion_coef) if not ret: lt.error_and_raise(ValueError, 'Could not find position of optic relative to camera.') @@ -165,11 +149,7 @@ def calc_rt_from_img_pts( def calc_r_from_img_pts( - Puv_image: Vxy, - P_object: Vxyz, - r_object_cam_0: Rotation, - v_cam_object_cam: Vxyz, - camera: Camera, + Puv_image: Vxy, P_object: Vxyz, r_object_cam_0: Rotation, v_cam_object_cam: Vxyz, camera: Camera ) -> Rotation: """ Calculates Rotation from points in image. @@ -197,9 +177,7 @@ def calc_r_from_img_pts( def error_func(rvec): # Calculate reprojection error r_object_cam = Rotation.from_rotvec(rvec) - reproj_error = reprojection_error( - camera, P_object, Puv_image, r_object_cam, v_cam_object_cam - ) # RSS pixels + reproj_error = reprojection_error(camera, P_object, Puv_image, r_object_cam, v_cam_object_cam) # RSS pixels return reproj_error @@ -209,9 +187,7 @@ def error_func(rvec): return Rotation.from_rotvec(out.x) -def distance_error( - v_cam_screen_cam: Vxyz, v_cam_meas_pt_cam: Vxyz, dist: float -) -> float: +def distance_error(v_cam_screen_cam: Vxyz, v_cam_meas_pt_cam: Vxyz, dist: float) -> float: """ Calculates optic to screen distance error as Error = MeasuredDistance - CalculatedDistance @@ -240,11 +216,7 @@ def distance_error( def reprojection_error( - camera: Camera, - P_object: Vxyz, - Puv_image: Vxy, - r_object_cam: Rotation, - v_cam_object_cam: Vxyz, + camera: Camera, P_object: Vxyz, Puv_image: Vxy, r_object_cam: Rotation, v_cam_object_cam: Vxyz ) -> float: """ Calculates reprojection error as RMS pixels. 
diff --git a/opencsp/app/sofast/lib/visualize_setup.py b/opencsp/app/sofast/lib/visualize_setup.py index d6240ba0..c874395f 100644 --- a/opencsp/app/sofast/lib/visualize_setup.py +++ b/opencsp/app/sofast/lib/visualize_setup.py @@ -56,41 +56,20 @@ def visualize_setup( # Calculate camera FOV x = camera.image_shape_xy[0] y = camera.image_shape_xy[1] - v_cam_fov_screen = ( - camera.vector_from_pixel(Vxy(([0, 0, x, x, 0], [0, y, y, 0, 0]))).as_Vxyz() - * length_z_axis_cam - ) + v_cam_fov_screen = camera.vector_from_pixel(Vxy(([0, 0, x, x, 0], [0, y, y, 0, 0]))).as_Vxyz() * length_z_axis_cam v_cam_fov_screen.rotate_in_place(display.r_cam_screen) v_cam_fov_screen += v_screen_cam_screen # Calculate camera X/Y axes - v_cam_x_screen = ( - Vxyz(([0, axes_length], [0, 0], [0, 0])).rotate(display.r_cam_screen) - + v_screen_cam_screen - ) - v_cam_y_screen = ( - Vxyz(([0, 0], [0, axes_length], [0, 0])).rotate(display.r_cam_screen) - + v_screen_cam_screen - ) - v_cam_z_screen = ( - Vxyz(([0, 0], [0, 0], [0, length_z_axis_cam])).rotate(display.r_cam_screen) - + v_screen_cam_screen - ) + v_cam_x_screen = Vxyz(([0, axes_length], [0, 0], [0, 0])).rotate(display.r_cam_screen) + v_screen_cam_screen + v_cam_y_screen = Vxyz(([0, 0], [0, axes_length], [0, 0])).rotate(display.r_cam_screen) + v_screen_cam_screen + v_cam_z_screen = Vxyz(([0, 0], [0, 0], [0, length_z_axis_cam])).rotate(display.r_cam_screen) + v_screen_cam_screen # Calculate object axes if v_screen_object_screen is not None: - v_obj_x_screen = ( - Vxyz(([0, axes_length], [0, 0], [0, 0])).rotate(r_object_screen) - + v_screen_object_screen - ) - v_obj_y_screen = ( - Vxyz(([0, 0], [0, axes_length], [0, 0])).rotate(r_object_screen) - + v_screen_object_screen - ) - v_obj_z_screen = ( - Vxyz(([0, 0], [0, 0], [0, axes_length])).rotate(r_object_screen) - + v_screen_object_screen - ) + v_obj_x_screen = Vxyz(([0, axes_length], [0, 0], [0, 0])).rotate(r_object_screen) + v_screen_object_screen + v_obj_y_screen = Vxyz(([0, 0], [0, axes_length], [0, 0])).rotate(r_object_screen) + v_screen_object_screen + v_obj_z_screen = Vxyz(([0, 0], [0, 0], [0, axes_length])).rotate(r_object_screen) + v_screen_object_screen # Calculate screen outline p_screen_outline = display.interp_func(Vxy(([0, 1, 1, 0, 0], [0, 0, 1, 1, 0]))) @@ -108,52 +87,28 @@ def visualize_setup( obj_y = v_screen_object_screen.y obj_z = v_screen_object_screen.z lx1 = max( - np.nanmax( - np.concatenate( - (v_screen_cam_screen.x, v_cam_fov_screen.x, p_screen_outline.x, obj_x) - ) - ), + np.nanmax(np.concatenate((v_screen_cam_screen.x, v_cam_fov_screen.x, p_screen_outline.x, obj_x))), min_axis_length_screen, ) ly1 = max( - np.nanmax( - np.concatenate( - (v_screen_cam_screen.y, v_cam_fov_screen.y, p_screen_outline.y, obj_y) - ) - ), + np.nanmax(np.concatenate((v_screen_cam_screen.y, v_cam_fov_screen.y, p_screen_outline.y, obj_y))), min_axis_length_screen, ) lz1 = max( - np.nanmax( - np.concatenate( - (v_screen_cam_screen.z, v_cam_fov_screen.z, p_screen_outline.z, obj_z) - ) - ), + np.nanmax(np.concatenate((v_screen_cam_screen.z, v_cam_fov_screen.z, p_screen_outline.z, obj_z))), min_axis_length_screen, ) # Define negative xyz screen axes extent lx2 = min( - np.nanmin( - np.concatenate( - (v_screen_cam_screen.x, v_cam_fov_screen.x, p_screen_outline.x, obj_x) - ) - ), + np.nanmin(np.concatenate((v_screen_cam_screen.x, v_cam_fov_screen.x, p_screen_outline.x, obj_x))), -min_axis_length_screen, ) ly2 = min( - np.nanmin( - np.concatenate( - (v_screen_cam_screen.y, v_cam_fov_screen.y, p_screen_outline.y, obj_y) - ) - 
), + np.nanmin(np.concatenate((v_screen_cam_screen.y, v_cam_fov_screen.y, p_screen_outline.y, obj_y))), -min_axis_length_screen, ) lz2 = min( - np.nanmin( - np.concatenate( - (v_screen_cam_screen.z, v_cam_fov_screen.z, p_screen_outline.z, obj_z) - ) - ), + np.nanmin(np.concatenate((v_screen_cam_screen.z, v_cam_fov_screen.z, p_screen_outline.z, obj_z))), -min_axis_length_screen, ) # Add screen axes diff --git a/opencsp/app/sofast/test/test_CalibrateDisplayShape.py b/opencsp/app/sofast/test/test_CalibrateDisplayShape.py index a94b079f..d3446582 100644 --- a/opencsp/app/sofast/test/test_CalibrateDisplayShape.py +++ b/opencsp/app/sofast/test/test_CalibrateDisplayShape.py @@ -9,13 +9,8 @@ import pytest from opencsp.common.lib.opencsp_path.opencsp_root_path import opencsp_code_dir -from opencsp.app.sofast.lib.CalibrateDisplayShape import ( - CalibrateDisplayShape, - DataInput, -) -from opencsp.app.sofast.lib.MeasurementSofastFringe import ( - MeasurementSofastFringe as Measurement, -) +from opencsp.app.sofast.lib.CalibrateDisplayShape import CalibrateDisplayShape, DataInput +from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement from opencsp.common.lib.camera.Camera import Camera from opencsp.common.lib.deflectometry.ImageProjection import ImageProjection from opencsp.common.lib.geometry.Vxyz import Vxyz @@ -72,8 +67,7 @@ def setUpClass(cls): # Test screen distortion information cls.data_exp = load_hdf5_datasets( - ['pts_xy_screen_fraction', 'pts_xyz_screen_coords'], - join(dir_output, 'screen_distortion_data_100_100.h5'), + ['pts_xy_screen_fraction', 'pts_xyz_screen_coords'], join(dir_output, 'screen_distortion_data_100_100.h5') ) cls.data_meas = dist_data @@ -81,16 +75,10 @@ def setUpClass(cls): def test_screen_distortion_data(self): """Tests screen calibration data""" np.testing.assert_allclose( - self.data_meas['pts_xy_screen_fraction'].data, - self.data_exp['pts_xy_screen_fraction'], - rtol=0, - atol=1e-6, + self.data_meas['pts_xy_screen_fraction'].data, self.data_exp['pts_xy_screen_fraction'], rtol=0, atol=1e-6 ) np.testing.assert_allclose( - self.data_meas['pts_xyz_screen_coords'].data, - self.data_exp['pts_xyz_screen_coords'], - rtol=0, - atol=1e-6, + self.data_meas['pts_xyz_screen_coords'].data, self.data_exp['pts_xyz_screen_coords'], rtol=0, atol=1e-6 ) diff --git a/opencsp/app/sofast/test/test_CalibrateSofastFixedDots.py b/opencsp/app/sofast/test/test_CalibrateSofastFixedDots.py index 24f23bd3..bf82c019 100644 --- a/opencsp/app/sofast/test/test_CalibrateSofastFixedDots.py +++ b/opencsp/app/sofast/test/test_CalibrateSofastFixedDots.py @@ -24,30 +24,20 @@ def test_FixedPatternSetupCalibrate(): """Tests dot-location calibration""" # Define dot location images and origins - base_dir = join( - opencsp_code_dir(), - 'test', - 'data', - 'measurements_sofast_fixed', - 'dot_location_calibration', - ) + base_dir = join(opencsp_code_dir(), 'test', 'data', 'measurements_sofast_fixed', 'dot_location_calibration') files = [ join(base_dir, 'measurements/images/DSC03965.JPG'), join(base_dir, 'measurements/images/DSC03967.JPG'), join(base_dir, 'measurements/images/DSC03970.JPG'), join(base_dir, 'measurements/images/DSC03972.JPG'), ] - origins = ( - np.array(([4950, 4610, 4221, 3617], [3359, 3454, 3467, 3553]), dtype=float) / 4 - ) + origins = np.array(([4950, 4610, 4221, 3617], [3359, 3454, 3467, 3553]), dtype=float) / 4 origins = Vxy(origins.astype(int)) # Define other files file_camera_marker = join(base_dir, 'measurements/camera_image_calibration.h5') 
file_xyz_points = join(base_dir, 'measurements/point_locations.csv') - file_fpd_dot_locs_exp = join( - base_dir, 'calculations/fixed_pattern_dot_locations.h5' - ) + file_fpd_dot_locs_exp = join(base_dir, 'calculations/fixed_pattern_dot_locations.h5') dir_save = join(dirname(__file__), 'data/output/dot_location_calibration') if not exists(dir_save): diff --git a/opencsp/app/sofast/test/test_Display.py b/opencsp/app/sofast/test/test_Display.py index 983467cb..f6fbefbb 100644 --- a/opencsp/app/sofast/test/test_Display.py +++ b/opencsp/app/sofast/test/test_Display.py @@ -20,23 +20,15 @@ def setUpClass(cls): LZ = 3.0 # meters # Define test points - cls.test_Vxy_pts = Vxy( - ([0, 0.5, 1, 0, 0.5, 1, 0, 0.5, 1], [0, 0, 0, 0.5, 0.5, 0.5, 1, 1, 1]) - ) + cls.test_Vxy_pts = Vxy(([0, 0.5, 1, 0, 0.5, 1, 0, 0.5, 1], [0, 0, 0, 0.5, 0.5, 0.5, 1, 1, 1])) # Define rectangular input data - cls.grid_data_rect2D = { - 'screen_x': LX, - 'screen_y': LY, - 'screen_model': 'rectangular2D', - } + cls.grid_data_rect2D = {'screen_x': LX, 'screen_y': LY, 'screen_model': 'rectangular2D'} # Define 2D input data cls.grid_data_2D = { 'xy_screen_fraction': Vxy(([0, 1, 0, 1], [0, 0, 1, 1])), - 'xy_screen_coords': Vxy( - ([-LX / 2, LX / 2, -LX / 2, LX / 2], [-LY / 2, -LY / 2, LY / 2, LY / 2]) - ), + 'xy_screen_coords': Vxy(([-LX / 2, LX / 2, -LX / 2, LX / 2], [-LY / 2, -LY / 2, LY / 2, LY / 2])), 'screen_model': 'distorted2D', } @@ -44,11 +36,7 @@ def setUpClass(cls): cls.grid_data_3D = { 'xy_screen_fraction': Vxy(([0, 1, 0, 1], [0, 0, 1, 1])), 'xyz_screen_coords': Vxyz( - ( - [-LX / 2, LX / 2, -LX / 2, LX / 2], - [-LY / 2, -LY / 2, LY / 2, LY / 2], - [LZ, LZ, 0, 0], - ) + ([-LX / 2, LX / 2, -LX / 2, LX / 2], [-LY / 2, -LY / 2, LY / 2, LY / 2], [LZ, LZ, 0, 0]) ), 'screen_model': 'distorted3D', } @@ -76,17 +64,13 @@ def test_rectangular2D(self): v_cam_screen_screen = Vxyz((0, 0, 1)) r_screen_cam = Rotation.from_rotvec(np.array([0.0, 0.0, 0.0])) name = 'Test DisplayShape' - disp = DisplayShape( - v_cam_screen_screen, r_screen_cam, self.grid_data_rect2D, name - ) + disp = DisplayShape(v_cam_screen_screen, r_screen_cam, self.grid_data_rect2D, name) # Perform calculation calc = disp.interp_func(self.test_Vxy_pts) # Test - np.testing.assert_allclose( - calc.data, self.exp_Vxy_disp_pts.data, rtol=0, atol=0 - ) + np.testing.assert_allclose(calc.data, self.exp_Vxy_disp_pts.data, rtol=0, atol=0) def test_distorted2D(self): # Instantiate display object @@ -99,9 +83,7 @@ def test_distorted2D(self): calc = disp.interp_func(self.test_Vxy_pts) # Test - np.testing.assert_allclose( - calc.data, self.exp_Vxy_disp_pts.data, rtol=0, atol=1e-7 - ) + np.testing.assert_allclose(calc.data, self.exp_Vxy_disp_pts.data, rtol=0, atol=1e-7) def test_distorted3D(self): # Instantiate display object @@ -114,9 +96,7 @@ def test_distorted3D(self): calc = disp.interp_func(self.test_Vxy_pts) # Test - np.testing.assert_allclose( - calc.data, self.exp_Vxyz_disp_pts.data, rtol=0, atol=1e-7 - ) + np.testing.assert_allclose(calc.data, self.exp_Vxyz_disp_pts.data, rtol=0, atol=1e-7) if __name__ == '__main__': diff --git a/opencsp/app/sofast/test/test_DotLocationsFixedPattern.py b/opencsp/app/sofast/test/test_DotLocationsFixedPattern.py index 32495c25..f1fcd124 100644 --- a/opencsp/app/sofast/test/test_DotLocationsFixedPattern.py +++ b/opencsp/app/sofast/test/test_DotLocationsFixedPattern.py @@ -14,14 +14,7 @@ def test_DotLocationsFixedPattern(): xv = np.array([-2, -1, 0, 1, 2]) yv = np.array([-3, -2, -1, 0, 1, 2]) x = np.array( - [ - [1, 2, 3, 4, 5], - [1, 2, 3, 4, 
5], - [1, 2, 3, 4, 5], - [1, 2, 3, 4, 5], - [1, 2, 3, 4, 5], - [1, 2, 3, 4, 5], - ], + [[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5]], dtype=float, ) y = np.array( @@ -61,10 +54,7 @@ def test_DotLocationsFixedPattern(): def test_from_Display(): # Load display - file_disp = os.path.join( - opencsp_code_dir(), - 'test/data/measurements_sofast_fringe/display_distorted_3d.h5', - ) + file_disp = os.path.join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe/display_distorted_3d.h5') display = Display.load_from_hdf(file_disp) fp_proj = SystemSofastFixed(30, 30, 5, 5) @@ -86,9 +76,7 @@ def test_from_Display(): ] ) z_exp = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) - xyz_exp = np.concatenate( - (x_exp[..., None], y_exp[..., None], z_exp[..., None]), axis=2 - ) + xyz_exp = np.concatenate((x_exp[..., None], y_exp[..., None], z_exp[..., None]), axis=2) np.testing.assert_allclose(fp.xyz_dot_loc, xyz_exp, atol=1e-6, rtol=0) diff --git a/opencsp/app/sofast/test/test_ImageCalibrationGlobal.py b/opencsp/app/sofast/test/test_ImageCalibrationGlobal.py index caaea162..7b0a3458 100644 --- a/opencsp/app/sofast/test/test_ImageCalibrationGlobal.py +++ b/opencsp/app/sofast/test/test_ImageCalibrationGlobal.py @@ -6,9 +6,7 @@ import numpy as np from opencsp.app.sofast.lib.ImageCalibrationGlobal import ImageCalibrationGlobal -from opencsp.app.sofast.lib.MeasurementSofastFringe import ( - MeasurementSofastFringe as Measurement, -) +from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement from opencsp.common.lib.geometry.Vxyz import Vxyz @@ -16,9 +14,7 @@ class TestImageCalibrationGlobal: @classmethod def setup_class(cls): # Create data - cls.camera_values = np.concatenate(([0.0, 0.0], np.linspace(1, 255, 8))).astype( - 'uint8' - ) + cls.camera_values = np.concatenate(([0.0, 0.0], np.linspace(1, 255, 8))).astype('uint8') cls.display_values = np.linspace(0, 255, 10).astype('uint8') # Create frames @@ -41,35 +37,24 @@ def test_apply_to_images(self): mask_images = mask_images.astype('uint8') # Create fringe images - fringe_images = np.ones((100, 200, 8)) * self.camera_values[2:].reshape( - (1, 1, -1) - ) + fringe_images = np.ones((100, 200, 8)) * self.camera_values[2:].reshape((1, 1, -1)) fringe_images = fringe_images.astype('uint8') # Expected fringe images are same as display values - fringe_images_calibrated_exp = np.ones((100, 200, 8)) * self.display_values[ - 2: - ].astype(float).reshape((1, 1, -1)) + fringe_images_calibrated_exp = np.ones((100, 200, 8)) * self.display_values[2:].astype(float).reshape( + (1, 1, -1) + ) # Create measurement object measurement = Measurement( - mask_images, - fringe_images, - np.array([0.0]), - np.array([0.0]), - Vxyz((0, 0, 0)), - 10, - dt.datetime.now(), - 'Test', + mask_images, fringe_images, np.array([0.0]), np.array([0.0]), Vxyz((0, 0, 0)), 10, dt.datetime.now(), 'Test' ) # Calibrate fringe_images_calibrated = self.calibration.apply_to_images(measurement) # Test - np.testing.assert_allclose( - fringe_images_calibrated_exp, fringe_images_calibrated - ) + np.testing.assert_allclose(fringe_images_calibrated_exp, fringe_images_calibrated) if __name__ == '__main__': diff --git a/opencsp/app/sofast/test/test_SpatialOrientation.py b/opencsp/app/sofast/test/test_SpatialOrientation.py index b0d6e61e..85b3480d 100644 --- a/opencsp/app/sofast/test/test_SpatialOrientation.py +++ b/opencsp/app/sofast/test/test_SpatialOrientation.py @@ -18,9 +18,7 @@ class 
TestSpatialOrientation(unittest.TestCase): @classmethod def setUpClass(cls): # Get test data location - base_dir = os.path.join( - opencsp_code_dir(), 'test/data/measurements_sofast_fringe' - ) + base_dir = os.path.join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe') # Define test data files for single facet processing data_file_facet = os.path.join(base_dir, 'calculations_facet/data.h5') diff --git a/opencsp/app/sofast/test/test_SystemSofastFixed.py b/opencsp/app/sofast/test/test_SystemSofastFixed.py index da203be6..f3523e77 100644 --- a/opencsp/app/sofast/test/test_SystemSofastFixed.py +++ b/opencsp/app/sofast/test/test_SystemSofastFixed.py @@ -8,12 +8,8 @@ def test_FixedPatternDisplay(): pattern = SystemSofastFixed(100, 100, 10, 10) # Test screen fractions - np.testing.assert_allclose( - pattern.x_locs_frac, np.array([0.09, 0.29, 0.49, 0.69, 0.89]), rtol=0, atol=1e-6 - ) - np.testing.assert_allclose( - pattern.y_locs_frac, np.array([0.09, 0.29, 0.49, 0.69, 0.89]), rtol=0, atol=1e-6 - ) + np.testing.assert_allclose(pattern.x_locs_frac, np.array([0.09, 0.29, 0.49, 0.69, 0.89]), rtol=0, atol=1e-6) + np.testing.assert_allclose(pattern.y_locs_frac, np.array([0.09, 0.29, 0.49, 0.69, 0.89]), rtol=0, atol=1e-6) # Test indices np.testing.assert_equal(pattern.x_indices, np.array([-2, -1, 0, 1, 2])) diff --git a/opencsp/app/sofast/test/test_SystemSofastFringe.py b/opencsp/app/sofast/test/test_SystemSofastFringe.py index 7cfb3cdf..3780a1d5 100644 --- a/opencsp/app/sofast/test/test_SystemSofastFringe.py +++ b/opencsp/app/sofast/test/test_SystemSofastFringe.py @@ -23,9 +23,7 @@ def test_SystemSofastFringe(): F = Fringes(periods_x, periods_y) # Instantiate image projection class - im_proj = ImageProjection.load_from_hdf_and_display( - os.path.join(base_dir, 'general/image_projection_test.h5') - ) + im_proj = ImageProjection.load_from_hdf_and_display(os.path.join(base_dir, 'general/image_projection_test.h5')) # Instantiate image acquisition class im_aq = ImageAcquisition() diff --git a/opencsp/app/sofast/test/test_image_processing.py b/opencsp/app/sofast/test/test_image_processing.py index 5b78e117..f568b813 100644 --- a/opencsp/app/sofast/test/test_image_processing.py +++ b/opencsp/app/sofast/test/test_image_processing.py @@ -9,9 +9,7 @@ from scipy.spatial.transform import Rotation from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling -from opencsp.app.sofast.lib.MeasurementSofastFringe import ( - MeasurementSofastFringe as Measurement, -) +from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement from opencsp.app.sofast.lib.ParamsSofastFringe import ParamsSofastFringe from opencsp.common.lib.camera.Camera import Camera import opencsp.app.sofast.lib.image_processing as ip @@ -25,15 +23,11 @@ class TestImageProcessing(unittest.TestCase): @classmethod def setUpClass(cls): # Get test data location - base_dir = os.path.join( - opencsp_code_dir(), 'test/data/measurements_sofast_fringe' - ) + base_dir = os.path.join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe') # Define calculation data files cls.data_file_facet = join(base_dir, 'calculations_facet/data.h5') - cls.data_file_undefined = join( - base_dir, 'calculations_undefined_mirror/data.h5' - ) + cls.data_file_undefined = join(base_dir, 'calculations_undefined_mirror/data.h5') cls.data_file_multi = join(base_dir, 'calculations_facet_ensemble/data.h5') # Define component files @@ -175,12 +169,7 @@ def test_refine_facet_corners(self): 
data_exp.append(data['loop_facet_image_refine']) # Perform calculation - reg = ip.refine_facet_corners( - v_facet_corners_image_exp, - v_facet_centroid_image_exp, - v_edges_image, - *args, - ) + reg = ip.refine_facet_corners(v_facet_corners_image_exp, v_facet_centroid_image_exp, v_edges_image, *args) data_calc.append(reg.vertices.data) data_exp = np.concatenate(data_exp, axis=1) @@ -216,9 +205,7 @@ def test_unwrap_phase(self): v_display_pts = np.array([screen_xs, screen_ys]) # Test - np.testing.assert_allclose( - data['v_screen_points_fractional_screens'], v_display_pts, rtol=1e-06 - ) + np.testing.assert_allclose(data['v_screen_points_fractional_screens'], v_display_pts, rtol=1e-06) def test_calculate_active_pixel_pointing_vectors(self): """Tests image_processing.calculate_active_pixel_pointing_vectors()""" @@ -239,9 +226,7 @@ def test_calculate_active_pixel_pointing_vectors(self): u_pixel_pointing_optic = u_pixel_pointing_cam.rotate(r_cam_optic).data.squeeze() # Test - np.testing.assert_allclose( - data['u_pixel_pointing_facet'], u_pixel_pointing_optic - ) + np.testing.assert_allclose(data['u_pixel_pointing_facet'], u_pixel_pointing_optic) if __name__ == '__main__': diff --git a/opencsp/app/sofast/test/test_integration_multi_facet.py b/opencsp/app/sofast/test/test_integration_multi_facet.py index 0dbf1802..f5c47bf5 100644 --- a/opencsp/app/sofast/test/test_integration_multi_facet.py +++ b/opencsp/app/sofast/test/test_integration_multi_facet.py @@ -33,9 +33,7 @@ def setUpClass(cls, base_dir: str | None = None): """ # Get test data location if base_dir is None: - base_dir = os.path.join( - opencsp_code_dir(), 'test/data/measurements_sofast_fringe' - ) + base_dir = os.path.join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe') # Directory Setup file_dataset = os.path.join(base_dir, 'calculations_facet_ensemble/data.h5') @@ -75,21 +73,15 @@ def setUpClass(cls, base_dir: str | None = None): sofast.params.mask_thresh_active_pixels = params['mask_thresh_active_pixels'] sofast.params.mask_keep_largest_area = params['mask_keep_largest_area'] - sofast.params.geometry_params.perimeter_refine_axial_search_dist = params[ - 'perimeter_refine_axial_search_dist' + sofast.params.geometry_params.perimeter_refine_axial_search_dist = params['perimeter_refine_axial_search_dist'] + sofast.params.geometry_params.perimeter_refine_perpendicular_search_dist = params[ + 'perimeter_refine_perpendicular_search_dist' ] - sofast.params.geometry_params.perimeter_refine_perpendicular_search_dist = ( - params['perimeter_refine_perpendicular_search_dist'] - ) - sofast.params.geometry_params.facet_corns_refine_step_length = params[ - 'facet_corns_refine_step_length' - ] - sofast.params.geometry_params.facet_corns_refine_perpendicular_search_dist = ( - params['facet_corns_refine_perpendicular_search_dist'] - ) - sofast.params.geometry_params.facet_corns_refine_frac_keep = params[ - 'facet_corns_refine_frac_keep' + sofast.params.geometry_params.facet_corns_refine_step_length = params['facet_corns_refine_step_length'] + sofast.params.geometry_params.facet_corns_refine_perpendicular_search_dist = params[ + 'facet_corns_refine_perpendicular_search_dist' ] + sofast.params.geometry_params.facet_corns_refine_frac_keep = params['facet_corns_refine_frac_keep'] # Load array data datasets = [ @@ -114,11 +106,7 @@ def setUpClass(cls, base_dir: str | None = None): f'DataSofastInput/optic_definition/facet_{idx:03d}/v_facet_corners', ] data = load_hdf5_datasets(datasets, file_dataset) - facet_data.append( - DefinitionFacet( - 
Vxyz(data['v_facet_corners']), Vxyz(data['v_centroid_facet']) - ) - ) + facet_data.append(DefinitionFacet(Vxyz(data['v_facet_corners']), Vxyz(data['v_centroid_facet']))) # Load surface data surfaces = [] @@ -142,12 +130,8 @@ def setUpClass(cls, base_dir: str | None = None): cls.file_dataset = file_dataset for idx in range(sofast.num_facets): - cls.data_test['slopes_facet_xy'].append( - sofast.data_characterization_facet[idx].slopes_facet_xy - ) - cls.data_test['surf_coefs_facet'].append( - sofast.data_characterization_facet[idx].surf_coefs_facet - ) + cls.data_test['slopes_facet_xy'].append(sofast.data_characterization_facet[idx].slopes_facet_xy) + cls.data_test['surf_coefs_facet'].append(sofast.data_characterization_facet[idx].surf_coefs_facet) def test_slope(self): for idx in range(self.num_facets): @@ -156,15 +140,11 @@ def test_slope(self): data_calc = self.data_test['slopes_facet_xy'][idx] # Get expected data - datasets = [ - f'DataSofastCalculation/facet/facet_{idx:03d}/slopes_facet_xy' - ] + datasets = [f'DataSofastCalculation/facet/facet_{idx:03d}/slopes_facet_xy'] data = load_hdf5_datasets(datasets, self.file_dataset) # Test - np.testing.assert_allclose( - data['slopes_facet_xy'], data_calc, atol=1e-7, rtol=0 - ) + np.testing.assert_allclose(data['slopes_facet_xy'], data_calc, atol=1e-7, rtol=0) def test_surf_coefs(self): for idx in range(self.num_facets): @@ -173,15 +153,11 @@ def test_surf_coefs(self): data_calc = self.data_test['surf_coefs_facet'][idx] # Get expected data - datasets = [ - f'DataSofastCalculation/facet/facet_{idx:03d}/surf_coefs_facet' - ] + datasets = [f'DataSofastCalculation/facet/facet_{idx:03d}/surf_coefs_facet'] data = load_hdf5_datasets(datasets, self.file_dataset) # Test - np.testing.assert_allclose( - data['surf_coefs_facet'], data_calc, atol=1e-8, rtol=0 - ) + np.testing.assert_allclose(data['surf_coefs_facet'], data_calc, atol=1e-8, rtol=0) if __name__ == '__main__': diff --git a/opencsp/app/sofast/test/test_integration_single_facet.py b/opencsp/app/sofast/test/test_integration_single_facet.py index c7467729..11c79216 100644 --- a/opencsp/app/sofast/test/test_integration_single_facet.py +++ b/opencsp/app/sofast/test/test_integration_single_facet.py @@ -33,14 +33,10 @@ def setUpClass(cls, base_dir: str | None = None): """ # Get test data location if base_dir is None: - base_dir = os.path.join( - opencsp_code_dir(), 'test/data/measurements_sofast_fringe' - ) + base_dir = os.path.join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe') # Find all test files - cls.files_dataset = glob.glob( - os.path.join(base_dir, 'calculations_facet/data*.h5') - ) + cls.files_dataset = glob.glob(os.path.join(base_dir, 'calculations_facet/data*.h5')) if len(cls.files_dataset) == 0: raise ValueError('No single-facet datsets found.') @@ -74,16 +70,11 @@ def setUpClass(cls, base_dir: str | None = None): ], file_dataset, ) - surface_data['robust_least_squares'] = bool( - surface_data['robust_least_squares'] - ) + surface_data['robust_least_squares'] = bool(surface_data['robust_least_squares']) if surface_data['surface_type'] == 'parabolic': surface_data.update( load_hdf5_datasets( - [ - 'DataSofastInput/surface_params/facet_000/initial_focal_lengths_xy' - ], - file_dataset, + ['DataSofastInput/surface_params/facet_000/initial_focal_lengths_xy'], file_dataset ) ) surface = Surface2DParabolic( @@ -92,10 +83,7 @@ def setUpClass(cls, base_dir: str | None = None): surface_data['downsample'], ) else: - surface = Surface2DPlano( - surface_data['robust_least_squares'], - 
surface_data['downsample'], - ) + surface = Surface2DPlano(surface_data['robust_least_squares'], surface_data['downsample']) # Load optic data facet_data = load_hdf5_datasets( @@ -105,10 +93,7 @@ def setUpClass(cls, base_dir: str | None = None): ], file_dataset, ) - facet_data = DefinitionFacet( - Vxyz(facet_data['v_facet_corners']), - Vxyz(facet_data['v_centroid_facet']), - ) + facet_data = DefinitionFacet(Vxyz(facet_data['v_facet_corners']), Vxyz(facet_data['v_centroid_facet'])) # Load sofast params datasets = [ @@ -132,56 +117,42 @@ def setUpClass(cls, base_dir: str | None = None): sofast.params.mask_hist_thresh = params['mask_hist_thresh'] sofast.params.mask_filt_width = params['mask_filt_width'] sofast.params.mask_filt_thresh = params['mask_filt_thresh'] - sofast.params.mask_thresh_active_pixels = params[ - 'mask_thresh_active_pixels' - ] + sofast.params.mask_thresh_active_pixels = params['mask_thresh_active_pixels'] sofast.params.mask_keep_largest_area = params['mask_keep_largest_area'] sofast.params.geometry_params.perimeter_refine_axial_search_dist = params[ 'perimeter_refine_axial_search_dist' ] - sofast.params.geometry_params.perimeter_refine_perpendicular_search_dist = ( - params['perimeter_refine_perpendicular_search_dist'] - ) - sofast.params.geometry_params.facet_corns_refine_step_length = params[ - 'facet_corns_refine_step_length' + sofast.params.geometry_params.perimeter_refine_perpendicular_search_dist = params[ + 'perimeter_refine_perpendicular_search_dist' ] + sofast.params.geometry_params.facet_corns_refine_step_length = params['facet_corns_refine_step_length'] sofast.params.geometry_params.facet_corns_refine_perpendicular_search_dist = params[ 'facet_corns_refine_perpendicular_search_dist' ] - sofast.params.geometry_params.facet_corns_refine_frac_keep = params[ - 'facet_corns_refine_frac_keep' - ] + sofast.params.geometry_params.facet_corns_refine_frac_keep = params['facet_corns_refine_frac_keep'] # Run SOFAST sofast.process_optic_singlefacet(facet_data, surface) # Store test data cls.slopes.append(sofast.data_characterization_facet[0].slopes_facet_xy) - cls.surf_coefs.append( - sofast.data_characterization_facet[0].surf_coefs_facet - ) - cls.v_surf_points_facet.append( - sofast.data_characterization_facet[0].v_surf_points_facet.data - ) + cls.surf_coefs.append(sofast.data_characterization_facet[0].surf_coefs_facet) + cls.v_surf_points_facet.append(sofast.data_characterization_facet[0].v_surf_points_facet.data) def test_slopes(self): datasets = ['DataSofastCalculation/facet/facet_000/slopes_facet_xy'] for idx, file in enumerate(self.files_dataset): with self.subTest(i=idx): data = load_hdf5_datasets(datasets, file) - np.testing.assert_allclose( - data['slopes_facet_xy'], self.slopes[idx], atol=1e-7, rtol=0 - ) + np.testing.assert_allclose(data['slopes_facet_xy'], self.slopes[idx], atol=1e-7, rtol=0) def test_surf_coefs(self): datasets = ['DataSofastCalculation/facet/facet_000/surf_coefs_facet'] for idx, file in enumerate(self.files_dataset): with self.subTest(i=idx): data = load_hdf5_datasets(datasets, file) - np.testing.assert_allclose( - data['surf_coefs_facet'], self.surf_coefs[idx], atol=1e-8, rtol=0 - ) + np.testing.assert_allclose(data['surf_coefs_facet'], self.surf_coefs[idx], atol=1e-8, rtol=0) def test_int_points(self): datasets = ['DataSofastCalculation/facet/facet_000/v_surf_points_facet'] @@ -189,10 +160,7 @@ def test_int_points(self): with self.subTest(i=idx): data = load_hdf5_datasets(datasets, file) np.testing.assert_allclose( - 
data['v_surf_points_facet'], - self.v_surf_points_facet[idx], - atol=1e-8, - rtol=0, + data['v_surf_points_facet'], self.v_surf_points_facet[idx], atol=1e-8, rtol=0 ) diff --git a/opencsp/app/sofast/test/test_integration_undefined.py b/opencsp/app/sofast/test/test_integration_undefined.py index 856a6738..95a464cf 100644 --- a/opencsp/app/sofast/test/test_integration_undefined.py +++ b/opencsp/app/sofast/test/test_integration_undefined.py @@ -8,9 +8,7 @@ from opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling -from opencsp.app.sofast.lib.MeasurementSofastFringe import ( - MeasurementSofastFringe as Measurement, -) +from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement from opencsp.app.sofast.lib.ProcessSofastFringe import ProcessSofastFringe as Sofast from opencsp.common.lib.camera.Camera import Camera from opencsp.common.lib.deflectometry.Surface2DParabolic import Surface2DParabolic @@ -70,21 +68,15 @@ def test_undefined(self): sofast.params.mask_filt_thresh = params['mask_filt_thresh'] sofast.params.mask_thresh_active_pixels = params['mask_thresh_active_pixels'] sofast.params.mask_keep_largest_area = params['mask_keep_largest_area'] - sofast.params.geometry_params.perimeter_refine_axial_search_dist = params[ - 'perimeter_refine_axial_search_dist' - ] + sofast.params.geometry_params.perimeter_refine_axial_search_dist = params['perimeter_refine_axial_search_dist'] sofast.params.geometry_params.perimeter_refine_perpendicular_search_dist = params[ 'perimeter_refine_perpendicular_search_dist' ] - sofast.params.geometry_params.facet_corns_refine_step_length = params[ - 'facet_corns_refine_step_length' - ] + sofast.params.geometry_params.facet_corns_refine_step_length = params['facet_corns_refine_step_length'] sofast.params.geometry_params.facet_corns_refine_perpendicular_search_dist = params[ 'facet_corns_refine_perpendicular_search_dist' ] - sofast.params.geometry_params.facet_corns_refine_frac_keep = params[ - 'facet_corns_refine_frac_keep' - ] + sofast.params.geometry_params.facet_corns_refine_frac_keep = params['facet_corns_refine_frac_keep'] # Define surface data surface = Surface2DParabolic( @@ -101,9 +93,7 @@ def test_undefined(self): slope_coefs = sofast.data_characterization_facet[0].slope_coefs_facet np.testing.assert_allclose(data['slopes_facet_xy'], slopes, atol=1e-7, rtol=0) - np.testing.assert_allclose( - data['slope_coefs_facet'], slope_coefs, atol=1e-8, rtol=0 - ) + np.testing.assert_allclose(data['slope_coefs_facet'], slope_coefs, atol=1e-8, rtol=0) if __name__ == '__main__': diff --git a/opencsp/app/sofast/test/test_project_fixed_pattern_target.py b/opencsp/app/sofast/test/test_project_fixed_pattern_target.py index 11225944..70a5448c 100644 --- a/opencsp/app/sofast/test/test_project_fixed_pattern_target.py +++ b/opencsp/app/sofast/test/test_project_fixed_pattern_target.py @@ -15,16 +15,13 @@ def test_project_fixed_pattern_target(): # Set pattern parameters file_image_projection = os.path.join( - opencsp_code_dir(), - "test/data/measurements_sofast_fringe/general/image_projection_test.h5", + opencsp_code_dir(), "test/data/measurements_sofast_fringe/general/image_projection_test.h5" ) # Load ImageProjection im_proj = ImageProjection.load_from_hdf_and_display(file_image_projection) - fixed_pattern = SystemSofastFixed( - im_proj.size_x, im_proj.size_y, width_pattern=3, spacing_pattern=6 - ) + fixed_pattern = 
SystemSofastFixed(im_proj.size_x, im_proj.size_y, width_pattern=3, spacing_pattern=6) image = fixed_pattern.get_image('uint8', 255, 'square') # Project image diff --git a/opencsp/app/sofast/test/test_save_DisplayShape_file.py b/opencsp/app/sofast/test/test_save_DisplayShape_file.py index 64b5c0bd..71d77bdc 100644 --- a/opencsp/app/sofast/test/test_save_DisplayShape_file.py +++ b/opencsp/app/sofast/test/test_save_DisplayShape_file.py @@ -15,29 +15,20 @@ class test_save_physical_setup_file(unittest.TestCase): def test_save_physical_setup_file(self): """Loads data and saves test Display file""" # Define input file directory - dir_input_sofast = join( - opencsp_code_dir(), 'app/sofast/test/data/data_expected' - ) - dir_input_def = join( - opencsp_code_dir(), 'common/lib/deflectometry/test/data/data_expected' - ) + dir_input_sofast = join(opencsp_code_dir(), 'app/sofast/test/data/data_expected') + dir_input_def = join(opencsp_code_dir(), 'common/lib/deflectometry/test/data/data_expected') dir_output = join(opencsp_code_dir(), 'app/sofast/test/data/output') file_save = join(dir_output, 'test_physical_setup_file.h5') ft.create_directories_if_necessary(dir_output) # Define data files - file_screen_distortion_data = join( - dir_input_sofast, 'screen_distortion_data_100_100.h5' - ) + file_screen_distortion_data = join(dir_input_sofast, 'screen_distortion_data_100_100.h5') file_cam = join(dir_input_def, 'camera_rvec_tvec.csv') # Load data name = 'Test Physical Setup File' - data_dist = load_hdf5_datasets( - ['pts_xy_screen_fraction', 'pts_xyz_screen_coords'], - file_screen_distortion_data, - ) + data_dist = load_hdf5_datasets(['pts_xy_screen_fraction', 'pts_xyz_screen_coords'], file_screen_distortion_data) screen_distortion_data = { 'pts_xy_screen_fraction': Vxy(data_dist['pts_xy_screen_fraction']), 'pts_xyz_screen_coords': Vxyz(data_dist['pts_xyz_screen_coords']), diff --git a/opencsp/app/sofast/test/test_spatial_processing.py b/opencsp/app/sofast/test/test_spatial_processing.py index 7143b1a7..23ba9115 100644 --- a/opencsp/app/sofast/test/test_spatial_processing.py +++ b/opencsp/app/sofast/test/test_spatial_processing.py @@ -8,9 +8,7 @@ from scipy.spatial.transform import Rotation from opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display -from opencsp.app.sofast.lib.MeasurementSofastFringe import ( - MeasurementSofastFringe as Measurement, -) +from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement import opencsp.app.sofast.lib.spatial_processing as sp from opencsp.common.lib.camera.Camera import Camera from opencsp.common.lib.geometry.Vxy import Vxy @@ -23,9 +21,7 @@ class TestSpatialProcessing(unittest.TestCase): @classmethod def setUpClass(cls): # Get test data location - base_dir = os.path.join( - opencsp_code_dir(), 'test/data/measurements_sofast_fringe' - ) + base_dir = os.path.join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe') # Define test data files for single facet processing cls.data_file_facet = os.path.join(base_dir, 'calculations_facet/data.h5') @@ -66,11 +62,7 @@ def test_r_from_position(self): # Perform calculation r_optic_cam_exp = ( - sp.r_from_position( - Vxyz(data['v_cam_optic_cam_exp']), self.display.v_cam_screen_cam - ) - .inv() - .as_rotvec() + sp.r_from_position(Vxyz(data['v_cam_optic_cam_exp']), self.display.v_cam_screen_cam).inv().as_rotvec() ) # Test @@ -91,20 +83,13 @@ def test_calc_rt_from_img_pts(self): # Perform calculation r_optic_cam, v_cam_optic_cam_refine = sp.calc_rt_from_img_pts( - 
Vxy(data['loop_facet_image_refine']), - Vxyz(data['v_facet_corners']), - self.camera, + Vxy(data['loop_facet_image_refine']), Vxyz(data['v_facet_corners']), self.camera ) # Test + np.testing.assert_allclose(data['r_optic_cam_refine_1'], r_optic_cam.as_rotvec(), atol=1e-5, rtol=0) np.testing.assert_allclose( - data['r_optic_cam_refine_1'], r_optic_cam.as_rotvec(), atol=1e-5, rtol=0 - ) - np.testing.assert_allclose( - data['v_cam_optic_cam_refine_1'], - v_cam_optic_cam_refine.data.squeeze(), - atol=1e-5, - rtol=0, + data['v_cam_optic_cam_refine_1'], v_cam_optic_cam_refine.data.squeeze(), atol=1e-5, rtol=0 ) def test_distance_error(self): @@ -119,15 +104,11 @@ def test_distance_error(self): # Perform calculation error_optic_screen_dist_2 = sp.distance_error( - self.display.v_cam_screen_cam, - Vxyz(data['v_cam_optic_cam_refine_2']), - measurement.optic_screen_dist, + self.display.v_cam_screen_cam, Vxyz(data['v_cam_optic_cam_refine_2']), measurement.optic_screen_dist ) # Test - np.testing.assert_allclose( - data['error_optic_screen_dist_2'], error_optic_screen_dist_2 - ) + np.testing.assert_allclose(data['error_optic_screen_dist_2'], error_optic_screen_dist_2) def test_reprojection_error(self): datasets = [ @@ -175,9 +156,7 @@ def test_refine_v_distance(self): ).data.squeeze() # Test - np.testing.assert_allclose( - data['v_cam_optic_cam_refine_2'], v_cam_optic_cam_refine_2 - ) + np.testing.assert_allclose(data['v_cam_optic_cam_refine_2'], v_cam_optic_cam_refine_2) if __name__ == '__main__': diff --git a/opencsp/app/target/target_color/lib/ImageColor.py b/opencsp/app/target/target_color/lib/ImageColor.py index 896f89a2..527211bd 100644 --- a/opencsp/app/target/target_color/lib/ImageColor.py +++ b/opencsp/app/target/target_color/lib/ImageColor.py @@ -46,18 +46,14 @@ def from_file(cls, file: str) -> 'ImageColor': if file.split('.')[-1] in ['NEF', 'RAW', 'nef', 'raw']: # Load image if raw with rawpy.imread(file) as raw: - im_array = raw.postprocess( - gamma=(1, 1), no_auto_bright=True, output_bps=16 - ) + im_array = raw.postprocess(gamma=(1, 1), no_auto_bright=True, output_bps=16) else: # Load image if not raw im_array = imageio.imread(file) return cls(im_array) - def _get_normalized_image_data( - self, - ) -> tuple[np.ndarray, np.ndarray, tuple[int, int]]: + def _get_normalized_image_data(self) -> tuple[np.ndarray, np.ndarray, tuple[int, int]]: """ Returns normalized image, RGB values as Nx3 ndarray, and XY shape of image """ @@ -71,9 +67,7 @@ def _get_normalized_image_data( return image_norm, rgb_vals, image_norm.shape[:2] - def match_indices( - self, rgb: np.ndarray, thresh: float - ) -> tuple[np.ndarray, np.ndarray]: + def match_indices(self, rgb: np.ndarray, thresh: float) -> tuple[np.ndarray, np.ndarray]: """ Calls returns the indices of pixels matching given color in form (ys, xs). diff --git a/opencsp/app/target/target_color/target_color.py b/opencsp/app/target/target_color/target_color.py index e93d1149..d8447484 100755 --- a/opencsp/app/target/target_color/target_color.py +++ b/opencsp/app/target/target_color/target_color.py @@ -21,9 +21,7 @@ if __name__ == "__main__": plt.close('all') - base_dir = ( - opencsp_dir() + '\\common\\lib\\test\\output\\TestTargetColor\\actual_output' - ) + base_dir = opencsp_dir() + '\\common\\lib\\test\\output\\TestTargetColor\\actual_output' output_dir = os.path.join( 'app', 'target', 'target_color', 'test', 'data', 'output', source_file_body ) # ?? SCAFFOLDING RCB -- ADD CODE TO CREATE DIRECTORY IF NECESSARY. 
@@ -66,38 +64,24 @@ # Discrete linear color bar target_color_1d_gradient.linear_color_bar( - 'discrete', - color_bar, - color_below_min, - color_above_max, - img, - dpi, - output_dir, - output_ext, + 'discrete', color_bar, color_below_min, color_above_max, img, dpi, output_dir, output_ext ) # Continuous linear color bar target_color_1d_gradient.linear_color_bar( - 'continuous', - color_bar, - color_below_min, - color_above_max, - img, - dpi, - output_dir, - output_ext, + 'continuous', color_bar, color_below_min, color_above_max, img, dpi, output_dir, output_ext ) assert False # Discrete bullseye color bar # Target definition parameters. - target_design_focal_length_m = 100 # meters. Standard focal length for a multi-purpose target. To measure a mirror with a different + target_design_focal_length_m = ( + 100 # meters. Standard focal length for a multi-purpose target. To measure a mirror with a different + ) # focal length, adjust scale on color bar in final commmunication. target_design_err_max_mrad = 3.0 # mrad. To match Braden's SOFAST plot, which has a color bar from 0 to 3 mrad. - target_design_err_min_mrad = ( - 0.005 * target_design_err_max_mrad - ) # Size of center circle for placing pilot drill. + target_design_err_min_mrad = 0.005 * target_design_err_max_mrad # Size of center circle for placing pilot drill. color_below_min = [255, 255, 255] # White center circle for placing pilot drill. color_above_max = [255, 255, 255] # White background for "saturated data." color_frame = [240, 240, 240] # Overall image frame color @@ -114,9 +98,7 @@ ) # Required target diameter is d=8*f*err_max. target_diameter_in = target_diameter_m * (1000 / 25.4) # Convert to inches. # Image height and width determined by bullseye size. - target_img_width_in = ( - target_diameter_in # Margin is controlled by the cx offset parameters. - ) + target_img_width_in = target_diameter_in # Margin is controlled by the cx offset parameters. target_img_height_in = target_img_width_in # For designing a square image print('target_img_width_in = ', target_img_width_in) print('target_img_height_in = ', target_img_height_in) @@ -144,24 +126,18 @@ leader_up_over_board_in = up_over_board_in - (target_img_height_in / 2.0) # Add leader and trailer. spool_diameter_in = 3.5 # PVC tube outer diameter. - num_turns_when_unfurled = ( - 1.75 # Number of wraps still around the spool when the target is installed. - ) + num_turns_when_unfurled = 1.75 # Number of wraps still around the spool when the target is installed. wrap_distance_when_unfurled_in = num_turns_when_unfurled * ( np.pi * spool_diameter_in ) # Length around spool when target installed. leader_margin_in = up_over_board_in - ( target_img_height_in / 2.0 ) # Leader required to go up over board and back to board center. - trailer_margin_in = ( - 6.0 # inch. Maximum distance from target bottom edge to spool when installed. - ) + trailer_margin_in = 6.0 # inch. Maximum distance from target bottom edge to spool when installed. leader_in = leader_margin_in + wrap_distance_when_unfurled_in trailer_in = trailer_margin_in + wrap_distance_when_unfurled_in img_height_in = target_img_height_in + leader_in + trailer_in - y_offset_in = trailer_in + ( - target_img_height_in / 2 - ) # Defined from bottom of image. + y_offset_in = trailer_in + (target_img_height_in / 2) # Defined from bottom of image. y_offset_pix = y_offset_in * dpi # # Image size in pixels. img_rows = int(img_height_in * dpi) @@ -176,12 +152,8 @@ y_max = n_rows # Define image offsets. 
- cx_offset_1_pix = round( - 0.30 * x_max - ) # Defined from center of image. Controls total image width. - cx_offset_2_pix = round( - -0.55 * x_max - ) # Defined from center of image. Does not affect total image width. + cx_offset_1_pix = round(0.30 * x_max) # Defined from center of image. Controls total image width. + cx_offset_2_pix = round(-0.55 * x_max) # Defined from center of image. Does not affect total image width. # Generate image #1. alignment_line_start_x, dummy = bullseye_color_bar( diff --git a/opencsp/app/target/target_color/target_color_bullseye.py b/opencsp/app/target/target_color/target_color_bullseye.py index 813dd0cd..570871c9 100755 --- a/opencsp/app/target/target_color/target_color_bullseye.py +++ b/opencsp/app/target/target_color/target_color_bullseye.py @@ -66,13 +66,7 @@ def bullseye_color_bar( row, col, x_max, cx_offset_pix, y_offset_pix, focal_length_meter ) color = color_convert.color_given_value( - r_mrad, - r_min_mrad, - r_max_mrad, - color_bar, - discrete_or_continuous, - color_below_min, - color_above_max, + r_mrad, r_min_mrad, r_max_mrad, color_bar, discrete_or_continuous, color_below_min, color_above_max ) # Set pixel color img[row, col, 0] = color[0] / 255.0 @@ -106,16 +100,10 @@ def bullseye_color_bar( if draw_alignment_line: cx = (x_max / 2) + cx_offset_pix this_hole_center_x = round(cx) # x position of target center, in this image. - other_hole_center_x = round( - cx_offset_alignment_pix - ) # x position of target center, in other image. - alignment_line_start_x = this_hole_center_x - ( - other_hole_center_x + round(x_max / 2) - ) + other_hole_center_x = round(cx_offset_alignment_pix) # x position of target center, in other image. + alignment_line_start_x = this_hole_center_x - (other_hole_center_x + round(x_max / 2)) for row in range(0, n_rows): - for col in range( - alignment_line_start_x, (alignment_line_start_x + frame_width_pix) - ): + for col in range(alignment_line_start_x, (alignment_line_start_x + frame_width_pix)): # Don't draw within the target. r_mrad = bullseye_error.radius_in_mrad_given_row_col( row, col, x_max, cx_offset_pix, y_offset_pix, focal_length_meter @@ -151,13 +139,7 @@ def bullseye_color_bar( else: trim_line_start_x = -999 # Save. - output_file_body = ( - 'matplotlib_' - + discrete_or_continuous - + '_bullseye_color_bar' - + '_cx' - + str(cx_offset_pix) - ) + output_file_body = 'matplotlib_' + discrete_or_continuous + '_bullseye_color_bar' + '_cx' + str(cx_offset_pix) output_file_dir_body_ext = os.path.join(output_dir, output_file_body + output_ext) print('Saving file:', output_file_dir_body_ext) plt.imsave(output_file_dir_body_ext, img, dpi=dpi) diff --git a/opencsp/app/target/target_color/target_color_bullseye_error.py b/opencsp/app/target/target_color/target_color_bullseye_error.py index 1888a062..b3baf4fa 100755 --- a/opencsp/app/target/target_color/target_color_bullseye_error.py +++ b/opencsp/app/target/target_color/target_color_bullseye_error.py @@ -29,9 +29,7 @@ def surface_normal_error_magnitude_given_radius_in_meters(r_meter, focal_length_ return surface_normal_error * 1000.0 # Convert radians to milliradians. -def radius_in_mrad_given_row_col( - n_rows, row, col, x_max, cx_offset_pix, y_offset_pix, focal_length_meter -): +def radius_in_mrad_given_row_col(n_rows, row, col, x_max, cx_offset_pix, y_offset_pix, focal_length_meter): x = col y = n_rows - row cx = (x_max / 2) + cx_offset_pix @@ -42,7 +40,5 @@ def radius_in_mrad_given_row_col( theta = math.atan2(dy, dx) # Lookup color bar entry. 
r_meter = meters_given_pixels(r_pixel) - r_mrad = surface_normal_error_magnitude_given_radius_in_meters( - r_meter, focal_length_meter - ) + r_mrad = surface_normal_error_magnitude_given_radius_in_meters(r_meter, focal_length_meter) return r_mrad diff --git a/opencsp/common/lib/camera/Camera.py b/opencsp/common/lib/camera/Camera.py index 657770f6..062ded73 100644 --- a/opencsp/common/lib/camera/Camera.py +++ b/opencsp/common/lib/camera/Camera.py @@ -17,11 +17,7 @@ class Camera: def __init__( - self, - intrinsic_mat: np.ndarray, - distortion_coef: np.ndarray, - image_shape_xy: tuple[int, int], - name: str, + self, intrinsic_mat: np.ndarray, distortion_coef: np.ndarray, image_shape_xy: tuple[int, int], name: str ): """ Calibrated machine vision camera representation. @@ -37,11 +33,7 @@ def __init__( Name of camera/lens combination. """ - if ( - intrinsic_mat.shape[0] != 3 - or intrinsic_mat.shape[1] != 3 - or np.ndim(intrinsic_mat) != 2 - ): + if intrinsic_mat.shape[0] != 3 or intrinsic_mat.shape[1] != 3 or np.ndim(intrinsic_mat) != 2: raise ValueError('Input intrinsic_mat must be a 3x3 ndarray.') self.intrinsic_mat = intrinsic_mat @@ -72,17 +64,13 @@ def vector_from_pixel(self, pixels: Vxy) -> Uxyz: Poining direction for each input pixel """ - pointing = cv.undistortPoints( - pixels.data, self.intrinsic_mat, self.distortion_coef - ) + pointing = cv.undistortPoints(pixels.data, self.intrinsic_mat, self.distortion_coef) pointing = pointing[:, 0, :].T z = np.ones((1, pointing.shape[1]), dtype=pointing.dtype) pointing = np.concatenate((pointing, z), axis=0) return Uxyz(pointing) - def project( - self, P_object: Vxyz, R_object_cam: Rotation, V_cam_object_cam: Vxyz - ) -> Vxy: + def project(self, P_object: Vxyz, R_object_cam: Rotation, V_cam_object_cam: Vxyz) -> Vxy: """ Projects points in 3D space to the camera sensor. @@ -101,14 +89,10 @@ def project( Projected points, pixels. """ - pixels = self.project_mat( - P_object.data.T, R_object_cam.as_rotvec(), V_cam_object_cam.data.squeeze() - ) + pixels = self.project_mat(P_object.data.T, R_object_cam.as_rotvec(), V_cam_object_cam.data.squeeze()) return Vxy(pixels.T) - def project_mat( - self, pts_object: np.ndarray, rot_vec: np.ndarray, v_cam_object_cam: np.ndarray - ) -> np.ndarray: + def project_mat(self, pts_object: np.ndarray, rot_vec: np.ndarray, v_cam_object_cam: np.ndarray) -> np.ndarray: """ Identical to project but points in matrix form. @@ -128,13 +112,7 @@ def project_mat( Nx2 array of projected points. 
""" - pixels = cv.projectPoints( - pts_object, - rot_vec, - v_cam_object_cam, - self.intrinsic_mat, - self.distortion_coef, - )[0] + pixels = cv.projectPoints(pts_object, rot_vec, v_cam_object_cam, self.intrinsic_mat, self.distortion_coef)[0] return pixels[:, 0, :] @classmethod @@ -148,12 +126,7 @@ def load_from_hdf(cls, file: str): HDF5 file to load """ - datasets = [ - 'Camera/intrinsic_mat', - 'Camera/distortion_coef', - 'Camera/image_shape_xy', - 'Camera/name', - ] + datasets = ['Camera/intrinsic_mat', 'Camera/distortion_coef', 'Camera/image_shape_xy', 'Camera/name'] kwargs = hdf5_tools.load_hdf5_datasets(datasets, file) return cls(**kwargs) @@ -168,16 +141,6 @@ def save_to_hdf(self, file: str): HDF5 file to save """ - datasets = [ - 'Camera/intrinsic_mat', - 'Camera/distortion_coef', - 'Camera/image_shape_xy', - 'Camera/name', - ] - data = [ - self.intrinsic_mat, - self.distortion_coef, - self.image_shape_xy, - self.name, - ] + datasets = ['Camera/intrinsic_mat', 'Camera/distortion_coef', 'Camera/image_shape_xy', 'Camera/name'] + data = [self.intrinsic_mat, self.distortion_coef, self.image_shape_xy, self.name] hdf5_tools.save_hdf5_datasets(data, datasets, file) diff --git a/opencsp/common/lib/camera/CameraTransform.py b/opencsp/common/lib/camera/CameraTransform.py index e0950ab1..b0251d5d 100644 --- a/opencsp/common/lib/camera/CameraTransform.py +++ b/opencsp/common/lib/camera/CameraTransform.py @@ -42,9 +42,7 @@ def __init__( # That is, this is the tranform that moves the cameras for it position and view direction back to face up at (0,0,0). self.transform = self.construct_transform() self.rotation_matrix = self.construct_rotation_matrix() - self.rvec = ( - self.construct_rvec() - ) # These are the transforms required by OpenCV. + self.rvec = self.construct_rvec() # These are the transforms required by OpenCV. self.tvec = self.construct_tvec() # # View direction and origin plane. self.view_dir = self.construct_view_direction() @@ -58,9 +56,7 @@ def construct_inverse_rotation_matrix(self, az, el): # Component rotations. rot_z_to_y = t3d.axisrotation(x_axis, np.deg2rad(-90.0)) rot_y_to_el = t3d.axisrotation(x_axis, el) - rot_az_about_z = t3d.axisrotation( - z_axis, -az - ) # Azimuth is a compass heading, measured clockwise from north. + rot_az_about_z = t3d.axisrotation(z_axis, -az) # Azimuth is a compass heading, measured clockwise from north. # Combined rotation. rot_z_to_el = rot_y_to_el.dot(rot_z_to_y) rot_z_to_azel = rot_az_about_z.dot(rot_z_to_el) @@ -94,11 +90,7 @@ def construct_rotation_matrix(self): # This rotation moves the camera from its pointing direction to on its back. xf = self.transform return np.array( - [ - [xf[0][0], xf[0][1], xf[0][2]], - [xf[1][0], xf[1][1], xf[1][2]], - [xf[2][0], xf[2][1], xf[2][2]], - ] + [[xf[0][0], xf[0][1], xf[0][2]], [xf[1][0], xf[1][1], xf[1][2]], [xf[2][0], xf[2][1], xf[2][2]]] ) def construct_rvec(self): @@ -127,9 +119,7 @@ def construct_origin_plane(self): A = normal[0] B = normal[1] C = normal[2] - D = ( - -distance_origin_to_plane - ) # Negate so that points on +z side of plane have positive distance values. + D = -distance_origin_to_plane # Negate so that points on +z side of plane have positive distance values. # Return. return [A, B, C, D] @@ -145,9 +135,7 @@ def image_plane_front(self, camera): A = normal[0] B = normal[1] C = normal[2] - D = ( - -distance_origin_to_plane - ) # Negate so that points on +z side of plane have positive distance values. 
+ D = -distance_origin_to_plane # Negate so that points on +z side of plane have positive distance values. # Return. return [A, B, C, D] @@ -170,11 +158,7 @@ def pq_or_none(self, camera, xyz): [xyz] ) # float 64 required by OpenCV. Can also "obj_points = obj_points.astype('float64')" open_cv_img_points, jacobian_project = cv.projectPoints( - obj_points, - self.rvec, - self.tvec, - camera.camera_matrix, - camera.distortion_coeffs, + obj_points, self.rvec, self.tvec, camera.camera_matrix, camera.distortion_coeffs ) # The OpenCV function returns a nested list of points. We want just a list of points. if len(open_cv_img_points) != 1: @@ -194,11 +178,7 @@ def pq_or_none(self, camera, xyz): assert False img_pt = nested_img_pt[0] if len(img_pt) != 2: - print( - 'ERROR: In CameraTransform.pq_or_none(), img_pt = "' - + str(img_pt) - + '" was not of length 2.' - ) + print('ERROR: In CameraTransform.pq_or_none(), img_pt = "' + str(img_pt) + '" was not of length 2.') assert False # Set (p,q) coordinates. p = img_pt[0] diff --git a/opencsp/common/lib/camera/ImageAcquisitionAbstract.py b/opencsp/common/lib/camera/ImageAcquisitionAbstract.py index f922bb3e..1d2a08e3 100644 --- a/opencsp/common/lib/camera/ImageAcquisitionAbstract.py +++ b/opencsp/common/lib/camera/ImageAcquisitionAbstract.py @@ -47,9 +47,7 @@ def _check_saturated(im): self.exposure_time = exposure_values[0] im = self.get_frame() if _check_saturated(im): - raise ValueError( - 'Minimum exposure value is too high; image still saturated.' - ) + raise ValueError('Minimum exposure value is too high; image still saturated.') # Checks that the maximum value is over-exposed self.exposure_time = exposure_values[-1] diff --git a/opencsp/common/lib/camera/ImageAcquisition_DCAM_color.py b/opencsp/common/lib/camera/ImageAcquisition_DCAM_color.py index 44f59769..bad72bbb 100644 --- a/opencsp/common/lib/camera/ImageAcquisition_DCAM_color.py +++ b/opencsp/common/lib/camera/ImageAcquisition_DCAM_color.py @@ -44,9 +44,7 @@ def __init__(self, instance: int = 0, pixel_format: str = 'BayerRG12'): # Check number of instances if instance >= len(devices): - raise ValueError( - f'Cannot load instance {instance:d}. Only {len(devices):d} devices found.' - ) + raise ValueError(f'Cannot load instance {instance:d}. Only {len(devices):d} devices found.') # Connect to camera self.cap = pylon.InstantCamera(tlFactory.CreateDevice(devices[instance])) @@ -64,9 +62,7 @@ def __init__(self, instance: int = 0, pixel_format: str = 'BayerRG12'): # Set exposure values to be stepped over when performing exposure calibration shutter_min = self.cap.ExposureTimeRaw.Min shutter_max = self.cap.ExposureTimeRaw.Max - self._shutter_cal_values = np.linspace( - shutter_min, shutter_max, 2**13 - ).astype(int) + self._shutter_cal_values = np.linspace(shutter_min, shutter_max, 2**13).astype(int) def get_frame(self, encode: bool = True) -> np.ndarray: # Start frame capture diff --git a/opencsp/common/lib/camera/ImageAcquisition_DCAM_mono.py b/opencsp/common/lib/camera/ImageAcquisition_DCAM_mono.py index b50a957d..a918bb4b 100644 --- a/opencsp/common/lib/camera/ImageAcquisition_DCAM_mono.py +++ b/opencsp/common/lib/camera/ImageAcquisition_DCAM_mono.py @@ -47,9 +47,7 @@ def __init__(self, instance: int = 0, pixel_format: str = 'Mono8'): # Check number of instances if instance >= len(devices): - raise ValueError( - f'Cannot load instance {instance:d}. Only {len(devices):d} devices found.' - ) + raise ValueError(f'Cannot load instance {instance:d}. 
Only {len(devices):d} devices found.') # Connect to camera self.cap = pylon.InstantCamera(tlFactory.CreateDevice(devices[instance])) @@ -67,9 +65,7 @@ def __init__(self, instance: int = 0, pixel_format: str = 'Mono8'): # Set exposure values to be stepped over when performing exposure calibration shutter_min = self.cap.ExposureTimeRaw.Min shutter_max = self.cap.ExposureTimeRaw.Max - self._shutter_cal_values = np.linspace( - shutter_min, shutter_max, 2**13 - ).astype(int) + self._shutter_cal_values = np.linspace(shutter_min, shutter_max, 2**13).astype(int) @classmethod def _check_pypylon_version(cls): @@ -78,10 +74,12 @@ def _check_pypylon_version(cls): suggested_pypylon_version = "3.0" # latest release as of 2024/03/21 if pypylon_version < suggested_pypylon_version: - lt.warn("Warning in ImageAcquisition_DCAM_mono.py: " + - f"pypylon version {pypylon_version} is behind the suggested version {suggested_pypylon_version}. " + - "If you have trouble grabbing frames with the basler camera, try upgrading your version of pypylon " + - "with \"python -m pip install --upgrade pypylon\".") + lt.warn( + "Warning in ImageAcquisition_DCAM_mono.py: " + + f"pypylon version {pypylon_version} is behind the suggested version {suggested_pypylon_version}. " + + "If you have trouble grabbing frames with the basler camera, try upgrading your version of pypylon " + + "with \"python -m pip install --upgrade pypylon\"." + ) cls._has_checked_pypylon_version = True diff --git a/opencsp/common/lib/camera/ImageAcquisition_MSMF.py b/opencsp/common/lib/camera/ImageAcquisition_MSMF.py index 02d88cbd..11405b82 100644 --- a/opencsp/common/lib/camera/ImageAcquisition_MSMF.py +++ b/opencsp/common/lib/camera/ImageAcquisition_MSMF.py @@ -31,9 +31,7 @@ def get_frame(self) -> np.ndarray: if np.ndim(frame) == 3: frame = frame.mean(axis=2) elif np.ndim(frame) != 2: - raise ValueError( - f'Output frame must have 2 or 3 dimensions, not {np.ndim(frame):d}.' - ) + raise ValueError(f'Output frame must have 2 or 3 dimensions, not {np.ndim(frame):d}.') return frame @@ -78,9 +76,7 @@ def max_value(self) -> int: @property def shutter_cal_values(self) -> np.ndarray: - raise ValueError( - 'exposure_time cannot be adjusted with MSMF camera; adjust screen brightness instead.' - ) + raise ValueError('exposure_time cannot be adjusted with MSMF camera; adjust screen brightness instead.') def close(self): self.cap.release() diff --git a/opencsp/common/lib/camera/LiveView.py b/opencsp/common/lib/camera/LiveView.py index a2b7e53f..c927f7bc 100644 --- a/opencsp/common/lib/camera/LiveView.py +++ b/opencsp/common/lib/camera/LiveView.py @@ -9,10 +9,7 @@ class LiveView: def __init__( - self, - image_acquisition: ImageAcquisitionAbstract, - update_ms: int = 20, - highlight_saturation: bool = True, + self, image_acquisition: ImageAcquisitionAbstract, update_ms: int = 20, highlight_saturation: bool = True ): """ Shows live stream from a camera. Escape key closes window. 
@@ -39,9 +36,7 @@ def __init__( self.im = self.ax.imshow(self.grab_frame(), cmap='gray') # Create animation object (must be defined to variable) - self.anim = FuncAnimation( - self.fig, self.update, interval=update_ms, cache_frame_data=False - ) + self.anim = FuncAnimation(self.fig, self.update, interval=update_ms, cache_frame_data=False) # Define close function and bind to keystroke self.fig.canvas.mpl_connect("key_press_event", self.close) @@ -74,6 +69,4 @@ def grab_frame(self) -> np.ndarray: if self.highlight_saturation: return highlight_saturation(frame, self.image_acquisition.max_value) else: - return frame.astype(np.float32) / np.float32( - self.image_acquisition.max_value - ) + return frame.astype(np.float32) / np.float32(self.image_acquisition.max_value) diff --git a/opencsp/common/lib/camera/UCamera.py b/opencsp/common/lib/camera/UCamera.py index 9e4dcaa0..b9142955 100644 --- a/opencsp/common/lib/camera/UCamera.py +++ b/opencsp/common/lib/camera/UCamera.py @@ -45,12 +45,8 @@ def __init__( # Input parameters. self.name = name # String describing camera and lens. - self.sensor_x = ( - sensor_x_mm / 1000.0 - ) # m. Size of sensor in horizontal direction. - self.sensor_y = ( - sensor_y_mm / 1000.0 - ) # m. Size of sensor in vertical direction. + self.sensor_x = sensor_x_mm / 1000.0 # m. Size of sensor in horizontal direction. + self.sensor_y = sensor_y_mm / 1000.0 # m. Size of sensor in vertical direction. self.pixels_x = pixels_x # Number of pizels in horizontal direction for still images. Video may differ. self.pixels_y = pixels_y # Number of pizels in vertical direction for still images. Video may differ. self.focal_length_min = ( @@ -63,15 +59,11 @@ def __init__( self.fov_vertical_min = 2.0 * math.atan( (self.sensor_y / 2.0) / self.focal_length_max ) # Angular field of view of the camera, in the vertical direction. - self.fov_vertical_max = 2.0 * math.atan( - (self.sensor_y / 2.0) / self.focal_length_min - ) # + self.fov_vertical_max = 2.0 * math.atan((self.sensor_y / 2.0) / self.focal_length_min) # self.fov_horizontal_min = 2.0 * math.atan( (self.sensor_x / 2.0) / self.focal_length_max ) # Angular field of view of the camera, in the horizontal direction. - self.fov_horizontal_max = 2.0 * math.atan( - (self.sensor_x / 2.0) / self.focal_length_min - ) # + self.fov_horizontal_max = 2.0 * math.atan((self.sensor_x / 2.0) / self.focal_length_min) # # ACCESS @@ -143,10 +135,7 @@ def mavic_zoom(): sensor_y_mm=4.54, # mm. sqrt(7.66^2 - 6.17^2) pixels_x=4000, # For still images. pixels_y=3000, # For still images. - focal_lengths_mm=[ - 4.33, - 8.60, - ], # [min, max] range, in mm. For fixed focal length min == max. + focal_lengths_mm=[4.33, 8.60], # [min, max] range, in mm. For fixed focal length min == max. ) @@ -157,10 +146,7 @@ def sony_alpha_20mm_landscape(): sensor_y_mm=24.0, # mm. pixels_x=8760, # For still images. pixels_y=4864, # For still images. - focal_lengths_mm=[ - 20, - 20, - ], # [min, max] range, in mm. For fixed focal length min == max. + focal_lengths_mm=[20, 20], # [min, max] range, in mm. For fixed focal length min == max. ) @@ -171,10 +157,7 @@ def sony_alpha_20mm_portrait(): sensor_y_mm=35.9, # mm. pixels_x=4864, # For still images. pixels_y=8760, # For still images. - focal_lengths_mm=[ - 20, - 20, - ], # [min, max] range, in mm. For fixed focal length min == max. + focal_lengths_mm=[20, 20], # [min, max] range, in mm. For fixed focal length min == max. ) @@ -185,10 +168,7 @@ def ultra_wide_angle(): sensor_y_mm=35.9, # mm. pixels_x=4864, # For still images. 
pixels_y=8760, # For still images. - focal_lengths_mm=[ - 5, - 5, - ], # [min, max] range, in mm. For fixed focal length min == max. + focal_lengths_mm=[5, 5], # [min, max] range, in mm. For fixed focal length min == max. ) @@ -243,9 +223,7 @@ def __init__( self._p_2 = p_2 # Do not access directly. Fetch via distortion_coeffs. # Dependent parameters. self.frame_box_pq = self.construct_frame_box_pq() - self.camera_matrix = np.array( - [[f_x, 0, c_x], [0, f_y, c_y], [0, 0, 1]] - ).reshape(3, 3) + self.camera_matrix = np.array([[f_x, 0, c_x], [0, f_y, c_y], [0, 0, 1]]).reshape(3, 3) self.distortion_coeffs = np.array([[k_1, k_2, p_1, p_2]]) # CONSTRUCTION @@ -479,12 +457,8 @@ def real_sony_alpha_20mm_video(): f_x=4675.73, # Pixels. f_y=4672.100, # Pixels. # Optical center. - c_x=4343.252 - / 8640 - * 7680, # Pixels. Before camera calibration, assume c_x = w/2 = 3840/2 - c_y=2883.190 - / 5760 - * 4320, # Pixels. Before camera calibration, assume c_y = h/2 = 2160/2 + c_x=4343.252 / 8640 * 7680, # Pixels. Before camera calibration, assume c_x = w/2 = 3840/2 + c_y=2883.190 / 5760 * 4320, # Pixels. Before camera calibration, assume c_y = h/2 = 2160/2 # Radial distortion. k_1=-0.0568132871107041, # ?? SCAFFOLDING RCB -- WHAT ARE UNITS? ARE VALUES VALID FOR EXPECTED UNITS? k_2=0.0395572945993200, # ?? SCAFFOLDING RCB -- WHAT ARE UNITS? ARE VALUES VALID FOR EXPECTED UNITS? diff --git a/opencsp/common/lib/camera/image_processing.py b/opencsp/common/lib/camera/image_processing.py index 6400955c..d3b42b1a 100644 --- a/opencsp/common/lib/camera/image_processing.py +++ b/opencsp/common/lib/camera/image_processing.py @@ -34,9 +34,7 @@ def encode_RG_to_RGB(image: np.ndarray) -> np.ndarray: return np.concatenate((im_r, im_g, im_b), 2) -def highlight_saturation( - image: np.ndarray, saturation_value: int | float -) -> np.ndarray: +def highlight_saturation(image: np.ndarray, saturation_value: int | float) -> np.ndarray: """ Highlights saturated pixels red. Image can be 2d or 3d, a 3d image is returned. 
@@ -60,9 +58,7 @@ def highlight_saturation( elif np.ndim(image) == 3: rgb = image else: - raise ValueError( - f'Input image must have 1 or 3 channels, but image has shape: {image.shape}' - ) + raise ValueError(f'Input image must have 1 or 3 channels, but image has shape: {image.shape}') # Mask saturated pixels mask = (rgb >= saturation_value).max(2) diff --git a/opencsp/common/lib/camera/test/test_Camera.py b/opencsp/common/lib/camera/test/test_Camera.py index 5c04162f..3199fe11 100644 --- a/opencsp/common/lib/camera/test/test_Camera.py +++ b/opencsp/common/lib/camera/test/test_Camera.py @@ -17,12 +17,8 @@ def setup_class(cls): distortion_coef_real = np.array([0.01, 0.02, 0.001, 0.002]) image_shape_xy = (1000, 500) - cls.camera_ideal = Camera( - intrinsic_mat, distortion_coef_zeros, image_shape_xy, 'Test Ideal Camera' - ) - cls.camera_real = Camera( - intrinsic_mat, distortion_coef_real, image_shape_xy, 'Test Real Camera' - ) + cls.camera_ideal = Camera(intrinsic_mat, distortion_coef_zeros, image_shape_xy, 'Test Ideal Camera') + cls.camera_real = Camera(intrinsic_mat, distortion_coef_real, image_shape_xy, 'Test Real Camera') # Define upper left 3D point and image location cls.Vxyz_ul = Vxyz((-1, -0.5, 2)) diff --git a/opencsp/common/lib/csp/Facet.py b/opencsp/common/lib/csp/Facet.py index 235c6f06..bda94bca 100644 --- a/opencsp/common/lib/csp/Facet.py +++ b/opencsp/common/lib/csp/Facet.py @@ -8,9 +8,7 @@ from opencsp.common.lib.csp.MirrorAbstract import MirrorAbstract from opencsp.common.lib.csp.OpticOrientation import OpticOrientation from opencsp.common.lib.csp.RayTraceable import RayTraceable -from opencsp.common.lib.csp.VisualizeOrthorectifiedSlopeAbstract import ( - VisualizeOrthorectifiedSlopeAbstract, -) +from opencsp.common.lib.csp.VisualizeOrthorectifiedSlopeAbstract import VisualizeOrthorectifiedSlopeAbstract from opencsp.common.lib.geometry.LoopXY import LoopXY from opencsp.common.lib.geometry.Pxyz import Pxyz from opencsp.common.lib.geometry.RegionXY import RegionXY @@ -59,9 +57,7 @@ def axis_aligned_bounding_box(self) -> tuple[float, float, float, float]: Left, right, bottom, top. Facet's child coordinate reference frame. 
""" # Get XYZ locations of all points making up mirror region - points_xy = Vxy.merge( - [loop.vertices for loop in self.mirror.region.loops] - ) # mirror base + points_xy = Vxy.merge([loop.vertices for loop in self.mirror.region.loops]) # mirror base points_z = self.mirror.surface_displacement_at(points_xy) # mirror base points_xyz = Vxyz((points_xy.x, points_xy.y, points_z)) # mirror base @@ -72,58 +68,35 @@ def axis_aligned_bounding_box(self) -> tuple[float, float, float, float]: return xyz.x.min(), xyz.x.max(), xyz.y.min(), xyz.y.max() # child def survey_of_points( - self, - resolution: int, - resolution_type: str = 'pixelX', - random_seed: int | None = None, + self, resolution: int, resolution_type: str = 'pixelX', random_seed: int | None = None ) -> tuple[Pxyz, Vxyz]: # Get sample point locations (z=0 plane in "child" reference frame) bbox = self.axis_aligned_bounding_box # left, right, bottom, top, "child" width = bbox[1] - bbox[0] height = bbox[3] - bbox[2] - region = RegionXY( - LoopXY.from_rectangle(bbox[0], bbox[2], width, height) - ) # facet child - points_child_xy = region.points_sample( - resolution, resolution_type, random_seed - ) # facet child - points_child_xyz = Vxyz( - (points_child_xy.x, points_child_xy.y, np.zeros(len(points_child_xy))) - ) # facet child + region = RegionXY(LoopXY.from_rectangle(bbox[0], bbox[2], width, height)) # facet child + points_child_xy = region.points_sample(resolution, resolution_type, random_seed) # facet child + points_child_xyz = Vxyz((points_child_xy.x, points_child_xy.y, np.zeros(len(points_child_xy)))) # facet child # Filter points that are inside mirror region - points_mirror_base_xyz = self.transform_mirror_base_to_child.inv().apply( - points_child_xyz - ) # mirror base - mask = self.mirror.region.is_inside_or_on_border( - points_mirror_base_xyz.projXY() - ) + points_mirror_base_xyz = self.transform_mirror_base_to_child.inv().apply(points_child_xyz) # mirror base + mask = self.mirror.region.is_inside_or_on_border(points_mirror_base_xyz.projXY()) points_mirror_base_xyz = points_mirror_base_xyz[mask] # mirror base # Calculate points and normals at sample locations points_mirror_base, normals_mirror_base = self.mirror.point_and_normal_in_space( points_mirror_base_xyz.projXY() ) # facet child - points_mirror_base = self.transform_mirror_base_to_child.inv().apply( - points_mirror_base - ) # mirror base - normals_mirror_base.rotate_in_place( - self.transform_mirror_base_to_child.inv().R - ) # mirror base + points_mirror_base = self.transform_mirror_base_to_child.inv().apply(points_mirror_base) # mirror base + normals_mirror_base.rotate_in_place(self.transform_mirror_base_to_child.inv().R) # mirror base # Convert from mirror to fixed reference frame - points = self.transform_mirror_base_to_parent.apply( - points_mirror_base - ) # facet parent - normals = normals_mirror_base.rotate( - self.transform_mirror_base_to_parent.R - ) # facet parent + points = self.transform_mirror_base_to_parent.apply(points_mirror_base) # facet parent + normals = normals_mirror_base.rotate(self.transform_mirror_base_to_parent.R) # facet parent return points, normals # facet parent - def orthorectified_slope_array( - self, x_vec: np.ndarray, y_vec: np.ndarray - ) -> np.ndarray: + def orthorectified_slope_array(self, x_vec: np.ndarray, y_vec: np.ndarray) -> np.ndarray: """Returns X and Y surface slopes in ndarray format given X and Y sampling axes in the facet's child coordinate reference frame. 
@@ -144,16 +117,12 @@ def orthorectified_slope_array( points_samp = Vxyz((x_mat, y_mat, z_mat)) # facet child # Get mask of points on mirror - points_samp_mirror = self.transform_mirror_base_to_child.inv().apply( - points_samp - ) # mirror base + points_samp_mirror = self.transform_mirror_base_to_child.inv().apply(points_samp) # mirror base mask = self.mirror.in_bounds(points_samp_mirror.projXY()) points_samp_mirror = points_samp_mirror[mask] # Get normal vectors - normals = self.mirror.surface_norm_at( - points_samp_mirror.projXY() - ) # mirror base + normals = self.mirror.surface_norm_at(points_samp_mirror.projXY()) # mirror base normals.rotate_in_place(self.transform_mirror_base_to_child.R) # facet child # Calculate slopes and output as 2D array @@ -162,12 +131,7 @@ def orthorectified_slope_array( slope_data = np.reshape(slope_data, (2, y_vec.size, x_vec.size)) # facet child return slope_data # facet child - def draw( - self, - view: View3d, - mirror_style: RenderControlMirror, - transform: TransformXYZ | None = None, - ) -> None: + def draw(self, view: View3d, mirror_style: RenderControlMirror, transform: TransformXYZ | None = None) -> None: """ Draws facet mirror onto a View3d object. @@ -205,9 +169,7 @@ def set_pointing(self, *args) -> None: self.ori.transform_child_to_base using the given arguments. """ if self.pointing_function is None: - raise ValueError( - 'self.pointing_function is not defined. Use self.define_pointing_function.' - ) + raise ValueError('self.pointing_function is not defined. Use self.define_pointing_function.') self.ori.transform_child_to_base = self.pointing_function(*args) diff --git a/opencsp/common/lib/csp/FacetEnsemble.py b/opencsp/common/lib/csp/FacetEnsemble.py index 0c38371d..0109d22e 100644 --- a/opencsp/common/lib/csp/FacetEnsemble.py +++ b/opencsp/common/lib/csp/FacetEnsemble.py @@ -5,9 +5,7 @@ from scipy.spatial.transform import Rotation from opencsp.common.lib.csp.OpticOrientation import OpticOrientation -from opencsp.common.lib.csp.VisualizeOrthorectifiedSlopeAbstract import ( - VisualizeOrthorectifiedSlopeAbstract, -) +from opencsp.common.lib.csp.VisualizeOrthorectifiedSlopeAbstract import VisualizeOrthorectifiedSlopeAbstract from opencsp.common.lib.csp.Facet import Facet from opencsp.common.lib.csp.RayTraceable import RayTraceable from opencsp.common.lib.geometry.LoopXY import LoopXY @@ -44,17 +42,11 @@ def transform_mirror_base_to_child(self) -> list[TransformXYZ]: @property def transform_mirror_base_to_base(self) -> list[TransformXYZ]: - return [ - self.ori.transform_child_to_base * trans - for trans in self.transform_mirror_base_to_child - ] + return [self.ori.transform_child_to_base * trans for trans in self.transform_mirror_base_to_child] @property def transform_mirror_base_to_parent(self) -> list[TransformXYZ]: - return [ - self.ori.transform_child_to_parent * trans - for trans in self.transform_mirror_base_to_child - ] + return [self.ori.transform_child_to_parent * trans for trans in self.transform_mirror_base_to_child] @property def axis_aligned_bounding_box(self) -> tuple[float, float, float, float]: @@ -70,14 +62,10 @@ def axis_aligned_bounding_box(self) -> tuple[float, float, float, float]: xyz = [] # ensemble child for facet in self.facets: # Get all mirror region vertices - points_xy = Pxy.merge( - [loop.vertices for loop in facet.mirror.region.loops] - ) # mirror base + points_xy = Pxy.merge([loop.vertices for loop in facet.mirror.region.loops]) # mirror base points_z = facet.mirror.surface_displacement_at(points_xy) # mirror base 
points_xyz = Pxyz((points_xy.x, points_xy.y, points_z)) # mirror base - points_xyz = facet.transform_mirror_base_to_parent.apply( - points_xyz - ) # ensemble child + points_xyz = facet.transform_mirror_base_to_parent.apply(points_xyz) # ensemble child xyz.append(points_xyz) # ensemble child xyz = Pxyz.merge(xyz) # ensemble child @@ -85,24 +73,15 @@ def axis_aligned_bounding_box(self) -> tuple[float, float, float, float]: return xyz.x.min(), xyz.x.max(), xyz.y.min(), xyz.y.max() # ensemble child def survey_of_points( - self, - resolution: int, - resolution_type: str = 'pixelX', - random_seed: int | None = None, + self, resolution: int, resolution_type: str = 'pixelX', random_seed: int | None = None ) -> tuple[Pxyz, Vxyz]: # Get sample point locations (z=0 plane in "ensemble child" reference frame) bbox = self.axis_aligned_bounding_box # left, right, bottom, top width = bbox[1] - bbox[0] height = bbox[3] - bbox[2] - region = RegionXY( - LoopXY.from_rectangle(bbox[0], bbox[2], width, height) - ) # ensemble child - points_samp_xy = region.points_sample( - resolution, resolution_type, random_seed - ) # ensemble child - points_samp_xyz = Vxyz( - (points_samp_xy.x, points_samp_xy.y, np.zeros(len(points_samp_xy))) - ) # ensemble child + region = RegionXY(LoopXY.from_rectangle(bbox[0], bbox[2], width, height)) # ensemble child + points_samp_xy = region.points_sample(resolution, resolution_type, random_seed) # ensemble child + points_samp_xyz = Vxyz((points_samp_xy.x, points_samp_xy.y, np.zeros(len(points_samp_xy)))) # ensemble child idx_facet = 0 points_list = [] @@ -110,42 +89,30 @@ def survey_of_points( for idx_facet in range(self.num_facets): # Filter points that are inside mirror region points_mirror_base = ( - self.transform_mirror_base_to_child[idx_facet] - .inv() - .apply(points_samp_xyz) + self.transform_mirror_base_to_child[idx_facet].inv().apply(points_samp_xyz) ) # mirror base - mask = self.facets[idx_facet].mirror.region.is_inside_or_on_border( - points_mirror_base.projXY() - ) + mask = self.facets[idx_facet].mirror.region.is_inside_or_on_border(points_mirror_base.projXY()) points_mirror_base = points_mirror_base[mask] # mirror base # Calculate points and normals at sample locations - points_mirror_base = self.facets[idx_facet].mirror.location_at( - points_mirror_base.projXY() - ) # mirror base + points_mirror_base = self.facets[idx_facet].mirror.location_at(points_mirror_base.projXY()) # mirror base normals_mirror_base = self.facets[idx_facet].mirror.surface_norm_at( points_mirror_base.projXY() ) # mirror base # Convert from mirror to world reference frame points_list.append( - self.transform_mirror_base_to_parent[idx_facet].apply( - points_mirror_base - ) + self.transform_mirror_base_to_parent[idx_facet].apply(points_mirror_base) ) # ensemble parent normals_list.append( - normals_mirror_base.rotate( - self.transform_mirror_base_to_parent[idx_facet].R - ) + normals_mirror_base.rotate(self.transform_mirror_base_to_parent[idx_facet].R) ) # ensemble parent points = Vxyz.merge(points_list) normals = Vxyz.merge(normals_list) return points, normals # world coordinates - def orthorectified_slope_array( - self, x_vec: np.ndarray, y_vec: np.ndarray - ) -> np.ndarray: + def orthorectified_slope_array(self, x_vec: np.ndarray, y_vec: np.ndarray) -> np.ndarray: # Get sample points x_mat, y_mat = np.meshgrid(x_vec, y_vec) # ensemble child z_mat = np.zeros(x_mat.shape) # ensemble child @@ -154,35 +121,22 @@ def orthorectified_slope_array( slope_data = np.zeros((2, len(points_samp))) * np.nan # 
ensemble child for idx_facet in range(self.num_facets): # Get mask of points on mirror - points_samp_mirror = ( - self.transform_mirror_base_to_child[idx_facet].inv().apply(points_samp) - ) # mirror base + points_samp_mirror = self.transform_mirror_base_to_child[idx_facet].inv().apply(points_samp) # mirror base mask = self.facets[idx_facet].mirror.in_bounds(points_samp_mirror.projXY()) points_samp_mirror = points_samp_mirror[mask] # Get normal vectors - normals = self.facets[idx_facet].mirror.surface_norm_at( - points_samp_mirror.projXY() - ) # mirror base - normals.rotate_in_place( - self.transform_mirror_base_to_child[idx_facet].R - ) # ensemble child + normals = self.facets[idx_facet].mirror.surface_norm_at(points_samp_mirror.projXY()) # mirror base + normals.rotate_in_place(self.transform_mirror_base_to_child[idx_facet].R) # ensemble child # Calculate slopes and output as 2D array - slope_data[:, mask] = ( - -normals.data[:2] / normals.data[2:3] - ) # ensemble child + slope_data[:, mask] = -normals.data[:2] / normals.data[2:3] # ensemble child - slope_data = np.reshape( - slope_data, (2, y_vec.size, x_vec.size) - ) # ensemble child + slope_data = np.reshape(slope_data, (2, y_vec.size, x_vec.size)) # ensemble child return slope_data # ensemble child def draw( - self, - view: View3d, - mirror_style: RenderControlMirror, - transform: list[TransformXYZ] | None = None, + self, view: View3d, mirror_style: RenderControlMirror, transform: list[TransformXYZ] | None = None ) -> None: """ Draws facet ensemble onto a View3d object. @@ -225,9 +179,7 @@ def set_pointing(self, *args) -> None: self.ori.transform_child_to_base using the given arguments. """ if self.pointing_function is None: - raise ValueError( - 'self.pointing_function is not defined. Use self.define_pointing_function.' - ) + raise ValueError('self.pointing_function is not defined. Use self.define_pointing_function.') self.ori.transform_child_to_base = self.pointing_function(*args) diff --git a/opencsp/common/lib/csp/LightPath.py b/opencsp/common/lib/csp/LightPath.py index 6de75bd0..91e0dd05 100644 --- a/opencsp/common/lib/csp/LightPath.py +++ b/opencsp/common/lib/csp/LightPath.py @@ -50,15 +50,11 @@ def __init__( intensity would be [i@v1, i_after_p1, i_after_p2 , i_after_p3 or i@v2]. (list[float]) """ if len(init_direction) != 1: - raise ValueError( - f"Initial direction argument should be a single vector. Given value was {init_direction}." - ) + raise ValueError(f"Initial direction argument should be a single vector. 
Given value was {init_direction}.") self.points_list = points_list # TODO: assert -1e-6 < np.linalg.norm(init_direction) - 1 < 1e-6 self.init_direction = init_direction - self.current_direction = ( - init_direction if current_direction is None else current_direction - ) + self.current_direction = init_direction if current_direction is None else current_direction # TODO: assert 1e-6 < np.linalg.norm(self.current_direction) - 1 < 1e-6 self.color = color self.intensity = intensity @@ -70,25 +66,17 @@ def __str__(self) -> str: # TODO tjlarki: make a more useful string representat return f"{self.init_direction} --> \n{self.points_list} --> \n{self.current_direction}" def many_rays_from_many_vectors( - many_points_lists: list[Pxyz], - many_init_directions: Vxyz, - many_current_directions: Vxyz = [], + many_points_lists: list[Pxyz], many_init_directions: Vxyz, many_current_directions: Vxyz = [] ) -> list['LightPath']: """ Creates a list of LightPaths from vectors If the many_points_lists is None then the function will infer that they are all just the current vectors and have no history. """ - if ( - many_points_lists == None - ): # None implies there are no recorded points at all + if many_points_lists == None: # None implies there are no recorded points at all many_points_lists = [Pxyz.empty()] * len(many_init_directions) - elif len(many_points_lists) > 0 and len(many_points_lists) != len( - many_init_directions - ): - raise ValueError( - f"The number of points lists and initial vectors must be the same." - ) + elif len(many_points_lists) > 0 and len(many_points_lists) != len(many_init_directions): + raise ValueError(f"The number of points lists and initial vectors must be the same.") diff_vectors = len(many_init_directions) - len(many_current_directions) many_current_directions = ( @@ -104,11 +92,7 @@ def many_rays_from_many_vectors( # print(f"MANY RESULT : {res}") return res - def draw( - self, - view: View3d, - path_style: rclp.RenderControlLightPath = rclp.default_path(), - ) -> None: + def draw(self, view: View3d, path_style: rclp.RenderControlLightPath = rclp.default_path()) -> None: # print("drawing ray") # TODO tristan print for debug # print(f"Points: \n{self.points_list}") # TODO tristan debug print points_array = list(self.points_list.data.T) @@ -120,8 +104,7 @@ def draw( [points_array[0] - init_direction_array * path_style.init_length] + points_array # initial direction + [ # each point passed through - points_array[-1] - + current_direction_array * path_style.current_length + points_array[-1] + current_direction_array * path_style.current_length ], # current direction style=path_style.line_render_control, ) @@ -130,9 +113,7 @@ def draw( if path_style.end_at_plane != None: plane_point, plane_normal_vector = path_style.end_at_plane - plane_normal_vector = ( - plane_normal_vector.as_Vxyz() - ) # cannot have the directions be Uxyz objects + plane_normal_vector = plane_normal_vector.as_Vxyz() # cannot have the directions be Uxyz objects current_direction = self.current_direction.as_Vxyz() d: float = Vxyz.dot(plane_normal_vector, current_direction) diff --git a/opencsp/common/lib/csp/LightPathEnsemble.py b/opencsp/common/lib/csp/LightPathEnsemble.py index a572f28d..bd60ee8d 100644 --- a/opencsp/common/lib/csp/LightPathEnsemble.py +++ b/opencsp/common/lib/csp/LightPathEnsemble.py @@ -31,9 +31,7 @@ def __iadd__(self, lpe: 'LightPathEnsemble'): @classmethod @strict_types - def from_parts( - cls, init_directions: Uxyz, points: list[Pxyz], curr_directions: Uxyz, colors=[] - ): + def 
from_parts(cls, init_directions: Uxyz, points: list[Pxyz], curr_directions: Uxyz, colors=[]): lpe = LightPathEnsemble([]) lpe.current_directions = curr_directions lpe.init_directions = init_directions @@ -63,14 +61,10 @@ def add_steps(self, points: Pxyz, new_current_directions: Uxyz): else: self.points_lists[i] = old_points.concatenate(new_point) - self.current_directions = ( - new_current_directions # update the current directions - ) + self.current_directions = new_current_directions # update the current directions def concatenate_in_place(self: 'LightPathEnsemble', lpe1: 'LightPathEnsemble'): - self.current_directions = self.current_directions.concatenate( - lpe1.current_directions - ) + self.current_directions = self.current_directions.concatenate(lpe1.current_directions) self.init_directions = self.init_directions.concatenate(lpe1.init_directions) self.points_lists += lpe1.points_lists self.colors += lpe1.colors @@ -78,9 +72,7 @@ def concatenate_in_place(self: 'LightPathEnsemble', lpe1: 'LightPathEnsemble'): def asLightPathList(self) -> list[LightPath]: lps: list[LightPath] = [] - for cd, id, pl in zip( - self.current_directions, self.init_directions, self.points_lists - ): + for cd, id, pl in zip(self.current_directions, self.init_directions, self.points_lists): lp = LightPath(pl, id, cd) lps.append(lp) return lps @@ -91,9 +83,7 @@ def __add__(self, lpe: 'LightPathEnsemble'): def concatenate(self: 'LightPathEnsemble', lpe1: 'LightPathEnsemble'): new_lpe = LightPathEnsemble([]) - new_lpe.current_directions = self.current_directions.concatenate( - lpe1.current_directions - ) + new_lpe.current_directions = self.current_directions.concatenate(lpe1.current_directions) new_lpe.init_directions = self.init_directions.concatenate(lpe1.init_directions) new_lpe.points_lists = self.points_lists + (lpe1.points_lists) new_lpe.colors = self.colors + (lpe1.colors) @@ -101,9 +91,7 @@ def concatenate(self: 'LightPathEnsemble', lpe1: 'LightPathEnsemble'): def asLightPathList(self) -> list[LightPath]: lps: list[LightPath] = [] - for cd, id, pl in zip( - self.current_directions, self.init_directions, self.points_lists - ): + for cd, id, pl in zip(self.current_directions, self.init_directions, self.points_lists): lp = LightPath(pl, id, cd) lps.append(lp) return lps diff --git a/opencsp/common/lib/csp/LightSourcePoint.py b/opencsp/common/lib/csp/LightSourcePoint.py index c01eed05..d475d767 100644 --- a/opencsp/common/lib/csp/LightSourcePoint.py +++ b/opencsp/common/lib/csp/LightSourcePoint.py @@ -8,17 +8,13 @@ class LightSourcePoint(LightSource): def __init__(self, location_in_space: Pxyz) -> None: if not isinstance(location_in_space, Vxyz): - raise TypeError( - f"Input location_in_space must be subclass of {Vxyz} but is {type(location_in_space)}" - ) + raise TypeError(f"Input location_in_space must be subclass of {Vxyz} but is {type(location_in_space)}") self.location_in_space = location_in_space def get_incident_rays(self, point: Pxyz) -> list[LightPath]: # Check inputs if not isinstance(point, Vxyz): - raise TypeError( - f"Input point must be subclass of {Vxyz} but is {type(point)}" - ) + raise TypeError(f"Input point must be subclass of {Vxyz} but is {type(point)}") init_vector = Uxyz.normalize(point - self.location_in_space) return [LightPath(self.location_in_space, Vxyz([0, 0, 0]), init_vector)] diff --git a/opencsp/common/lib/csp/LightSourceSun.py b/opencsp/common/lib/csp/LightSourceSun.py index 69b27829..c486d688 100644 --- a/opencsp/common/lib/csp/LightSourceSun.py +++ 
b/opencsp/common/lib/csp/LightSourceSun.py @@ -22,11 +22,7 @@ def get_incident_rays(self, point: Pxyz) -> list[LightPath]: @classmethod def from_given_sun_position( - cls, - sun_pointing: Uxyz, - resolution: int, - sun_dia: float = 0.009308, - verbose=False, + cls, sun_pointing: Uxyz, resolution: int, sun_dia: float = 0.009308, verbose=False ) -> 'LightSourceSun': """Returns LightSourceSun object initialized from a given pointing direction. Represents the sun as a tophat function in space. @@ -86,9 +82,7 @@ def from_location_time( # Calculate direction of sun pointing alt = pysolar.solar.get_altitude(loc[0], loc[1], time) azm = pysolar.solar.get_azimuth(loc[0], loc[1], time) - sun_pointing = -Vxyz((0, 1, 0)).rotate( - Rotation.from_euler('xz', [alt, -azm], degrees=True) - ) + sun_pointing = -Vxyz((0, 1, 0)).rotate(Rotation.from_euler('xz', [alt, -azm], degrees=True)) # Calculate sun ray cone pointing down (z=-1) sun_rays = cls._calc_sun_ray_cone(resolution, sun_dia, verbose) @@ -106,9 +100,7 @@ def from_location_time( return obj @staticmethod - def _calc_sun_ray_cone( - resolution: int, sun_dia: float, verbose: bool = False - ) -> Vxyz: + def _calc_sun_ray_cone(resolution: int, sun_dia: float, verbose: bool = False) -> Vxyz: # Calculate sun radius sun_radius = sun_dia / 2 @@ -143,12 +135,7 @@ def _calc_sun_ray_cone( return sun_rays def set_incident_rays( - self, - loc: tuple[float, float], - time: tuple, - resolution: int, - sun_dia: float = 0.009308, - verbose=False, + self, loc: tuple[float, float], time: tuple, resolution: int, sun_dia: float = 0.009308, verbose=False ) -> None: """ Defines the rays that will be used from this light source for ray tracing. @@ -207,17 +194,11 @@ def set_incident_rays( # rotate the cone of sun rays print("Rotating sun rays...") angle: float = np.arccos(np.dot(np.array([0, 0, -1]), real_center_vector)) - cross_prod = np.cross( - real_center_vector, np.array([0, 0, 1]) - ) # angle to rotate the rays - axis_of_rotation = cross_prod / np.linalg.norm( - cross_prod - ) # axis to rotate around + cross_prod = np.cross(real_center_vector, np.array([0, 0, 1])) # angle to rotate the rays + axis_of_rotation = cross_prod / np.linalg.norm(cross_prod) # axis to rotate around rotation_from_sun_position = Rotation.from_rotvec(angle * axis_of_rotation) # print(f"rot from sun: {rotation_from_sun_position}") # print(f"SUN RAYS: {sun_rays}") # TODO rotated_sun_rays = sun_rays.rotate(rotation_from_sun_position) - self.incident_rays = LightPath.many_rays_from_many_vectors( - None, rotated_sun_rays - ) + self.incident_rays = LightPath.many_rays_from_many_vectors(None, rotated_sun_rays) print("Sun rays are initialized\n") diff --git a/opencsp/common/lib/csp/MirrorAbstract.py b/opencsp/common/lib/csp/MirrorAbstract.py index aceb91e2..9a32228e 100644 --- a/opencsp/common/lib/csp/MirrorAbstract.py +++ b/opencsp/common/lib/csp/MirrorAbstract.py @@ -9,9 +9,7 @@ from opencsp.common.lib.csp.OpticOrientation import OpticOrientation from opencsp.common.lib.csp.RayTraceable import RayTraceable -from opencsp.common.lib.csp.VisualizeOrthorectifiedSlopeAbstract import ( - VisualizeOrthorectifiedSlopeAbstract, -) +from opencsp.common.lib.csp.VisualizeOrthorectifiedSlopeAbstract import VisualizeOrthorectifiedSlopeAbstract from opencsp.common.lib.geometry.Pxy import Pxy from opencsp.common.lib.geometry.Pxyz import Pxyz from opencsp.common.lib.geometry.RegionXY import RegionXY @@ -182,16 +180,11 @@ def point_and_normal_in_space(self, p: Pxy) -> tuple[Pxyz, Vxyz]: return (point, normal) def 
survey_of_points( - self, - resolution: int, - resolution_type: str = 'pixelX', - random_seed: int | None = None, + self, resolution: int, resolution_type: str = 'pixelX', random_seed: int | None = None ) -> tuple[Pxyz, Vxyz]: # Get points that will be on the mirror when lifted from the XY plane filtered_points = self.region.points_sample( - resolution=resolution, - resolution_type=resolution_type, - random_seed=random_seed, + resolution=resolution, resolution_type=resolution_type, random_seed=random_seed ) # Return lifted points and normal vectors in "facet mirror mount" coordinate reference frame points = self.location_in_space(filtered_points) @@ -199,10 +192,7 @@ def survey_of_points( return points, norms def survey_of_points_local( - self, - resolution: int, - resolution_type: str = 'pixelX', - random_seed: int | None = None, + self, resolution: int, resolution_type: str = 'pixelX', random_seed: int | None = None ) -> tuple[Pxyz, Vxyz]: """Returns a set of points sampled from inside the optic region in the mirror's base coordinate reference frame. @@ -216,18 +206,14 @@ def survey_of_points_local( """ # Get points that will be on the mirror when lifted from the XY plane filtered_points = self.region.points_sample( - resolution=resolution, - resolution_type=resolution_type, - random_seed=random_seed, + resolution=resolution, resolution_type=resolution_type, random_seed=random_seed ) # Return lifted points and normal vectors in local coordinates points = self.location_at(filtered_points) norms = self.surface_norm_at(filtered_points) return points, norms - def orthorectified_slope_array( - self, x_vec: np.ndarray, y_vec: np.ndarray - ) -> np.ndarray: + def orthorectified_slope_array(self, x_vec: np.ndarray, y_vec: np.ndarray) -> np.ndarray: """Returns X and Y surface slopes in ndarray format given X and Y sampling axes in the mirror's base coordinate reference frame. @@ -244,9 +230,7 @@ def orthorectified_slope_array( """ # Check vectors are 1 dimensional if (np.ndim(x_vec) != 1) or (np.ndim(y_vec) != 1): - raise ValueError( - f'X and Y vectors must be 1d, but had shapes: {x_vec.shape}, {y_vec.shape}.' - ) + raise ValueError(f'X and Y vectors must be 1d, but had shapes: {x_vec.shape}, {y_vec.shape}.') # Create interpolation axes x_mat, y_mat = np.meshgrid(x_vec, y_vec) # meters @@ -257,20 +241,13 @@ def orthorectified_slope_array( # Calculate normals normals = np.zeros((3, len(pts))) * np.nan - normals[:, mask] = self.surface_norm_at( - pts[mask] - ).data # 3 x M*N, normalized vectors + normals[:, mask] = self.surface_norm_at(pts[mask]).data # 3 x M*N, normalized vectors # Calculate slopes slopes = -normals[:2] / normals[2:3] # normalize z coordinate return slopes.reshape((2, y_vec.size, x_vec.size)) # 2 x M x N - def draw( - self, - view: View3d, - mirror_style: RenderControlMirror, - transform: TransformXYZ | None = None, - ) -> None: + def draw(self, view: View3d, mirror_style: RenderControlMirror, transform: TransformXYZ | None = None) -> None: """ Draws a mirror onto a View3d object. 
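
Review note: every orthorectified_slope_array variant touched by this patch follows the same sampling pattern: flatten a meshgrid of sample points, evaluate only the points that land on the mirror, leave NaN elsewhere, and reshape to a (2, ny, nx) slope image. A small self-contained sketch of that masking pattern, using a unit-disk bounds test and a quadratic surface as stand-ins for the OpenCSP region and mirror objects:

    # Hypothetical sketch of the NaN-masked orthorectified sampling pattern;
    # the in-bounds test and the surface are stand-ins, not OpenCSP calls.
    import numpy as np

    x_vec = np.linspace(-1, 1, 5)
    y_vec = np.linspace(-1, 1, 4)
    x_mat, y_mat = np.meshgrid(x_vec, y_vec)          # ny x nx sample grid
    pts = np.vstack([x_mat.ravel(), y_mat.ravel()])   # 2 x (ny*nx)

    mask = (pts[0] ** 2 + pts[1] ** 2) <= 1.0         # stand-in for mirror.in_bounds

    # Stand-in surface z = x^2 + y^2, so dz/dx = 2x and dz/dy = 2y.
    slope_data = np.full((2, pts.shape[1]), np.nan)   # NaN everywhere off the mirror
    slope_data[0, mask] = 2 * pts[0, mask]
    slope_data[1, mask] = 2 * pts[1, mask]

    slope_image = slope_data.reshape(2, y_vec.size, x_vec.size)  # 2 x ny x nx
    print(slope_image[0])
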
@@ -291,26 +268,18 @@ def draw( # Sample points within and on edge of region edge_values = self.region.edge_sample(resolution) # 2d, mirror coordinates - inner_values = self.region.points_sample( - resolution, 'pixelX' - ) # 2d, mirror coordinates + inner_values = self.region.points_sample(resolution, 'pixelX') # 2d, mirror coordinates domain = edge_values.concatenate(inner_values) # 2d, mirror coordinates points_surf = self.location_at(domain) # 3d, mirror coordinates edge_values_lifted = self.location_at(edge_values) # 3d, mirror coordinates points_surf = transform.apply(points_surf) # 3d, current reference frame - edge_values_lifted = transform.apply( - edge_values_lifted - ) # 3d, current reference frame + edge_values_lifted = transform.apply(edge_values_lifted) # 3d, current reference frame # Draw surface triangulation tri = Triangulation(domain.x, domain.y) # create triangles - view.draw_xyz_trisurface( - *points_surf.data, - surface_style=mirror_style.surface_style, - triangles=tri.triangles, - ) + view.draw_xyz_trisurface(*points_surf.data, surface_style=mirror_style.surface_style, triangles=tri.triangles) # Draw surface boundary if mirror_style.point_styles is not None: @@ -320,21 +289,14 @@ def draw( # Draw surface normals if mirror_style.surface_normals: # Get sample points and normals - points, normals = self.survey_of_points_local( - mirror_style.norm_res, 'pixelX', None - ) # mirror coordinates + points, normals = self.survey_of_points_local(mirror_style.norm_res, 'pixelX', None) # mirror coordinates points = transform.apply(points) # current reference frame normals.rotate_in_place(transform.R) # current reference frame # Put in list - xyzdxyz = [ - [point.data, normal.data * mirror_style.norm_len] - for point, normal in zip(points, normals) - ] + xyzdxyz = [[point.data, normal.data * mirror_style.norm_len] for point, normal in zip(points, normals)] # Draw on plot - view.draw_xyzdxyz_list( - xyzdxyz, close=False, style=mirror_style.norm_base_style - ) + view.draw_xyzdxyz_list(xyzdxyz, close=False, style=mirror_style.norm_base_style) def set_position_in_space(self, translation: Pxyz, rotation: Rotation) -> None: # Check input type diff --git a/opencsp/common/lib/csp/MirrorParametric.py b/opencsp/common/lib/csp/MirrorParametric.py index b0c08d72..b1f19f21 100644 --- a/opencsp/common/lib/csp/MirrorParametric.py +++ b/opencsp/common/lib/csp/MirrorParametric.py @@ -23,9 +23,7 @@ class MirrorParametric(MirrorAbstract): """ def __init__( - self, - surface_function: Callable[[np.ndarray, np.ndarray], np.ndarray], - shape: RegionXY, + self, surface_function: Callable[[np.ndarray, np.ndarray], np.ndarray], shape: RegionXY ) -> 'MirrorParametric': """Instantiates MirrorParametric class @@ -47,11 +45,11 @@ def __init__( self._normals_function = self._define_normals_function(surface_function) def __repr__(self) -> str: - return f"Parametricly defined mirror defined by the function {inspect.getsourcelines(self._surface_function)[0]}" + return ( + f"Parametricly defined mirror defined by the function {inspect.getsourcelines(self._surface_function)[0]}" + ) - def _define_normals_function( - self, surface_function: Callable[[float, float], float] - ) -> Callable: + def _define_normals_function(self, surface_function: Callable[[float, float], float]) -> Callable: """Returns a normal vector generating function given a surface z coordinate function @@ -100,9 +98,7 @@ def _normals_function(x: np.ndarray, y: np.ndarray) -> np.ndarray: dfdy_n *= np.ones(y.shape) # Create constant z coordinate 
z_norm = np.ones(x.shape) - return np.concatenate( - (-dfdx_n[..., None], -dfdy_n[..., None], z_norm[..., None]), axis=-1 - ) + return np.concatenate((-dfdx_n[..., None], -dfdy_n[..., None], z_norm[..., None]), axis=-1) return _normals_function @@ -123,9 +119,7 @@ def surface_displacement_at(self, p: Pxy) -> np.ndarray[float]: return self._surface_function(p.x, p.y) @classmethod - def generate_symmetric_paraboloid( - cls, focal_length: float, shape: RegionXY - ) -> 'MirrorParametric': + def generate_symmetric_paraboloid(cls, focal_length: float, shape: RegionXY) -> 'MirrorParametric': """Generate a symmetric parabolic mirror with the given focal length Parameters diff --git a/opencsp/common/lib/csp/MirrorParametricRectangular.py b/opencsp/common/lib/csp/MirrorParametricRectangular.py index 10888fe5..08716d63 100644 --- a/opencsp/common/lib/csp/MirrorParametricRectangular.py +++ b/opencsp/common/lib/csp/MirrorParametricRectangular.py @@ -13,11 +13,7 @@ class MirrorParametricRectangular(MirrorParametric): Mirror implementation defined by a parametric function and rectangular side lengths. """ - def __init__( - self, - surface_function: Callable[[float, float], float], - size: tuple[float, float] | float, - ) -> None: + def __init__(self, surface_function: Callable[[float, float], float], size: tuple[float, float] | float) -> None: """Instantiates a MirrorParametricRectangular object. Parameters diff --git a/opencsp/common/lib/csp/MirrorPoint.py b/opencsp/common/lib/csp/MirrorPoint.py index bc8a6dd4..63b7ec86 100644 --- a/opencsp/common/lib/csp/MirrorPoint.py +++ b/opencsp/common/lib/csp/MirrorPoint.py @@ -27,9 +27,7 @@ def __init__( surface_points: Pxyz, normal_vectors: Uxyz, shape: RegionXY, - interpolation_type: Literal[ - 'given', 'bilinear', 'clough_tocher', 'nearest' - ] = 'nearest', + interpolation_type: Literal['given', 'bilinear', 'clough_tocher', 'nearest'] = 'nearest', ) -> None: """Class representing a mirror defined by discrete, scattered points and corresponding normal vectors. 
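
Review note: MirrorParametric.generate_symmetric_paraboloid (reformatted above) builds a mirror from a focal length; for an on-axis paraboloid the standard surface is z = (x^2 + y^2) / (4 f). The sketch below checks that relation and the (-dz/dx, -dz/dy, 1) normal convention numerically; it is illustrative only and does not call the OpenCSP constructor, whose body is outside this hunk:

    # Hypothetical stand-alone check of the symmetric-paraboloid relation
    # z = (x^2 + y^2) / (4 * focal_length); not a call into MirrorParametric.
    import numpy as np

    focal_length = 2.0

    def surface(x, y):
        """Paraboloid whose focus sits at z = focal_length on the optical axis."""
        return (x**2 + y**2) / (4.0 * focal_length)

    # Analytic slopes are x/(2f) and y/(2f); compare against central differences.
    x, y, eps = 0.3, -0.4, 1e-6
    dfdx = (surface(x + eps, y) - surface(x - eps, y)) / (2 * eps)
    dfdy = (surface(x, y + eps) - surface(x, y - eps)) / (2 * eps)
    print(dfdx, x / (2 * focal_length))   # both ~0.075
    print(dfdy, y / (2 * focal_length))   # both ~-0.1

    # Unnormalized normal, following the (-dz/dx, -dz/dy, 1) convention used
    # by the _normals_function hunk above.
    normal = np.array([-dfdx, -dfdy, 1.0])
    print(normal / np.linalg.norm(normal))
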
@@ -69,8 +67,7 @@ def __init__( self._define_interpolation(interpolation_type) def _define_interpolation( - self, - interpolation_type: Literal['given', 'bilinear', 'clough_tocher', 'nearest'], + self, interpolation_type: Literal['given', 'bilinear', 'clough_tocher', 'nearest'] ) -> None: """Defines the interpolation type to use @@ -101,14 +98,10 @@ def _define_interpolation( # Z coordinate interpolation object points_xy = self.surface_points.projXY().data.T # Nx2 array Z = self.surface_points.z - self.surface_function = interp.CloughTocher2DInterpolator( - points_xy, Z, np.nan - ) + self.surface_function = interp.CloughTocher2DInterpolator(points_xy, Z, np.nan) # Normal vector interpolation object Z_N = self.normal_vectors.data.T - self.normals_function = interp.CloughTocher2DInterpolator( - points_xy, Z_N, np.nan - ) + self.normals_function = interp.CloughTocher2DInterpolator(points_xy, Z_N, np.nan) elif interpolation_type == 'nearest': # Z coordinate interpolation object points_xy = self.surface_points.projXY().data.T # Nx2 array @@ -120,29 +113,20 @@ def _define_interpolation( elif interpolation_type == 'given': # Z coordinate lookup function points_lookup = { - (x, y): z - for x, y, z in zip( - self.surface_points.x, self.surface_points.y, self.surface_points.z - ) + (x, y): z for x, y, z in zip(self.surface_points.x, self.surface_points.y, self.surface_points.z) } self.surface_function = FXYD(points_lookup) # Normal vector lookup function normals_lookup = { (x, y): normal - for x, y, normal in zip( - self.surface_points.x, - self.surface_points.y, - self.normal_vectors.data.T, - ) + for x, y, normal in zip(self.surface_points.x, self.surface_points.y, self.normal_vectors.data.T) } self.normals_function = FXYD(normals_lookup) # Assert that there are no duplicate (x,y) pairs if len(points_lookup) != len(self.surface_points): raise ValueError("All (x,y) pairs must be unique.") else: - raise ValueError( - f"Interpolation type {str(interpolation_type)} does not exist." 
- ) + raise ValueError(f"Interpolation type {str(interpolation_type)} does not exist.") # Save interpolation type self.interpolation_type = interpolation_type @@ -162,18 +146,13 @@ def surface_displacement_at(self, p: Pxy) -> np.ndarray: return self.surface_function(p.x, p.y) def survey_of_points( - self, - resolution: int = 1, - resolution_type: str = "pixelX", - random_seed: int | None = None, + self, resolution: int = 1, resolution_type: str = "pixelX", random_seed: int | None = None ) -> tuple[Pxyz, Vxyz]: # If using "given" type samping if self.interpolation_type == 'given': if resolution_type != "given": warn( - "Resolution type becomes 'given' when using type 'given' interpolation.", - UserWarning, - stacklevel=2, + "Resolution type becomes 'given' when using type 'given' interpolation.", UserWarning, stacklevel=2 ) given_points_xy = self.surface_points.projXY() points = self.location_in_space(given_points_xy) @@ -181,18 +160,11 @@ def survey_of_points( # If surface is interpolated, sample using MirrorAbstact method else: - points, normals = super().survey_of_points( - resolution, resolution_type, random_seed - ) + points, normals = super().survey_of_points(resolution, resolution_type, random_seed) return points, normals - def draw( - self, - view: View3d, - mirror_style: RenderControlMirror, - transform: TransformXYZ | None = None, - ) -> None: + def draw(self, view: View3d, mirror_style: RenderControlMirror, transform: TransformXYZ | None = None) -> None: # If no interpolation if self.interpolation_type == 'given': resolution = mirror_style.resolution @@ -208,9 +180,7 @@ def draw( # Calculate z height of boundary to draw (lowest z value) min_val = min(self.surface_displacement_at(domain)) - edge_values_lifted = Vxyz( - [edge_values.x, edge_values.y, [min_val] * len(edge_values)] - ) + edge_values_lifted = Vxyz([edge_values.x, edge_values.y, [min_val] * len(edge_values)]) # Convert edge values to global coordinate system edge_values_lifted = transform.apply(edge_values_lifted) @@ -235,13 +205,8 @@ def draw( normals.rotate_in_place(transform.R) # Draw points and normals - xyzdxyz = [ - [point.data, normal.data * mirror_style.norm_len] - for point, normal in zip(points, normals) - ] - view.draw_xyzdxyz_list( - xyzdxyz, close=False, style=mirror_style.norm_base_style - ) + xyzdxyz = [[point.data, normal.data * mirror_style.norm_len] for point, normal in zip(points, normals)] + view.draw_xyzdxyz_list(xyzdxyz, close=False, style=mirror_style.norm_base_style) # If surface is interpolated, draw mirror using MirrorAbstract method else: diff --git a/opencsp/common/lib/csp/OpticOrientation.py b/opencsp/common/lib/csp/OpticOrientation.py index 137b40f6..72885a36 100644 --- a/opencsp/common/lib/csp/OpticOrientation.py +++ b/opencsp/common/lib/csp/OpticOrientation.py @@ -44,27 +44,17 @@ def __repr__(self): if self.no_child: out += 'Child to Base: None\n' else: - out += ( - 'Child to Base: ' - + str(self.transform_child_to_base.R.as_rotvec().round(3)) - + '\n' - ) + out += 'Child to Base: ' + str(self.transform_child_to_base.R.as_rotvec().round(3)) + '\n' # Base to parent if self.no_parent: out += 'Base to parent: None\n' else: - out += ( - 'Base to parent: ' - + str(self.transform_base_to_parent.R.as_rotvec().round(3)) - + '\n' - ) + out += 'Base to parent: ' + str(self.transform_base_to_parent.R.as_rotvec().round(3)) + '\n' # Child to parent if self.no_child or self.no_parent: out += 'Child to parent: None' else: - out += 'Child to parent: ' + str( - 
self.transform_child_to_base.R.as_rotvec().round(3) - ) + out += 'Child to parent: ' + str(self.transform_child_to_base.R.as_rotvec().round(3)) return out @property @@ -79,9 +69,7 @@ def transform_child_to_base(self, transform: TransformXYZ) -> None: if self.no_child: raise ValueError('Optic does not have child mount.') if not isinstance(transform, TransformXYZ): - raise TypeError( - f'Input transform must be type {TransformXYZ} but is type {type(transform)}' - ) + raise TypeError(f'Input transform must be type {TransformXYZ} but is type {type(transform)}') self._transform_child_to_base = transform.copy() @property @@ -96,9 +84,7 @@ def transform_base_to_parent(self, transform: TransformXYZ) -> TransformXYZ: if self.no_parent: raise ValueError('Optic does not have parent mount.') if not isinstance(transform, TransformXYZ): - raise TypeError( - f'Input transform must be type {TransformXYZ} but is type {type(transform)}' - ) + raise TypeError(f'Input transform must be type {TransformXYZ} but is type {type(transform)}') self._transform_base_to_parent = transform.copy() @property diff --git a/opencsp/common/lib/csp/RayTrace.py b/opencsp/common/lib/csp/RayTrace.py index d6933fdf..d6d18fba 100644 --- a/opencsp/common/lib/csp/RayTrace.py +++ b/opencsp/common/lib/csp/RayTrace.py @@ -23,9 +23,7 @@ from opencsp.common.lib.geometry.Vxy import Vxy from opencsp.common.lib.geometry.Vxyz import Vxyz from opencsp.common.lib.render.View3d import View3d -from opencsp.common.lib.render_control.RenderControlRayTrace import ( - RenderControlRayTrace, -) +from opencsp.common.lib.render_control.RenderControlRayTrace import RenderControlRayTrace from opencsp.common.lib.tool.hdf5_tools import load_hdf5_datasets, save_hdf5_datasets from opencsp.common.lib.tool.typing_tools import strict_types @@ -56,9 +54,7 @@ def __add__(self, trace: 'RayTrace'): for obj in self.scene.objects + trace.scene.objects: sum_trace.scene.add_object(obj) - sum_trace.light_paths_ensemble = ( - self.light_paths_ensemble + trace.light_paths_ensemble - ) + sum_trace.light_paths_ensemble = self.light_paths_ensemble + trace.light_paths_ensemble return sum_trace @@ -71,9 +67,7 @@ def draw(self, view: View3d, trace_style: RenderControlRayTrace = None) -> None: for lp in self.light_paths_ensemble: lp.draw(view, trace_style.light_path_control) - def draw_subset( - self, view: View3d, count: int, trace_style: RenderControlRayTrace = None - ): + def draw_subset(self, view: View3d, count: int, trace_style: RenderControlRayTrace = None): for i in np.floor(np.linspace(0, len(self.light_paths_ensemble) - 1, count)): lp = self.light_paths_ensemble[int(i)] lp.draw(view, trace_style.light_path_control) @@ -87,26 +81,16 @@ def from_hdf(cls, filename: str, trace_name: str = "RayTrace") -> 'RayTrace': """Creates a RayTrace object from an hdf5 file.""" trace = RayTrace() - batch_names: list[str] = list( - load_hdf5_datasets([f"RayTrace_{trace_name}/Batches"], filename).values() - )[0] + batch_names: list[str] = list(load_hdf5_datasets([f"RayTrace_{trace_name}/Batches"], filename).values())[0] lpe = LightPathEnsemble([]) for batch in batch_names: prefix = f"RayTrace_{trace_name}/Batches/{batch}/" - subgroups = [ - prefix + 'CurrentDirections', - prefix + 'InitialDirections', - prefix + 'Points', - ] - curr_directions, init_directions, points = list( - load_hdf5_datasets(subgroups, filename).values() - ) + subgroups = [prefix + 'CurrentDirections', prefix + 'InitialDirections', prefix + 'Points'] + curr_directions, init_directions, points = 
list(load_hdf5_datasets(subgroups, filename).values()) curr_directions = Uxyz(curr_directions) init_directions = Uxyz(init_directions) points = list(map(Pxyz, points)) - lpe.concatenate_in_place( - LightPathEnsemble.from_parts(init_directions, points, curr_directions) - ) + lpe.concatenate_in_place(LightPathEnsemble.from_parts(init_directions, points, curr_directions)) trace.light_paths_ensemble = lpe return trace @@ -185,9 +169,7 @@ def trace_scene_unvec( ray_trace = RayTrace(scene) for obj in scene.objects: # get the points on the mirror to reflect off of - points_and_normals = obj.survey_of_points( - obj_resolution, random_dist=random_dist - ) + points_and_normals = obj.survey_of_points(obj_resolution, random_dist=random_dist) # from tutorial https://www.geeksforgeeks.org/python-unzip-a-list-of-tuples/ unzipped_p_and_n = list(zip(*points_and_normals)) just_points = unzipped_p_and_n[0] @@ -230,9 +212,7 @@ def trace_scene_unvec( ray = LightPath([], vector_from_path) ray.add_step(p, ref_vec[:, 0]) ray_trace.add_light_path(ray) - if ( - verbose and i in checkpoints - ): # TODO tjlarki: make sure this check does not slow down the program + if verbose and i in checkpoints: # TODO tjlarki: make sure this check does not slow down the program print(f"{i/tot:.2%} through tracing.") # if save_in_file: @@ -263,13 +243,9 @@ def trace_scene( stacklevel=2, ) if save_in_file and save_name == None: - raise ValueError( - "save_in_file flag was True, but no file was specified to dave in." - ) + raise ValueError("save_in_file flag was True, but no file was specified to dave in.") if save_in_file and max_ram_in_use_percent < psutil.virtual_memory().percent: - raise MemoryError( - "Maximum memory allocated to ray trace was reached before the trace has begun" - ) + raise MemoryError("Maximum memory allocated to ray trace was reached before the trace has begun") # start if verbose: @@ -302,16 +278,9 @@ def trace_for_single_object(obj: RayTraceable) -> LightPathEnsemble: # Loop through points and perform trace calculations for i, (p, n_v) in enumerate(zip(points, normals)): ################# TODO Tjlarki: draft for saving traces ###################### - if ( - save_in_file - and max_ram_in_use_percent < psutil.virtual_memory().percent - ): + if save_in_file and max_ram_in_use_percent < psutil.virtual_memory().percent: prefix = f"RayTrace_{trace_name}/Batches/Batch{batch:03}/" - datasets = [ - prefix + "InitialDirections", - prefix + "Points", - prefix + "CurrentDirections", - ] + datasets = [prefix + "InitialDirections", prefix + "Points", prefix + "CurrentDirections"] data = [ total_lpe.init_directions.data, np.array([points.data for points in total_lpe.points_lists]), @@ -322,9 +291,7 @@ def trace_for_single_object(obj: RayTraceable) -> LightPathEnsemble: save_hdf5_datasets(data, datasets, save_name) total_lpe = LightPathEnsemble([]) if verbose: - print( - f"Batch {batch} is over, now we start batch {(batch:=batch+1)}" - ) + print(f"Batch {batch} is over, now we start batch {(batch:=batch+1)}") ############################################################################## p: Pxyz @@ -347,9 +314,7 @@ def trace_for_single_object(obj: RayTraceable) -> LightPathEnsemble: lpe.add_steps(Pxyz(P), Uxyz(results)) total_lpe.concatenate_in_place(lpe) - if ( - verbose and i in checkpoints - ): # TODO tjlarki: make sure this check does not slow down the program + if verbose and i in checkpoints: # TODO tjlarki: make sure this check does not slow down the program print( f"{i/number_of_rays:.2%} through tracing. 
Using {psutil.virtual_memory().percent}% of system RAM." ) @@ -366,11 +331,7 @@ def trace_for_single_object(obj: RayTraceable) -> LightPathEnsemble: # Save Last Batch if save_in_file: prefix = f"RayTrace/Batches/Batch{batch:08}/" - datasets = [ - prefix + "InitialDirections", - prefix + "Points", - prefix + "CurrentDirections", - ] + datasets = [prefix + "InitialDirections", prefix + "Points", prefix + "CurrentDirections"] data = [ total_lpe.init_directions.data, np.array([points.data for points in total_lpe.points_lists]), @@ -574,11 +535,7 @@ def trace_scene_parallel( ################################# Save in hdf5 file ################################# if save_file_name != None: prefix = f"RayTrace_{trace_name}/Batches/Batch_{process:03}_{batch:03}/" - dataset_names = [ - prefix + "InitialDirections", - prefix + "Points", - prefix + "CurrentDirections", - ] + dataset_names = [prefix + "InitialDirections", prefix + "Points", prefix + "CurrentDirections"] ray_trace_data = [ process_lpe.init_directions.data, np.array([points.data for points in process_lpe.points_lists]), @@ -607,11 +564,7 @@ def trace_scene_parallel( def plane_intersect_OLD( - ray_trace: RayTrace, - v_plane_center: Vxyz, - u_plane_norm: Uxyz, - epsilon: float = 1e-6, - verbose=False, + ray_trace: RayTrace, v_plane_center: Vxyz, u_plane_norm: Uxyz, epsilon: float = 1e-6, verbose=False ) -> Vxy: """Finds all the intersections that occur at a plane from the light paths in the raytrace. Output points are transformed from the global (i.e. solar field) @@ -728,9 +681,7 @@ def plane_intersect( # filter out points that miss the plane if verbose: print("filtering out missed vectors") - filtered_intersec_points = Pxyz.merge( - list(filter(lambda vec: not vec.hasnan(), intersection_points)) - ) + filtered_intersec_points = Pxyz.merge(list(filter(lambda vec: not vec.hasnan(), intersection_points))) if verbose: print("Rotating.") @@ -758,9 +709,7 @@ def plane_intersect( # TODO tjlarki: create the histogram from this or bin these results -def histogram_image( - bin_res: float, extent: float, pts: Vxy -) -> tuple[np.ndarray, np.ndarray, np.ndarray]: +def histogram_image(bin_res: float, extent: float, pts: Vxy) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ Creates a 2D histogram from scattered points @@ -787,17 +736,13 @@ def histogram_image( extent = bin_res * bins rng = [[-extent / 2, extent / 2]] * 2 - hist, x, y = np.histogram2d( - pts.x, pts.y, range=rng, bins=bins, density=False - ) # (y, x) + hist, x, y = np.histogram2d(pts.x, pts.y, range=rng, bins=bins, density=False) # (y, x) hist = hist.T # (x, y) hist = np.flip(hist, 0) # convert from image to array return hist, x, y -def ensquared_energy( - pts: Vxy, semi_width_max: float, res: int = 50 -) -> tuple[np.ndarray, np.ndarray]: +def ensquared_energy(pts: Vxy, semi_width_max: float, res: int = 50) -> tuple[np.ndarray, np.ndarray]: """Calculate ensquared energy as function of square half-width. 
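
Review note: histogram_image above bins the scattered target-plane intersection points with np.histogram2d, then transposes and flips the counts so the array reads like an image (largest y in row 0). A minimal sketch of that binning with made-up points and an arbitrary extent and bin size:

    # Hypothetical sketch of the histogram_image binning step; the points,
    # extent, and bin resolution are made up, and only numpy is used.
    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.normal(0.0, 0.1, 2000)          # stand-in for pts.x (meters)
    y = rng.normal(0.0, 0.1, 2000)          # stand-in for pts.y (meters)

    extent, bin_res = 1.0, 0.05
    bins = int(extent / bin_res)
    hist_range = [[-extent / 2, extent / 2]] * 2

    hist, xedges, yedges = np.histogram2d(x, y, range=hist_range, bins=bins, density=False)
    hist = np.flip(hist.T, 0)               # histogram2d counts are indexed (x, y); flip to image rows
    print(hist.shape, hist.sum())           # (20, 20); sum is 2000 minus any out-of-range points
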
Parameters diff --git a/opencsp/common/lib/csp/RayTraceable.py b/opencsp/common/lib/csp/RayTraceable.py index 9b81ef84..24d0bcf7 100644 --- a/opencsp/common/lib/csp/RayTraceable.py +++ b/opencsp/common/lib/csp/RayTraceable.py @@ -10,10 +10,7 @@ class RayTraceable: @abstractmethod def survey_of_points( - self, - resolution: int, - resolution_type: str = 'pixelX', - random_seed: int | None = None, + self, resolution: int, resolution_type: str = 'pixelX', random_seed: int | None = None ) -> tuple[Pxyz, Vxyz]: """Returns a set of points sampled from inside the optic region in the optic's parent coordinate reference frame. @@ -38,9 +35,7 @@ def survey_of_points( """ @abstractmethod - def set_position_in_space( - self, translation: Pxyz | Vxyz, rotation: Rotation - ) -> None: + def set_position_in_space(self, translation: Pxyz | Vxyz, rotation: Rotation) -> None: """Sets the optic's base coordinate reference frame location relative to the parent reference frame. When combined into a 3d transformation, this converts base coordinates into parent coordinates. diff --git a/opencsp/common/lib/csp/SolarField.py b/opencsp/common/lib/csp/SolarField.py index 2d9da437..675b484b 100644 --- a/opencsp/common/lib/csp/SolarField.py +++ b/opencsp/common/lib/csp/SolarField.py @@ -24,9 +24,7 @@ from opencsp.common.lib.csp.ufacet.HeliostatConfiguration import HeliostatConfiguration from opencsp.common.lib.csp.RayTraceable import RayTraceable from opencsp.common.lib.render.View3d import View3d -from opencsp.common.lib.render_control.RenderControlSolarField import ( - RenderControlSolarField, -) +from opencsp.common.lib.render_control.RenderControlSolarField import RenderControlSolarField import opencsp.common.lib.render_control.RenderControlFigureRecord as rcfr @@ -58,23 +56,23 @@ def __init__( # Constructed members. # self.heliostats, self.heliostat_dict = self.heliostats_read_file(heliostat_file, facet_centroids_file) self.heliostats = heliostats - self.heliostat_dict = { - heliostat.name: i for i, heliostat in enumerate(heliostats) - } + self.heliostat_dict = {heliostat.name: i for i, heliostat in enumerate(heliostats)} self.num_heliostats = len(self.heliostats) self.heliostat_origin_xyz_list = [h.origin for h in self.heliostats] self.heliostat_origin_fit_plane = g3d.best_fit_plane( self.heliostat_origin_xyz_list ) # 3-d plane fit through heliostat origins. - self._aimpoint_xyz = None # (x,y,y) in m. Do not access this member externally; use aimpoint_xyz() function instead. - self._when_ymdhmsz = None # (y,m,d,h,m,s,z). Do not access this member externally; use when_ymdhmsz() function instead. + self._aimpoint_xyz = ( + None # (x,y,y) in m. Do not access this member externally; use aimpoint_xyz() function instead. + ) + self._when_ymdhmsz = ( + None # (y,m,d,h,m,s,z). Do not access this member externally; use when_ymdhmsz() function instead. + ) self.set_position_in_space(self.origin, self.rotation) # required for RayTracable object but currently has no use - def set_position_in_space( - self, translation: np.ndarray, rotation: Rotation - ) -> None: + def set_position_in_space(self, translation: np.ndarray, rotation: Rotation) -> None: for h in self.heliostats: h.set_position_in_space(translation + h.origin, rotation) @@ -87,17 +85,13 @@ def heliostat_name_list(self): def aimpoint_xyz(self): if self._aimpoint_xyz == None: - logt.error( - 'ERROR: In SolarField.aimpoint_xyz(), attempt to fetch unset _aimpoint_xyz.' 
- ) + logt.error('ERROR: In SolarField.aimpoint_xyz(), attempt to fetch unset _aimpoint_xyz.') assert False return self._aimpoint_xyz def when_ymdhmsz(self): if self._when_ymdhmsz == None: - logt.error( - 'ERROR: In SolarField.when_ymdhmsz(), attempt to fetch unset _when_ymdhmsz.' - ) + logt.error('ERROR: In SolarField.when_ymdhmsz(), attempt to fetch unset _when_ymdhmsz.') assert False return self._when_ymdhmsz @@ -161,9 +155,7 @@ def situation_abbrev(self): day = self.when_ymdhmsz()[2] hour = self.when_ymdhmsz()[3] minute = self.when_ymdhmsz()[4] - date_time = '{0:d}-{1:02d}-{2:02d}-{3:02d}{4:02d}'.format( - year, month, day, hour, minute - ) + date_time = '{0:d}-{1:02d}-{2:02d}-{3:02d}{4:02d}'.format(year, month, day, hour, minute) aim_Z = 'aimZ=' + str(self.aimpoint_xyz()[2]) # ?? SCAFFOLDING RCB -- MAKE THIS CONTROLLABLE: ON FOR NSTTF, OFF OTHERWISE. # return self.short_name + '_' + date_time + '_' + aim_Z @@ -177,9 +169,7 @@ def situation_str(self): day = self.when_ymdhmsz()[2] hour = self.when_ymdhmsz()[3] minute = self.when_ymdhmsz()[4] - date_time = '{0:d}-{1:d}-{2:d} at {3:02d}{4:02d}'.format( - year, month, day, hour, minute - ) + date_time = '{0:d}-{1:d}-{2:d} at {3:02d}{4:02d}'.format(year, month, day, hour, minute) aim_pt = 'Aim=({0:.1f}, {1:.1f}, {2:.1f})'.format( self.aimpoint_xyz()[0], self.aimpoint_xyz()[1], self.aimpoint_xyz()[2] ) @@ -220,25 +210,13 @@ def set_heliostats_configuration( # RENDERING - def draw( - self, - view: View3d, - solar_field_style: RenderControlSolarField = RenderControlSolarField(), - ) -> None: + def draw(self, view: View3d, solar_field_style: RenderControlSolarField = RenderControlSolarField()) -> None: # Heliostats. if solar_field_style.draw_heliostats: for heliostat in self.heliostats: heliostat.draw(view, solar_field_style.heliostat_styles) - def draw_figure( - self, - figure_control, - axis_control_m, - view_spec, - title, - solar_field_style, - grid=True, - ): + def draw_figure(self, figure_control, axis_control_m, view_spec, title, solar_field_style, grid=True): # Setup view fig_record: rcfr.RenderControlFigureRecord = fm.setup_figure_for_3d_data( figure_control, axis_control_m, view_spec, grid=grid, title=title @@ -249,9 +227,7 @@ def draw_figure( # Return return view - def survey_of_points( - self, resolution, random_dist: bool = False - ) -> tuple[Pxyz, Vxyz]: + def survey_of_points(self, resolution, random_dist: bool = False) -> tuple[Pxyz, Vxyz]: """ Returns a grid of equispaced points and the normal vectors at those points. @@ -268,9 +244,7 @@ def survey_of_points( points = Pxyz([[], [], []]) normals = Vxyz([[], [], []]) for heliostat in self.heliostats: - additional_points, additional_normals = heliostat.survey_of_points( - resolution, random_dist - ) + additional_points, additional_normals = heliostat.survey_of_points(resolution, random_dist) points = points.concatenate(additional_points) normals = normals.concatenate(additional_normals) @@ -282,9 +256,7 @@ def survey_of_points( # -def setup_solar_field( - solar_field_spec, aimpoint_xyz, when_ymdhmsz, synch_azelhnames, up_azelhnames -) -> SolarField: +def setup_solar_field(solar_field_spec, aimpoint_xyz, when_ymdhmsz, synch_azelhnames, up_azelhnames) -> SolarField: # Notify progress. 
logt.info('Setting up solar field...') @@ -328,14 +300,7 @@ def setup_solar_field( # -def draw_solar_field( - figure_control, - solar_field, - solar_field_style, - view_spec, - name_suffix='', - axis_equal=True, -): +def draw_solar_field(figure_control, solar_field, solar_field_style, view_spec, name_suffix='', axis_equal=True): # Assumes that solar field is already set up with heliostat configurations, etc. # Select name and title. if (solar_field.short_name == None) or (len(solar_field.short_name) == 0): @@ -351,12 +316,7 @@ def draw_solar_field( figure_title += ' (' + name_suffix + ')' # Setup figure. fig_record = fm.setup_figure_for_3d_data( - figure_control, - rca.meters(), - view_spec, - name=figure_name, - title=figure_title, - equal=axis_equal, + figure_control, rca.meters(), view_spec, name=figure_name, title=figure_title, equal=axis_equal ) view = fig_record.view # Comment. @@ -420,9 +380,7 @@ def construct_solar_field_heliostat_survey_scan(solar_field, raster_scan_paramet list_of_xyz_segments = vertical_segments + horizontal_segments # Construct the scan. - scan = Scan.construct_scan_given_segments_of_interest( - list_of_xyz_segments, raster_scan_parameters - ) + scan = Scan.construct_scan_given_segments_of_interest(list_of_xyz_segments, raster_scan_parameters) # Return. return scan @@ -489,14 +447,9 @@ def construct_solar_field_vanity_scan(solar_field, vanity_scan_parameters): horizontal_segments.append([[x0, y0, z0], [x1, y1, z1]]) # All passes. # Shuffle to avoid passes that are too close to each other for the UAS to distinguish. - list_of_xyz_segments = listt.zamboni_shuffle( - vertical_segments - ) + listt.zamboni_shuffle(horizontal_segments) + list_of_xyz_segments = listt.zamboni_shuffle(vertical_segments) + listt.zamboni_shuffle(horizontal_segments) - logt.info( - 'In construct_solar_field_vanity_scan(), number of segments = ', - len(list_of_xyz_segments), - ) + logt.info('In construct_solar_field_vanity_scan(), number of segments = ', len(list_of_xyz_segments)) # Rotate to stow azmiuth. rotated_segments = [] @@ -515,9 +468,7 @@ def construct_solar_field_vanity_scan(solar_field, vanity_scan_parameters): rotated_segments.append([xyz0r, xyz1r]) # Construct the scan. - scan = Scan.construct_scan_given_segments_of_interest( - rotated_segments, vanity_scan_parameters - ) + scan = Scan.construct_scan_given_segments_of_interest(rotated_segments, vanity_scan_parameters) # Return. return scan @@ -540,16 +491,12 @@ def sf_from_csv_files( # Constructed members. heliostats: list[Heliostat.Heliostat] - heliostats, _ = heliostats_read_file( - heliostat_file, facet_centroids_file, autoset_canting_and_curvature - ) + heliostats, _ = heliostats_read_file(heliostat_file, facet_centroids_file, autoset_canting_and_curvature) return SolarField(name, short_name, origin_lon_lat, heliostats) def heliostats_read_file( - file_field: str, - file_centroids_offsets: str, - autoset_canting_and_curvature: np.ndarray = None, + file_field: str, file_centroids_offsets: str, autoset_canting_and_curvature: np.ndarray = None ) -> tuple[list[Heliostat.Heliostat], dict[str, int]]: """Reads in a list of heliostats from the given file_field. 
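
Review note: construct_solar_field_vanity_scan (above) rotates the shuffled segment endpoints to the stow azimuth before building the scan; the rotation itself sits outside this hunk, so the sketch below only illustrates the generic operation it presumably performs, rotating [x, y, z] endpoints about the z axis by an assumed angle and pivot:

    # Hypothetical sketch of rotating scan-segment endpoints about +z; the
    # angle convention and pivot (the origin) are assumptions, not taken
    # from the OpenCSP scan-construction code.
    import numpy as np

    def rotate_segment_xy(segment, angle_rad):
        """Rotate both [x, y, z] endpoints of a segment about the z axis."""
        c, s = np.cos(angle_rad), np.sin(angle_rad)
        rot = np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
        return [list(rot @ np.asarray(p)) for p in segment]

    segment = [[0.0, 0.0, 10.0], [0.0, 50.0, 10.0]]   # one made-up flight pass
    print(rotate_segment_xy(segment, np.deg2rad(30.0)))
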
@@ -586,9 +533,7 @@ def heliostats_read_file( if autoset_canting_and_curvature is None: curvature_func: Callable[[float, float], float] = lambda x, y: x * 0 else: - foc_len = np.linalg.norm( - autoset_canting_and_curvature - np.array([x, y, z]) - ) + foc_len = np.linalg.norm(autoset_canting_and_curvature - np.array([x, y, z])) a = 1.0 / (4 * foc_len) def curvature_func(x, y): diff --git a/opencsp/common/lib/csp/VisualizeOrthorectifiedSlopeAbstract.py b/opencsp/common/lib/csp/VisualizeOrthorectifiedSlopeAbstract.py index a03f3684..e7756cc6 100644 --- a/opencsp/common/lib/csp/VisualizeOrthorectifiedSlopeAbstract.py +++ b/opencsp/common/lib/csp/VisualizeOrthorectifiedSlopeAbstract.py @@ -15,9 +15,7 @@ class VisualizeOrthorectifiedSlopeAbstract: """ @abstractmethod - def orthorectified_slope_array( - self, x_vec: np.ndarray, y_vec: np.ndarray - ) -> np.ndarray: + def orthorectified_slope_array(self, x_vec: np.ndarray, y_vec: np.ndarray) -> np.ndarray: pass @property @@ -121,16 +119,7 @@ def plot_orthorectified_slope_error( # Add quiver arrows if quiver_density is not None: - self._add_quivers( - x_image, - y_image, - x_vec, - y_vec, - quiver_density, - axis, - quiver_scale, - quiver_color, - ) + self._add_quivers(x_image, y_image, x_vec, y_vec, quiver_density, axis, quiver_scale, quiver_color) # Label axes axis.set_title(title) @@ -218,16 +207,7 @@ def plot_orthorectified_slope( # Add quiver arrows if quiver_density is not None: - self._add_quivers( - x_image, - y_image, - x_vec, - y_vec, - quiver_density, - axis, - quiver_scale, - quiver_color, - ) + self._add_quivers(x_image, y_image, x_vec, y_vec, quiver_density, axis, quiver_scale, quiver_color) # Label axes axis.set_title(title) @@ -301,9 +281,7 @@ def plot_orthorectified_curvature( extent = (left, right, bottom, top) # Plot image on axes - self._plot_orthorectified_image( - image, axis, 'seismic', extent, clims, 'mrad/meter' - ) + self._plot_orthorectified_image(image, axis, 'seismic', extent, clims, 'mrad/meter') # Label axes axis.set_title(title) @@ -354,9 +332,7 @@ def _add_quivers( v_dirs = -im_y[y1::Ny, x1::Nx] # Add quiver arrows to axes - axis.quiver( - x_locs, y_locs, u_dirs, v_dirs, color=color, scale=scale, scale_units='x' - ) + axis.quiver(x_locs, y_locs, u_dirs, v_dirs, color=color, scale=scale, scale_units='x') def _plot_orthorectified_image( self, diff --git a/opencsp/common/lib/csp/standard_output.py b/opencsp/common/lib/csp/standard_output.py index 65a3fcb2..98a047fa 100644 --- a/opencsp/common/lib/csp/standard_output.py +++ b/opencsp/common/lib/csp/standard_output.py @@ -7,16 +7,10 @@ import opencsp.common.lib.render_control.RenderControlAxis as rca from opencsp.common.lib.render_control.RenderControlFigure import RenderControlFigure -from opencsp.common.lib.render_control.RenderControlLightPath import ( - RenderControlLightPath, -) +from opencsp.common.lib.render_control.RenderControlLightPath import RenderControlLightPath from opencsp.common.lib.render_control.RenderControlMirror import RenderControlMirror -from opencsp.common.lib.render_control.RenderControlPointSeq import ( - RenderControlPointSeq, -) -from opencsp.common.lib.render_control.RenderControlRayTrace import ( - RenderControlRayTrace, -) +from opencsp.common.lib.render_control.RenderControlPointSeq import RenderControlPointSeq +from opencsp.common.lib.render_control.RenderControlRayTrace import RenderControlRayTrace from opencsp.common.lib.csp.LightSource import LightSource from opencsp.common.lib.csp.MirrorAbstract import MirrorAbstract import 
opencsp.common.lib.csp.RayTrace as rt @@ -79,53 +73,31 @@ def standard_output( """ # Determine which plots to create plot_reference = optic_ref is not None - plot_ray_trace = ( - (source is not None) - and (v_target_center is not None) - and (v_target_normal is not None) - ) + plot_ray_trace = (source is not None) and (v_target_center is not None) and (v_target_normal is not None) # Perform measured optic ray trace - ray_trace_meas = ray_trace_scene( - optic_meas, source, obj_resolution=vis_options.ray_trace_optic_res - ) + ray_trace_meas = ray_trace_scene(optic_meas, source, obj_resolution=vis_options.ray_trace_optic_res) ray_pts_meas = rt.plane_intersect(ray_trace_meas, v_target_center, v_target_normal) image_meas, xv_meas, yv_meas = rt.histogram_image( - bin_res=vis_options.hist_bin_res, - extent=vis_options.hist_extent, - pts=ray_pts_meas, - ) - ee_meas, ws_meas = rt.ensquared_energy( - ray_pts_meas, vis_options.ensquared_energy_max_semi_width + bin_res=vis_options.hist_bin_res, extent=vis_options.hist_extent, pts=ray_pts_meas ) + ee_meas, ws_meas = rt.ensquared_energy(ray_pts_meas, vis_options.ensquared_energy_max_semi_width) # Perform reference optic ray trace if plot_reference: - ray_trace_ref = ray_trace_scene( - optic_ref, source, obj_resolution=vis_options.ray_trace_optic_res - ) - ray_pts_ref = rt.plane_intersect( - ray_trace_ref, v_target_center, v_target_normal - ) + ray_trace_ref = ray_trace_scene(optic_ref, source, obj_resolution=vis_options.ray_trace_optic_res) + ray_pts_ref = rt.plane_intersect(ray_trace_ref, v_target_center, v_target_normal) image_ref, xv_ref, yv_ref = rt.histogram_image( - bin_res=vis_options.hist_bin_res, - extent=vis_options.hist_extent, - pts=ray_pts_ref, - ) - ee_ref, ws_ref = rt.ensquared_energy( - ray_pts_ref, vis_options.ensquared_energy_max_semi_width + bin_res=vis_options.hist_bin_res, extent=vis_options.hist_extent, pts=ray_pts_ref ) + ee_ref, ws_ref = rt.ensquared_energy(ray_pts_ref, vis_options.ensquared_energy_max_semi_width) # Set up figure control objects for 3d plots fig_control = RenderControlFigure(tile_array=(4, 2), tile_square=True) axis_control = rca.meters() point_styles = RenderControlPointSeq(linestyle='--', color='k', markersize=0) - mirror_control = RenderControlMirror( - surface_normals=True, norm_res=1, point_styles=point_styles - ) - light_path_control = RenderControlLightPath( - current_length=vis_options.ray_trace_plot_ray_length - ) + mirror_control = RenderControlMirror(surface_normals=True, norm_res=1, point_styles=point_styles) + light_path_control = RenderControlLightPath(current_length=vis_options.ray_trace_plot_ray_length) ray_trace_control = RenderControlRayTrace(light_path_control=light_path_control) # Plot measured slope maps @@ -180,10 +152,7 @@ def standard_output( # Plot measured curvature maps fig_rec = fm.setup_figure(fig_control, axis_control, name='Curvature X') optic_meas.plot_orthorectified_curvature( - vis_options.slope_map_resolution, - type_='x', - clim=vis_options.curvature_clim, - axis=fig_rec.axis, + vis_options.slope_map_resolution, type_='x', clim=vis_options.curvature_clim, axis=fig_rec.axis ) if vis_options.to_save: fig_rec.save( @@ -195,10 +164,7 @@ def standard_output( fig_rec = fm.setup_figure(fig_control, axis_control, name='Curvature Y') optic_meas.plot_orthorectified_curvature( - vis_options.slope_map_resolution, - type_='y', - clim=vis_options.curvature_clim, - axis=fig_rec.axis, + vis_options.slope_map_resolution, type_='y', clim=vis_options.curvature_clim, axis=fig_rec.axis ) if 
vis_options.to_save: fig_rec.save( @@ -263,9 +229,7 @@ def standard_output( if plot_reference and plot_ray_trace: # Draw reference ensemble and traced rays - fig_rec = fm.setup_figure_for_3d_data( - fig_control, axis_control, name='Ray Trace' - ) + fig_rec = fm.setup_figure_for_3d_data(fig_control, axis_control, name='Ray Trace') if len(ray_trace_ref.light_paths) < 100: # Only plot few rays ray_trace_ref.draw(fig_rec.view, ray_trace_control) optic_ref.draw(fig_rec.view, mirror_control) @@ -279,14 +243,8 @@ def standard_output( ) # Draw reference optic sun image on target - fig_rec = fm.setup_figure( - fig_control, axis_control, name='Reference Ray Trace Image' - ) - fig_rec.axis.imshow( - image_ref, - cmap='jet', - extent=(xv_ref.min(), xv_ref.max(), yv_ref.min(), yv_ref.max()), - ) + fig_rec = fm.setup_figure(fig_control, axis_control, name='Reference Ray Trace Image') + fig_rec.axis.imshow(image_ref, cmap='jet', extent=(xv_ref.min(), xv_ref.max(), yv_ref.min(), yv_ref.max())) if vis_options.to_save: fig_rec.save( vis_options.output_dir, @@ -297,14 +255,8 @@ def standard_output( # Draw measured optic sun image on target if plot_ray_trace: - fig_rec = fm.setup_figure( - fig_control, axis_control, name='Measured Ray Trace Image' - ) - fig_rec.axis.imshow( - image_meas, - cmap='jet', - extent=(xv_meas.min(), xv_meas.max(), yv_meas.min(), yv_meas.max()), - ) + fig_rec = fm.setup_figure(fig_control, axis_control, name='Measured Ray Trace Image') + fig_rec.axis.imshow(image_meas, cmap='jet', extent=(xv_meas.min(), xv_meas.max(), yv_meas.min(), yv_meas.max())) if vis_options.to_save: fig_rec.save( vis_options.output_dir, @@ -316,9 +268,7 @@ def standard_output( # Draw ensquared energy plot fig_rec = fm.setup_figure(fig_control, name='Ensquared Energy') if plot_reference: - fig_rec.axis.plot( - ws_ref, ee_ref, label='Reference', color='k', linestyle='--' - ) + fig_rec.axis.plot(ws_ref, ee_ref, label='Reference', color='k', linestyle='--') fig_rec.axis.plot(ws_meas, ee_meas, label='Measured', color='k', linestyle='-') fig_rec.axis.legend() fig_rec.axis.grid() @@ -334,9 +284,7 @@ def standard_output( ) -def ray_trace_scene( - obj: RayTraceable, source: LightSource, obj_resolution=1 -) -> rt.RayTrace: +def ray_trace_scene(obj: RayTraceable, source: LightSource, obj_resolution=1) -> rt.RayTrace: """Performs a raytrace of a simple scene with a source and an optic. 
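The standard_output hunks above only re-wrap long calls, but they make the reporting pipeline easy to read one line at a time: trace the optic, intersect the rays with the target plane, histogram the hits, and compute ensquared energy. A usage sketch assuming only the signatures visible in the hunk (ray_trace_scene, rt.plane_intersect, rt.histogram_image, rt.ensquared_energy); the optic, source, and target variables and all numeric values are placeholders:

import opencsp.common.lib.csp.RayTrace as rt

# optic, source, v_target_center, v_target_normal are assumed to be constructed elsewhere.
ray_trace = ray_trace_scene(optic, source, obj_resolution=20)
ray_pts = rt.plane_intersect(ray_trace, v_target_center, v_target_normal)   # hit points on the target plane
image, xv, yv = rt.histogram_image(bin_res=0.01, extent=2.0, pts=ray_pts)   # binned flux image plus axes
ee, ws = rt.ensquared_energy(ray_pts, 1.0)                                   # energy fraction vs. semi-width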
Parameters diff --git a/opencsp/common/lib/csp/sun_position.py b/opencsp/common/lib/csp/sun_position.py index 006b1cc5..597a7282 100644 --- a/opencsp/common/lib/csp/sun_position.py +++ b/opencsp/common/lib/csp/sun_position.py @@ -46,24 +46,13 @@ def sun_position_aux( # Decimal hour of the day at Greenwich greenwichtime = hour - timezone + minute / 60 + second / 3600 # Days from J2000, accurate from 1901 to 2099 - daynum = ( - 367 * year - - 7 * (year + (month + 9) // 12) // 4 - + 275 * month // 9 - + day - - 730531.5 - + greenwichtime / 24 - ) + daynum = 367 * year - 7 * (year + (month + 9) // 12) // 4 + 275 * month // 9 + day - 730531.5 + greenwichtime / 24 # Mean longitude of the sun mean_long = daynum * 0.01720279239 + 4.894967873 # Mean anomaly of the Sun mean_anom = daynum * 0.01720197034 + 6.240040768 # Ecliptic longitude of the sun - eclip_long = ( - mean_long - + 0.03342305518 * sin(mean_anom) - + 0.0003490658504 * sin(2 * mean_anom) - ) + eclip_long = mean_long + 0.03342305518 * sin(mean_anom) + 0.0003490658504 * sin(2 * mean_anom) # Obliquity of the ecliptic obliquity = 0.4090877234 - 0.000000006981317008 * daynum # Right ascension of the sun @@ -77,9 +66,7 @@ def sun_position_aux( # Local elevation of the sun elevation = asin(sin(decl) * sin(rlat) + cos(decl) * cos(rlat) * cos(hour_ang)) # Local azimuth of the sun - azimuth = atan2( - -cos(decl) * cos(rlat) * sin(hour_ang), sin(decl) - sin(rlat) * sin(elevation) - ) + azimuth = atan2(-cos(decl) * cos(rlat) * sin(hour_ang), sin(decl) - sin(rlat) * sin(elevation)) # Convert azimuth and elevation to degrees azimuth = into_range(deg(azimuth), 0, 360) elevation = into_range(deg(elevation), -180, 180) @@ -103,14 +90,11 @@ def into_range(x, range_min, range_max): def sun_position( - location_lon_lat: tuple[float, float], # radians. (longitude, lattiude) pair. - when_ymdhmsz: tuple, + location_lon_lat: tuple[float, float], when_ymdhmsz: tuple # radians. (longitude, lattiude) pair. ) -> np.ndarray: # (year, month, day, hour, minute, second, timezone) tuple. # Example: (2022, 7, 4, 11, 20, 0, -6) => July 4, 2022 at 11:20 am MDT (-6 hours) # Get the Sun's apparent location in the sky - azimuth_deg, elevation_deg = sun_position_aux( - location_lon_lat, when_ymdhmsz, True - ) # John Clark Craig's version. + azimuth_deg, elevation_deg = sun_position_aux(location_lon_lat, when_ymdhmsz, True) # John Clark Craig's version. azimuth = np.deg2rad(azimuth_deg) elevation = np.deg2rad(elevation_deg) diff --git a/opencsp/common/lib/csp/sun_track.py b/opencsp/common/lib/csp/sun_track.py index 927608fb..cd21c9dc 100644 --- a/opencsp/common/lib/csp/sun_track.py +++ b/opencsp/common/lib/csp/sun_track.py @@ -103,9 +103,7 @@ def tracking_surface_normal_xyz( heliostat_xyz: list | np.ndarray | tuple, # (x,y,z) in m. Heliostat origin. aimpoint_xyz: list | np.ndarray | tuple, # (x,y,z) in m. Reflection aim point. - location_lon_lat: ( - list | np.ndarray | tuple - ), # (lon,lat) in rad. Solar field origin. + location_lon_lat: list | np.ndarray | tuple, # (lon,lat) in rad. Solar field origin. when_ymdhmsz: list | np.ndarray | tuple, ): # (year, month, day, hour, minute, second, timezone) tuple. # Example: (2022, 7, 4, 11, 20, 0, -6) @@ -198,9 +196,7 @@ def tracking_surface_normal_xy( Computes heliostat surface normal which tracks the sun to the aimpoint. Returns only (x,y) components of the surface normal. 
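The sun_position hunks above are formatting-only, so it is worth noting what the single long expressions now encode: once declination, observer latitude, and hour angle are known, local elevation and azimuth follow from two closed-form lines. Restating exactly those two formulas from the hunk as a standalone helper (decl, rlat, hour_ang in radians; the into_range wrapping and the earlier J2000/ecliptic steps are omitted here):

from math import asin, atan2, cos, degrees, sin

def local_sun_angles(decl: float, rlat: float, hour_ang: float) -> tuple[float, float]:
    """Return (elevation_deg, azimuth_deg) from declination, latitude, and hour angle in radians."""
    elevation = asin(sin(decl) * sin(rlat) + cos(decl) * cos(rlat) * cos(hour_ang))
    azimuth = atan2(-cos(decl) * cos(rlat) * sin(hour_ang), sin(decl) - sin(rlat) * sin(elevation))
    return degrees(elevation), degrees(azimuth) % 360.0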
""" - normal_xyz = tracking_surface_normal_xyz( - heliostat_xyz, aimpoint_xyz, location_lon_lat, when_ymdhmsz - ) + normal_xyz = tracking_surface_normal_xyz(heliostat_xyz, aimpoint_xyz, location_lon_lat, when_ymdhmsz) return [normal_xyz[0], normal_xyz[1]] @@ -218,9 +214,7 @@ def tracking_nu( nu is the angle to the projection of the surface normal onto the (x,y) plane, measured ccw from the x axis. """ # Compute heliostat surface normal which tracks the sun to the aimpoint. - n_xy = tracking_surface_normal_xy( - heliostat_xyz, aimpoint_xyz, location_lon_lat, when_ymdhmsz - ) + n_xy = tracking_surface_normal_xy(heliostat_xyz, aimpoint_xyz, location_lon_lat, when_ymdhmsz) # Extract surface normal coordinates. n_x = n_xy[0] diff --git a/opencsp/common/lib/csp/test/test_MirrorParametric.py b/opencsp/common/lib/csp/test/test_MirrorParametric.py index 45f3a125..842b0eed 100644 --- a/opencsp/common/lib/csp/test/test_MirrorParametric.py +++ b/opencsp/common/lib/csp/test/test_MirrorParametric.py @@ -14,9 +14,7 @@ class TestMirrorParametric: def get_region_test_mirror(self) -> RegionXY: """Returns a test mirror region""" - return RegionXY.from_vertices( - Vxy(([0.5, -0.5, -0.5, 0.5], [-0.5, -0.5, 0.5, 0.5])) - ) + return RegionXY.from_vertices(Vxy(([0.5, -0.5, -0.5, 0.5], [-0.5, -0.5, 0.5, 0.5]))) def get_test_flat_mirror(self, height: float) -> MirrorParametric: """Returns a flat mirror with defined height""" diff --git a/opencsp/common/lib/csp/test/test_MirrorPoint.py b/opencsp/common/lib/csp/test/test_MirrorPoint.py index 557b30bc..3bd7d869 100644 --- a/opencsp/common/lib/csp/test/test_MirrorPoint.py +++ b/opencsp/common/lib/csp/test/test_MirrorPoint.py @@ -15,13 +15,9 @@ class TestMirrorPoint: def get_region_test_mirror(self) -> RegionXY: """Returns test mirror region""" - return RegionXY.from_vertices( - Vxy(([0.5, -0.5, -0.5, 0.5], [-0.5, -0.5, 0.5, 0.5])) - ) + return RegionXY.from_vertices(Vxy(([0.5, -0.5, -0.5, 0.5], [-0.5, -0.5, 0.5, 0.5]))) - def get_test_mirror_flat( - self, height: float, interpolation_type: str - ) -> MirrorPoint: + def get_test_mirror_flat(self, height: float, interpolation_type: str) -> MirrorPoint: """Returns a test instance of a MirrorPoint object""" # Calculate surface xyz points xv = yv = np.arange(-0.5, 0.5, 0.1) diff --git a/opencsp/common/lib/csp/test/test_csp_optics_orientations.py b/opencsp/common/lib/csp/test/test_csp_optics_orientations.py index a77d8b3b..55132af0 100644 --- a/opencsp/common/lib/csp/test/test_csp_optics_orientations.py +++ b/opencsp/common/lib/csp/test/test_csp_optics_orientations.py @@ -17,9 +17,7 @@ class TestCSPOpticsOrientation: are checked at different levels in the heirarchy. 
""" - def _generate_optics_rotation( - self, r1: Rotation, r2: Rotation, r3: Rotation, r4: Rotation, r5: Rotation - ): + def _generate_optics_rotation(self, r1: Rotation, r2: Rotation, r3: Rotation, r4: Rotation, r5: Rotation): # Define delta movement dv = Vxyz((0, 0, 0)) @@ -47,9 +45,7 @@ def child_to_parent(r): # Save objects return mirror, facet, heliostat - def _check_rotation( - self, mirror, facet, heliostat, a1: float, a2: float, a3: float, a4: float - ): + def _check_rotation(self, mirror, facet, heliostat, a1: float, a2: float, a3: float, a4: float): # Test norm_0 = mirror.surface_norm_at(Vxy((0, 0))) # mirror base norm_1 = mirror.survey_of_points(1)[1][0] # mirror parent diff --git a/opencsp/common/lib/csp/ufacet/Facet.py b/opencsp/common/lib/csp/ufacet/Facet.py index daaa657b..becab7c1 100644 --- a/opencsp/common/lib/csp/ufacet/Facet.py +++ b/opencsp/common/lib/csp/ufacet/Facet.py @@ -59,16 +59,12 @@ def __init__( # if additional information (backside structure, bolt locations, etc) is needed # Fill in here - def set_position_in_space( - self, translation: np.ndarray, rotation: Rotation - ) -> None: + def set_position_in_space(self, translation: np.ndarray, rotation: Rotation) -> None: # Sets facet's position given heliostat configuration. self.origin: np.ndarray = np.array(translation) + rotation.apply( np.array(self.centroid_offset) ) # R_aiming * T_pivot * T_offset * R_canting * M_origin - self.composite_rotation: Rotation = ( - rotation * self.canting - ) # TODO tjlarki: is this right? + self.composite_rotation: Rotation = rotation * self.canting # TODO tjlarki: is this right? self.surface_normal = self.composite_rotation.apply( [0, 0, 1] ) # TODO tjlarki: rename this center surface normal, or normal direction. @@ -78,43 +74,23 @@ def set_position_in_space( self._update_corners_position_in_space() pass - def set_facet_position_in_space( - self, hel_centroid: np.ndarray, hel_rotation: Rotation - ) -> None: - warn( - "Use Facet.set_position_in_space() instead.", - category=DeprecationWarning, - stacklevel=2, - ) + def set_facet_position_in_space(self, hel_centroid: np.ndarray, hel_rotation: Rotation) -> None: + warn("Use Facet.set_position_in_space() instead.", category=DeprecationWarning, stacklevel=2) self.set_position_in_space(hel_centroid, hel_rotation) pass def _update_corners_position_in_space(self) -> None: # following are not set up for canting angle - self.top_left_corner = self.origin + self.composite_rotation.apply( - self.top_left_corner_offset - ) - self.top_right_corner = self.origin + self.composite_rotation.apply( - self.top_right_corner_offset - ) - self.bottom_right_corner = self.origin + self.composite_rotation.apply( - self.bottom_right_corner_offset - ) - self.bottom_left_corner = self.origin + self.composite_rotation.apply( - self.bottom_left_corner_offset - ) + self.top_left_corner = self.origin + self.composite_rotation.apply(self.top_left_corner_offset) + self.top_right_corner = self.origin + self.composite_rotation.apply(self.top_right_corner_offset) + self.bottom_right_corner = self.origin + self.composite_rotation.apply(self.bottom_right_corner_offset) + self.bottom_left_corner = self.origin + self.composite_rotation.apply(self.bottom_left_corner_offset) # def update_position_in_space(self): # self.set_position_in_space(self.origin, self.canting) - def set_facet_position_in_space( - self, hel_centroid: np.ndarray, hel_rotation: Rotation - ) -> None: - warn( - "Use Facet.set_position_in_space() instead.", - category=DeprecationWarning, - 
stacklevel=2, - ) + def set_facet_position_in_space(self, hel_centroid: np.ndarray, hel_rotation: Rotation) -> None: + warn("Use Facet.set_position_in_space() instead.", category=DeprecationWarning, stacklevel=2) self.set_position_in_space(hel_centroid, hel_rotation) def surface_normal_ray(self, base: np.ndarray, length: float): @@ -145,49 +121,27 @@ def draw(self, view: View3d, facet_styles: RenderControlFacet): # Outline. if facet_style.draw_outline: - corners = [ - self.top_left_corner, - self.top_right_corner, - self.bottom_right_corner, - self.bottom_left_corner, - ] + corners = [self.top_left_corner, self.top_right_corner, self.bottom_right_corner, self.bottom_left_corner] view.draw_xyz_list(corners, close=True, style=facet_style.outline_style) # Surface normal. if facet_style.draw_surface_normal: # Construct ray. - surface_normal_ray = self.surface_normal_ray( - self.origin, facet_style.surface_normal_length - ) + surface_normal_ray = self.surface_normal_ray(self.origin, facet_style.surface_normal_length) # Draw ray and its base. view.draw_xyz(self.origin, style=facet_style.surface_normal_base_style) - view.draw_xyz_list( - surface_normal_ray, style=facet_style.surface_normal_style - ) + view.draw_xyz_list(surface_normal_ray, style=facet_style.surface_normal_style) # Surface normal drawn at corners. # (Not the surface normal at the corner. Facet curvature is not shown.) if facet_style.draw_surface_normal_at_corners: # Construct rays. - top_left_ray = self.surface_normal_ray( - self.top_left_corner, facet_style.corner_normal_length - ) - top_right_ray = self.surface_normal_ray( - self.top_right_corner, facet_style.corner_normal_length - ) - bottom_left_ray = self.surface_normal_ray( - self.bottom_left_corner, facet_style.corner_normal_length - ) - bottom_right_ray = self.surface_normal_ray( - self.bottom_right_corner, facet_style.corner_normal_length - ) + top_left_ray = self.surface_normal_ray(self.top_left_corner, facet_style.corner_normal_length) + top_right_ray = self.surface_normal_ray(self.top_right_corner, facet_style.corner_normal_length) + bottom_left_ray = self.surface_normal_ray(self.bottom_left_corner, facet_style.corner_normal_length) + bottom_right_ray = self.surface_normal_ray(self.bottom_right_corner, facet_style.corner_normal_length) rays = [top_left_ray, top_right_ray, bottom_left_ray, bottom_right_ray] - corners = [ - self.top_left_corner, - self.top_right_corner, - self.bottom_right_corner, - self.bottom_left_corner, - ] + corners = [self.top_left_corner, self.top_right_corner, self.bottom_right_corner, self.bottom_left_corner] # Draw each ray and its base. 
for base, ray in zip(corners, rays): view.draw_xyz(base, style=facet_style.corner_normal_base_style) @@ -200,9 +154,7 @@ def draw(self, view: View3d, facet_styles: RenderControlFacet): if facet_style.draw_mirror_curvature: self.mirror.draw(view, facet_style.mirror_styles) - def survey_of_points( - self, resolution, random_dist: bool = False - ) -> tuple[Pxyz, Vxyz]: + def survey_of_points(self, resolution, random_dist: bool = False) -> tuple[Pxyz, Vxyz]: return self.mirror.survey_of_points(resolution, random_dist) # override function from RayTraceable diff --git a/opencsp/common/lib/csp/ufacet/Heliostat.py b/opencsp/common/lib/csp/ufacet/Heliostat.py index b3bd21a6..d00d51f2 100644 --- a/opencsp/common/lib/csp/ufacet/Heliostat.py +++ b/opencsp/common/lib/csp/ufacet/Heliostat.py @@ -21,20 +21,14 @@ import opencsp.common.lib.render_control.RenderControlHeliostat as rch import opencsp.common.lib.tool.math_tools as mt from opencsp.common.lib.csp.ufacet.Facet import Facet -from opencsp.common.lib.csp.MirrorParametricRectangular import ( - MirrorParametricRectangular, -) +from opencsp.common.lib.csp.MirrorParametricRectangular import MirrorParametricRectangular from opencsp.common.lib.csp.RayTraceable import RayTraceable from opencsp.common.lib.geometry.Pxyz import Pxyz from opencsp.common.lib.geometry.Uxyz import Uxyz from opencsp.common.lib.geometry.Vxyz import Vxyz from opencsp.common.lib.render.View3d import View3d -from opencsp.common.lib.render_control.RenderControlEnsemble import ( - RenderControlEnsemble, -) -from opencsp.common.lib.render_control.RenderControlHeliostat import ( - RenderControlHeliostat, -) +from opencsp.common.lib.render_control.RenderControlEnsemble import RenderControlEnsemble +from opencsp.common.lib.render_control.RenderControlHeliostat import RenderControlHeliostat from opencsp.common.lib.tool.typing_tools import strict_types @@ -114,51 +108,29 @@ def __init__( self.top_left_facet: Facet = self.facets[self.facet_dict[str(1)]] self.top_right_facet: Facet = self.facets[self.facet_dict[str(num_cols)]] self.bottom_right_facet: Facet = self.facets[self.facet_dict[str(num_facets)]] - self.bottom_left_facet: Facet = self.facets[ - self.facet_dict[str(num_facets - num_cols + 1)] - ] + self.bottom_left_facet: Facet = self.facets[self.facet_dict[str(num_facets - num_cols + 1)]] # Heliostat Corners [offsets in terms of heliostat's centroid] self.top_left_corner_offset = [ - x + y - for x, y in zip( - self.top_left_facet.centroid_offset, - self.top_left_facet.top_left_corner_offset, - ) + x + y for x, y in zip(self.top_left_facet.centroid_offset, self.top_left_facet.top_left_corner_offset) ] self.top_right_corner_offset = [ - x + y - for x, y in zip( - self.top_right_facet.centroid_offset, - self.top_right_facet.top_right_corner_offset, - ) + x + y for x, y in zip(self.top_right_facet.centroid_offset, self.top_right_facet.top_right_corner_offset) ] self.bottom_right_corner_offset = [ x + y - for x, y in zip( - self.bottom_right_facet.centroid_offset, - self.bottom_right_facet.bottom_right_corner_offset, - ) + for x, y in zip(self.bottom_right_facet.centroid_offset, self.bottom_right_facet.bottom_right_corner_offset) ] self.bottom_left_corner_offset = [ x + y - for x, y in zip( - self.bottom_left_facet.centroid_offset, - self.bottom_left_facet.bottom_left_corner_offset, - ) + for x, y in zip(self.bottom_left_facet.centroid_offset, self.bottom_left_facet.bottom_left_corner_offset) ] # Centroid - self.origin = np.array( - [origin[0], origin[1], origin[2]] - ) # Origin is at 
torque tube center. - - self.az = np.deg2rad( - 180 - ) # (az,el) = (180,90) degrees corresponds to pointing straight up, - self.el = np.deg2rad( - 90 - ) # as if transitioned by tilting up from face south orientation. + self.origin = np.array([origin[0], origin[1], origin[2]]) # Origin is at torque tube center. + + self.az = np.deg2rad(180) # (az,el) = (180,90) degrees corresponds to pointing straight up, + self.el = np.deg2rad(90) # as if transitioned by tilting up from face south orientation. self.surface_normal = [0, 0, 1] # self.rx_rotation = np.identity(3) self.rz_rotation = np.identity(3) @@ -166,8 +138,12 @@ def __init__( # self._set_corner_positions_in_space() # Tracking - self._aimpoint_xyz = None # (x,y,y) in m. Do not access this member externally; use aimpoint_xyz() function instead. - self._when_ymdhmsz = None # (y,m,d,h,m,s,z). Do not access this member externally; use when_ymdhmsz() function instead. + self._aimpoint_xyz = ( + None # (x,y,y) in m. Do not access this member externally; use aimpoint_xyz() function instead. + ) + self._when_ymdhmsz = ( + None # (y,m,d,h,m,s,z). Do not access this member externally; use when_ymdhmsz() function instead. + ) # SET POSITION IN SPACE self.set_position_in_space(self.origin, self.rotation) @@ -177,18 +153,14 @@ def __init__( @property def aimpoint_xyz(self): if self._aimpoint_xyz == None: - print( - 'ERROR: In Heliostat.aimpoint_xyz(), attempt to fetch unset _aimpoint_xyz.' - ) + print('ERROR: In Heliostat.aimpoint_xyz(), attempt to fetch unset _aimpoint_xyz.') assert False return self._aimpoint_xyz @property def when_ymdhmsz(self): if self._when_ymdhmsz == None: - print( - 'ERROR: In Heliostat.when_ymdhmsz(), attempt to fetch unset _when_ymdhmsz.' - ) + print('ERROR: In Heliostat.when_ymdhmsz(), attempt to fetch unset _when_ymdhmsz.') assert False return self._when_ymdhmsz @@ -205,16 +177,11 @@ def surface_normal_ray(self, base, length): return ray def compute_tracking_configuration( - self, - aimpoint_xyz: list | np.ndarray, - location_lon_lat: tuple | list, - when_ymdhmsz: tuple, + self, aimpoint_xyz: list | np.ndarray, location_lon_lat: tuple | list, when_ymdhmsz: tuple ): if self.use_center_facet_for_aiming: if self.center_facet == None: - raise AttributeError( - f"Helisotat (Name: {self.name}) does not have a center facet defined" - ) + raise AttributeError(f"Helisotat (Name: {self.name}) does not have a center facet defined") # TODO tjlarki: centroid_offset needs to become a Pxyz eventually d: float = self.center_facet.centroid_offset[2] @@ -225,28 +192,19 @@ def compute_tracking_configuration( h = Pxyz(h_tube) # Later, add correction for center facet offset. # Compute heliostat surface normal which tracks the sun to the aimpoint. - n = Vxyz( - st.tracking_surface_normal_xyz( - h_tube, aimpoint_xyz, location_lon_lat, when_ymdhmsz - ) - ).normalize() + n = Vxyz(st.tracking_surface_normal_xyz(h_tube, aimpoint_xyz, location_lon_lat, when_ymdhmsz)).normalize() # iteratively find the normal vectors that actually # take into account the offset to the center facet for _ in range(10): n = Vxyz( st.tracking_surface_normal_xyz( - (h + n * d).data.T.flatten(), - aimpoint_xyz, - location_lon_lat, - when_ymdhmsz, + (h + n * d).data.T.flatten(), aimpoint_xyz, location_lon_lat, when_ymdhmsz ) ).normalize() # Compute heliostat configuration. 
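The compute_tracking_configuration hunks above preserve a small fixed-point loop: the tracking normal is first solved at the torque tube, then re-solved at the point offset by the center-facet distance d along the current normal, for ten iterations. The pattern in isolation (solve_normal_at is a hypothetical stand-in for the st.tracking_surface_normal_xyz call, which also takes aimpoint, location, and time arguments in the real code):

import numpy as np

def refine_tracking_normal(h: np.ndarray, d: float, solve_normal_at, iterations: int = 10) -> np.ndarray:
    """Fixed-point refinement of a tracking normal for a facet offset d along the normal."""
    n = solve_normal_at(h)                       # initial guess at the torque tube center
    n = n / np.linalg.norm(n)
    for _ in range(iterations):
        n = solve_normal_at(h + n * d)           # re-evaluate at the implied facet center
        n = n / np.linalg.norm(n)
    return n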
- return hc.heliostat_configuration_given_surface_normal_xyz( - n.data.T.flatten() - ) + return hc.heliostat_configuration_given_surface_normal_xyz(n.data.T.flatten()) else: # Heliostat centroid coordinates. # Coordinates are (x,z) center, z=0 is at torque tube height. @@ -254,9 +212,7 @@ def compute_tracking_configuration( h = h_tube # Later, add correction for center facet offset. # Compute heliostat surface normal which tracks the sun to the aimpoint. - n_xyz = st.tracking_surface_normal_xyz( - h, aimpoint_xyz, location_lon_lat, when_ymdhmsz - ) + n_xyz = st.tracking_surface_normal_xyz(h, aimpoint_xyz, location_lon_lat, when_ymdhmsz) # Compute heliostat configuration. return hc.heliostat_configuration_given_surface_normal_xyz(n_xyz) @@ -276,9 +232,7 @@ def compute_tracking_configuration_from_sun_vector( The vector from the center of the sun. """ if self.center_facet == None: - raise AttributeError( - f"Helisotat (Name: {self.name}) does not have a center facet defined" - ) + raise AttributeError(f"Helisotat (Name: {self.name}) does not have a center facet defined") # TODO tjlarki: centroid_offset needs to become a Pxyz eventually d: float = self.center_facet.centroid_offset[2] @@ -289,19 +243,13 @@ def compute_tracking_configuration_from_sun_vector( h = Pxyz(h_tube) # Later, add correction for center facet offset. # Compute heliostat surface normal which tracks the sun to the aimpoint. - n = Vxyz( - st.tracking_surface_normal_xyz_given_sun_vector( - h_tube, aimpoint_xyz, sun_vector - ) - ).normalize() + n = Vxyz(st.tracking_surface_normal_xyz_given_sun_vector(h_tube, aimpoint_xyz, sun_vector)).normalize() # iteratively find the normal vectors that actually # take into account the offset to the center facet for _ in range(10): n = Vxyz( - st.tracking_surface_normal_xyz_given_sun_vector( - (h + n * d).data.T.flatten(), aimpoint_xyz, sun_vector - ) + st.tracking_surface_normal_xyz_given_sun_vector((h + n * d).data.T.flatten(), aimpoint_xyz, sun_vector) ).normalize() # Compute heliostat configuration. @@ -311,12 +259,8 @@ def compute_stow_configuration(self): # ?? TODO RCB -- MAKE THIS SENSITIVE TO INPUT DEFINITION. NSTTF = True if NSTTF: - azimuth = np.deg2rad( - 270.0 - ) # ?? SCAFFOLDING RCB -- FIND OUT THE CORRECT NUMBER FOR THIS. - elevation = np.deg2rad( - -85.0 - ) # ?? SCAFFOLDING RCB -- FIND OUT THE CORRECT NUMBER FOR THIS. + azimuth = np.deg2rad(270.0) # ?? SCAFFOLDING RCB -- FIND OUT THE CORRECT NUMBER FOR THIS. + elevation = np.deg2rad(-85.0) # ?? SCAFFOLDING RCB -- FIND OUT THE CORRECT NUMBER FOR THIS. else: # ?? TODO RCB -- FOR NOW, ASSUME RADIAL STOW. MAKE CONTROLLABLE. origin_x = self.origin[0] @@ -333,12 +277,8 @@ def compute_face_up_configuration(self): # ?? TODO RCB -- MAKE THIS SENSITIVE TO INPUT DEFINITION. NSTTF = True if NSTTF: - azimuth = np.deg2rad( - 180.0 - ) # ?? SCAFFOLDING RCB -- FIND OUT THE CORRECT NUMBER FOR THIS. - elevation = np.deg2rad( - 90.0 - ) # ?? SCAFFOLDING RCB -- FIND OUT THE CORRECT NUMBER FOR THIS. + azimuth = np.deg2rad(180.0) # ?? SCAFFOLDING RCB -- FIND OUT THE CORRECT NUMBER FOR THIS. + elevation = np.deg2rad(90.0) # ?? SCAFFOLDING RCB -- FIND OUT THE CORRECT NUMBER FOR THIS. else: # ?? TODO RCB -- FOR NOW, ASSUME RADIAL STOW. MAKE CONTROLLABLE. origin_x = self.origin[0] @@ -355,12 +295,7 @@ def corners(self): """Returns the list of corners in ul,ur,lr,ll order.""" # Assumes that heliostat coordinates have been set, and the corners have been set. # Later we can add a more meaningful check for this. 
- return [ - self.top_left_corner, - self.top_right_corner, - self.bottom_right_corner, - self.bottom_left_corner, - ] + return [self.top_left_corner, self.top_right_corner, self.bottom_right_corner, self.bottom_left_corner] def get_configuration(self): return hc.HeliostatConfiguration(self.az, self.el) @@ -368,9 +303,7 @@ def get_configuration(self): # CANTING MODIFICATION # TODO tjlarki: clean up surface normal code - def set_canting_from_equation( - self, func: Callable[[float, float], float], move_centriods: bool = False - ) -> None: + def set_canting_from_equation(self, func: Callable[[float, float], float], move_centriods: bool = False) -> None: """ Uses an equation to set the canting of the facets on the heliostat. @@ -465,9 +398,7 @@ def set_on_axis_canting(self, aimpoint: Pxyz): # self.set_configuration(current_configuration) # self.update_position_in_space() - def set_off_axis_canting( - self, long_lat: tuple, aimpoint: Pxyz, time_ymdhmsz: tuple - ): + def set_off_axis_canting(self, long_lat: tuple, aimpoint: Pxyz, time_ymdhmsz: tuple): # we need the Rotation R1 (canting rotation) # we will call the heliostat roation R2 # R2 * R1 = R (total rotation) @@ -493,18 +424,14 @@ def set_off_axis_canting( for facet in self.facets: # facet_aiming_direction = up_vector.rotate(facet.composite_rotation) vector_the_normal_should_be = Vxyz( - st.tracking_surface_normal_xyz( - facet.origin, aimpoint_xyz, long_lat, time_ymdhmsz - ) + st.tracking_surface_normal_xyz(facet.origin, aimpoint_xyz, long_lat, time_ymdhmsz) ) R1_prime = heliostat_aiming_direction.align_to(vector_the_normal_should_be) facet.canting = copy.deepcopy(R2_inv * R1_prime * R2) self.set_configuration(current_configuration) self.update_position_in_space() - def rodrigues_vector_from_partial_derivatives( - self, dfdx_n: float, dfdy_n: float - ) -> np.ndarray: + def rodrigues_vector_from_partial_derivatives(self, dfdx_n: float, dfdy_n: float) -> np.ndarray: """ Constructs a Rodrigues vector from partial derivatives of a surface normal that is pointing nearly straight up. @@ -521,9 +448,7 @@ def rodrigues_vector_from_partial_derivatives( if (dfdx_n == 0) and (dfdy_n == 0): # Then the surface normal is vertical, and there is no rotation. # TODO: THIS SHOULD RETURN AN ACTUAL Vxyz. - return np.array( - [0.0, 0.0, 0.0] - ) # Zero magnitude implies zero rotation; direction doesn't matter. + return np.array([0.0, 0.0, 0.0]) # Zero magnitude implies zero rotation; direction doesn't matter. else: # There is a rotation. @@ -535,9 +460,7 @@ def rodrigues_vector_from_partial_derivatives( v_norm = np.linalg.norm(v) if v_norm == 0.0: # TODO RCB: REPLACE THIS WITH LOG/EXCEPTION THROW. - print( - 'ERROR: In Heliostat.set_canting_from_equation(), encountered unexpected zero v_norm.' - ) + print('ERROR: In Heliostat.set_canting_from_equation(), encountered unexpected zero v_norm.') print(' x_vec = ', x_vec) print(' y_vec = ', y_vec) assert False @@ -549,18 +472,14 @@ def rodrigues_vector_from_partial_derivatives( v_rot_norm = np.linalg.norm(v_rot) if v_rot_norm == 0.0: # TODO RCB: REPLACE THIS WITH LOG/EXCEPTION THROW. - print( - 'ERROR: In Heliostat.set_canting_from_equation(), encountered unexpected zero v_rot_norm.' 
- ) + print('ERROR: In Heliostat.set_canting_from_equation(), encountered unexpected zero v_rot_norm.') print(' u = ', u) print(' v_rot = ', v_rot) assert False u_rot = v_rot / v_rot_norm # axis of rotation normalized - theta_rot = -mt.robust_arccos( - np.dot(u, np.array([0.0, 0.0, 1.0])) - ) # angle of rotation + theta_rot = -mt.robust_arccos(np.dot(u, np.array([0.0, 0.0, 1.0]))) # angle of rotation # TODO: THIS SHOULD RETURN AN ACTUAL Vxyz. return theta_rot * u_rot @@ -596,16 +515,12 @@ def set_center_facet(self, facet: Facet | str): def set_tracking(self, aimpoint_xyz, location_lon_lat, when_ymdhmsz): # checks if len(location_lon_lat) != 2: - raise ValueError( - f'{location_lon_lat} must be of length 2. (Lattitude, Longitude)' - ) + raise ValueError(f'{location_lon_lat} must be of length 2. (Lattitude, Longitude)') # Save tracking command. self._aimpoint_xyz = aimpoint_xyz self._when_ymdhmsz = when_ymdhmsz # Set tracking configuration. - h_config = self.compute_tracking_configuration( - aimpoint_xyz, location_lon_lat, when_ymdhmsz - ) + h_config = self.compute_tracking_configuration(aimpoint_xyz, location_lon_lat, when_ymdhmsz) self.set_configuration(h_config, clear_tracking=False) @strict_types @@ -614,9 +529,7 @@ def set_tracking_from_sun_vector(self, aimpoint_xyz, sun_vector: Vxyz): # Save tracking command. self._aimpoint_xyz = aimpoint_xyz # Set tracking configuration. - h_config = self.compute_tracking_configuration_from_sun_vector( - aimpoint_xyz, sun_vector - ) + h_config = self.compute_tracking_configuration_from_sun_vector(aimpoint_xyz, sun_vector) self.set_configuration(h_config, clear_tracking=False) def set_stow(self): @@ -627,9 +540,7 @@ def set_face_up(self): h_config = self.compute_face_up_configuration() self.set_configuration(h_config, clear_tracking=True) - def set_configuration( - self, h_config: hc.HeliostatConfiguration, clear_tracking=True - ): + def set_configuration(self, h_config: hc.HeliostatConfiguration, clear_tracking=True): # TODO is this function safe to call multiple times? For example to update with small sun tracking changes. # Clear tracking command. if clear_tracking: @@ -663,13 +574,9 @@ def set_configuration( vector_offset = hel_rotation.apply(vector) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! TODO tjlarki: Line below is confusing to me - origin = ( - np.array(self.origin) + vector_offset - ) # Origin is at torque tube center. + origin = np.array(self.origin) + vector_offset # Origin is at torque tube center. - self.surface_normal = hel_rotation.apply( - [0, 0, 1] - ) # Before rotation, heliostat is face up. + self.surface_normal = hel_rotation.apply([0, 0, 1]) # Before rotation, heliostat is face up. self.el = el self.az = az @@ -711,9 +618,7 @@ def _modification_check(self, func): # TODO tjlarki: finish after any function decorated with it.""" ... - def update_position_in_space( - self, - ): # TODO tjlarki: should this be in the RayTraceable object? + def update_position_in_space(self): # TODO tjlarki: should this be in the RayTraceable object? 
"""updates the heliostat position in space, and all RayTracables contained in self""" self.set_position_in_space(self.origin, self.rotation) @@ -727,16 +632,10 @@ def most_basic_ray_tracable_objects(self) -> list[RayTraceable]: # RENDERING - def draw( - self, - view: View3d, - heliostat_styles: RenderControlHeliostat | RenderControlEnsemble = None, - ): + def draw(self, view: View3d, heliostat_styles: RenderControlHeliostat | RenderControlEnsemble = None): # Assumes that heliostat configuration has already been set. if heliostat_styles == None: - heliostat_styles = RenderControlEnsemble( - default_style=rch.mirror_surfaces() - ) + heliostat_styles = RenderControlEnsemble(default_style=rch.mirror_surfaces()) # Fetch draw style control. heliostat_style = heliostat_styles.style(self.name) @@ -747,12 +646,7 @@ def draw( # Outline. if heliostat_style.draw_outline: - corners = [ - self.top_left_corner, - self.top_right_corner, - self.bottom_right_corner, - self.bottom_left_corner, - ] + corners = [self.top_left_corner, self.top_right_corner, self.bottom_right_corner, self.bottom_left_corner] view.draw_xyz_list(corners, close=True, style=heliostat_style.outline_style) # Facets. @@ -763,31 +657,19 @@ def draw( # Surface normal. if heliostat_style.draw_surface_normal: # Construct ray. - surface_normal_ray = self.surface_normal_ray( - self.origin, heliostat_style.surface_normal_length - ) + surface_normal_ray = self.surface_normal_ray(self.origin, heliostat_style.surface_normal_length) # Draw ray and its base. view.draw_xyz(self.origin, style=heliostat_style.surface_normal_base_style) - view.draw_xyz_list( - surface_normal_ray, style=heliostat_style.surface_normal_style - ) + view.draw_xyz_list(surface_normal_ray, style=heliostat_style.surface_normal_style) # Surface normal drawn at corners. # (Not the surface normal at the corner. Facet curvature is not shown.) if heliostat_style.draw_surface_normal_at_corners: # Construct rays. - top_left_ray = self.surface_normal_ray( - self.top_left_corner, heliostat_style.corner_normal_length - ) - top_right_ray = self.surface_normal_ray( - self.top_right_corner, heliostat_style.corner_normal_length - ) - bottom_left_ray = self.surface_normal_ray( - self.bottom_left_corner, heliostat_style.corner_normal_length - ) - bottom_right_ray = self.surface_normal_ray( - self.bottom_right_corner, heliostat_style.corner_normal_length - ) + top_left_ray = self.surface_normal_ray(self.top_left_corner, heliostat_style.corner_normal_length) + top_right_ray = self.surface_normal_ray(self.top_right_corner, heliostat_style.corner_normal_length) + bottom_left_ray = self.surface_normal_ray(self.bottom_left_corner, heliostat_style.corner_normal_length) + bottom_right_ray = self.surface_normal_ray(self.bottom_right_corner, heliostat_style.corner_normal_length) rays = [top_left_ray, top_right_ray, bottom_left_ray, bottom_right_ray] # Draw each ray and its base. for base, ray in zip(corners, rays): @@ -798,9 +680,7 @@ def draw( if heliostat_style.draw_name: view.draw_xyz_text(self.origin, self.name, style=heliostat_style.name_style) - def survey_of_points( - self, resolution, random_dist: bool = False - ) -> tuple[Pxyz, Vxyz]: + def survey_of_points(self, resolution, random_dist: bool = False) -> tuple[Pxyz, Vxyz]: """ Returns a grid of equispaced points and the normal vectors at those points. 
@@ -817,9 +697,7 @@ def survey_of_points( points = Pxyz.empty() normals = Vxyz.empty() for facet in self.facets: - additional_points, additional_normals = facet.survey_of_points( - resolution, random_dist - ) + additional_points, additional_normals = facet.survey_of_points(resolution, random_dist) points = points.concatenate(additional_points) normals = normals.concatenate(additional_normals) @@ -862,13 +740,9 @@ def h_from_facet_centroids( default_mirror_shape: Callable[[float, float], float] = lambda x, y: x * 0, ) -> 'Heliostat': # Facets - facets, _ = facets_read_file( - file_centroids_offsets, facet_height, facet_width, default_mirror_shape - ) + facets, _ = facets_read_file(file_centroids_offsets, facet_height, facet_width, default_mirror_shape) - return Heliostat( - name, origin, num_facets, num_rows, num_cols, facets, pivot_height, pivot_offset - ) + return Heliostat(name, origin, num_facets, num_rows, num_cols, facets, pivot_height, pivot_offset) # TODO tjlarki: update this to be a class method diff --git a/opencsp/common/lib/csp/ufacet/HeliostatConfiguration.py b/opencsp/common/lib/csp/ufacet/HeliostatConfiguration.py index f5ba278c..35cf27cd 100644 --- a/opencsp/common/lib/csp/ufacet/HeliostatConfiguration.py +++ b/opencsp/common/lib/csp/ufacet/HeliostatConfiguration.py @@ -14,9 +14,7 @@ class HeliostatConfiguration: def __init__( self, - az: float = np.deg2rad( - 180 - ), # (az,el) = (180,90) degrees corresponde to pointing straight up, + az: float = np.deg2rad(180), # (az,el) = (180,90) degrees corresponde to pointing straight up, el: float = np.deg2rad(90), ) -> None: # as if transitioned by tilting up from face south orientation. super(HeliostatConfiguration, self).__init__() diff --git a/opencsp/common/lib/cv/AbstractFiducial.py b/opencsp/common/lib/cv/AbstractFiducial.py index f8920d34..c0b7ab36 100644 --- a/opencsp/common/lib/cv/AbstractFiducial.py +++ b/opencsp/common/lib/cv/AbstractFiducial.py @@ -45,9 +45,7 @@ def scale(self) -> float: fiducial relative to the camera.""" @classmethod - def locate_instances( - self, img: np.ndarray, anticipated_unit_vector: v3.Vxyz = None - ) -> list["AbstractFiducial"]: + def locate_instances(self, img: np.ndarray, anticipated_unit_vector: v3.Vxyz = None) -> list["AbstractFiducial"]: """For the given input image, find and report any regions that strongly match this fiducial type. Parameters: diff --git a/opencsp/common/lib/cv/CacheableImage.py b/opencsp/common/lib/cv/CacheableImage.py index 962ef4f6..7174a483 100644 --- a/opencsp/common/lib/cv/CacheableImage.py +++ b/opencsp/common/lib/cv/CacheableImage.py @@ -9,9 +9,7 @@ class CacheableImage: - def __init__( - self, array: np.ndarray = None, cache_path: str = None, source_path: str = None - ): + def __init__(self, array: np.ndarray = None, cache_path: str = None, source_path: str = None): """An image container that allows for caching an image when the image data isn't in use, or for retrieval of an image from the cached file when the data is in use. @@ -38,8 +36,7 @@ def __init__( """ if array is None and cache_path == None and source_path == None: lt.error_and_raise( - ValueError, - "Error in CacheableImage.__init__(): must provide one of array, cache_path, or source_path!", + ValueError, "Error in CacheableImage.__init__(): must provide one of array, cache_path, or source_path!" 
) self.validate_cache_path(cache_path, "__init__") self._array = array @@ -52,9 +49,7 @@ def __sizeof__(self) -> int: return sys.getsizeof(self._array) + sys.getsizeof(self._image) @classmethod - def from_single_source( - cls, array_or_path: Union[np.ndarray, str, 'CacheableImage'] - ): + def from_single_source(cls, array_or_path: Union[np.ndarray, str, 'CacheableImage']): """Generates a CacheableImage from the given numpy or image file.""" if isinstance(array_or_path, CacheableImage): return array_or_path @@ -68,8 +63,7 @@ def from_single_source( return cls(array=array) else: lt.error_and_raise( - TypeError, - f"Error in CacheableImage.from_single_source(): unexpected type {type(array_or_path)}", + TypeError, f"Error in CacheableImage.from_single_source(): unexpected type {type(array_or_path)}" ) def validate_cache_path(self, cache_path: Optional[str], caller_name: str): diff --git a/opencsp/common/lib/cv/OpticalFlow.py b/opencsp/common/lib/cv/OpticalFlow.py index fcd5bb1b..f6d2beec 100644 --- a/opencsp/common/lib/cv/OpticalFlow.py +++ b/opencsp/common/lib/cv/OpticalFlow.py @@ -101,10 +101,7 @@ def __init__( if st.is_production_run(): if cache: - lt.error_and_raise( - ValueError, - "OpticalFlow(cache=True) should not be used in production code!", - ) + lt.error_and_raise(ValueError, "OpticalFlow(cache=True) should not be used in production code!") def clear_cache(self): if ft.directory_exists(self._cache_dir): @@ -164,9 +161,7 @@ def _save_to_cache(self): ft.delete_file(cache_dat_file, error_on_not_exists=False) else: cache_txt_file_down = os.path.join(self._cache_dir, f"cache{i+1}.txt") - cache_dat_file_down = os.path.join( - self._cache_dir, f"cache{i+1}.pickle" - ) + cache_dat_file_down = os.path.join(self._cache_dir, f"cache{i+1}.pickle") if ft.file_exists(cache_txt_file): shutil.move(cache_txt_file, cache_txt_file_down) shutil.move(cache_dat_file, cache_dat_file_down) @@ -229,9 +224,7 @@ def dense(self) -> tuple[np.ndarray, np.ndarray]: - np.ndarray: self.mag, the magnitude of the flow per pixel (pixels) - np.ndarray: self.ang, the direction of the flow per pixel (radians, 0 to the right, positive counter-clockwise) """ - if ( - self._mag is None - ): # use "is", with "==" numpy does an element-wise comparison + if self._mag is None: # use "is", with "==" numpy does an element-wise comparison if not self._load_from_cache(): # load the images frame1 = self._load_image(1) @@ -350,9 +343,7 @@ def to_img(self, mag_render_clip: tuple[float, float] = None): # HSV range is 0-179 (H), 0-255 (S), 0-255 (V) # https://docs.opencv.org/3.4/df/d9d/tutorial_py_colorspaces.html if not isinstance(self.mag, np.ndarray): - raise RuntimeError( - "Error: in OpticalFlow.to_img: self.mag is not an np.ndarray, must call dense() first!" 
- ) + raise RuntimeError("Error: in OpticalFlow.to_img: self.mag is not an np.ndarray, must call dense() first!") h, w = self.mag.shape hsv = np.zeros(shape=(h, w, 3), dtype=np.uint8) @@ -462,32 +453,16 @@ def draw_flow_angle_reference(self): fig_record.view.draw_image(square_rgb) fig_record.view.draw_pq_text( - (1, 0.5), - "0", - style=rct.RenderControlText( - color='k', fontsize=20, horizontalalignment='right' - ), + (1, 0.5), "0", style=rct.RenderControlText(color='k', fontsize=20, horizontalalignment='right') ) fig_record.view.draw_pq_text( - (0.5, 1), - "Ï€/2", - style=rct.RenderControlText( - color='k', fontsize=20, verticalalignment='top' - ), + (0.5, 1), "Ï€/2", style=rct.RenderControlText(color='k', fontsize=20, verticalalignment='top') ) fig_record.view.draw_pq_text( - (0, 0.5), - "Ï€", - style=rct.RenderControlText( - color='k', fontsize=20, horizontalalignment='left' - ), + (0, 0.5), "Ï€", style=rct.RenderControlText(color='k', fontsize=20, horizontalalignment='left') ) fig_record.view.draw_pq_text( - (0.5, 0), - "3Ï€/2", - style=rct.RenderControlText( - color='k', fontsize=20, verticalalignment='bottom' - ), + (0.5, 0), "3Ï€/2", style=rct.RenderControlText(color='k', fontsize=20, verticalalignment='bottom') ) ang = 0 @@ -502,9 +477,7 @@ def draw_flow_angle_reference(self): sang = "." + sang.split(".")[1] else: sang = "%d" % int(ang) - fig_record.view.draw_pq_text( - (x, y), sang, style=rct.RenderControlText(color='k', fontsize=20) - ) + fig_record.view.draw_pq_text((x, y), sang, style=rct.RenderControlText(color='k', fontsize=20)) prev_ang = ang fig_record.view.show(block=True) @@ -547,9 +520,7 @@ def save(self, dir: str, name_ext="", overwrite=False): dir_name_ext = os.path.join(dir, name_ext) # sanity check - if ( - self._mag is None - ): # "is" instead of "==" to avoid np.ndarray element-wise comparison + if self._mag is None: # "is" instead of "==" to avoid np.ndarray element-wise comparison lt.error_and_raise( RuntimeError, "Error: in OpticalFlow.save: unable to save non-existant matrices 'magnitude' and 'angle'. Run dense() first!", @@ -600,10 +571,7 @@ def load(self, dir: str, name_ext: str = "", error_on_not_exist=True): # check that the file exists if not ft.file_exists(dir_name_ext): if error_on_not_exist: - lt.error_and_raise( - RuntimeError, - f"Error: in OpticalFlow.load: file \"{dir_name_ext}\" doesn't exist!", - ) + lt.error_and_raise(RuntimeError, f"Error: in OpticalFlow.load: file \"{dir_name_ext}\" doesn't exist!") return None, None # load! 
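The OpticalFlow hunks above only re-wrap long calls, but they document the class's contract: dense() yields per-pixel magnitude (pixels) and angle (radians), and to_img() maps those onto OpenCV's 0-179 hue range. A sketch of that flow-to-color mapping with plain OpenCV (that OpenCSP computes its dense flow with Farneback specifically is an assumption here; the hue/value mapping follows the ranges noted in the hunk):

import cv2
import numpy as np

def flow_to_bgr(frame1_gray: np.ndarray, frame2_gray: np.ndarray) -> np.ndarray:
    """Dense optical flow between two grayscale frames, rendered as hue=direction, value=magnitude."""
    flow = cv2.calcOpticalFlowFarneback(frame1_gray, frame2_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])        # magnitude (px), angle (rad, ccw)
    hsv = np.zeros((*mag.shape, 3), dtype=np.uint8)
    hsv[..., 0] = (ang * 90.0 / np.pi).astype(np.uint8) % 180     # H: 0..2*pi -> 0..179
    hsv[..., 1] = 255
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)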
diff --git a/opencsp/common/lib/cv/SpotAnalysis.py b/opencsp/common/lib/cv/SpotAnalysis.py index 3f7c50fe..701bdb4c 100644 --- a/opencsp/common/lib/cv/SpotAnalysis.py +++ b/opencsp/common/lib/cv/SpotAnalysis.py @@ -9,19 +9,10 @@ # from opencsp.common.lib.cv.spot_analysis.image_processor import * # I suggest importing these dynamically as needed, to reduce startup time from opencsp.common.lib.cv.spot_analysis.ImagesIterable import ImagesIterable from opencsp.common.lib.cv.spot_analysis.ImagesStream import ImagesStream -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisImagesStream import ( - ImageType, - SpotAnalysisImagesStream, -) -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import ( - SpotAnalysisOperable, -) -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperablesStream import ( - SpotAnalysisOperablesStream, -) -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperableAttributeParser import ( - SpotAnalysisOperableAttributeParser, -) +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisImagesStream import ImageType, SpotAnalysisImagesStream +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperablesStream import SpotAnalysisOperablesStream +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperableAttributeParser import SpotAnalysisOperableAttributeParser import opencsp.common.lib.render.VideoHandler as vh import opencsp.common.lib.tool.file_tools as ft import opencsp.common.lib.tool.image_tools as it @@ -171,9 +162,7 @@ def __init__( self.set_image_processors(image_processors) - def set_image_processors( - self, image_processors: list[asaip.AbstractSpotAnalysisImagesProcessor] - ): + def set_image_processors(self, image_processors: list[asaip.AbstractSpotAnalysisImagesProcessor]): self.image_processors = image_processors # chain the image processors together @@ -187,9 +176,7 @@ def set_image_processors( image_processors_persistant_memory_total, ) - mem_per_image_processor = image_processors_persistant_memory_total / len( - self.image_processors - ) + mem_per_image_processor = image_processors_persistant_memory_total / len(self.image_processors) for image_processor in self.image_processors: image_processor._allowed_memory_footprint = mem_per_image_processor @@ -213,9 +200,7 @@ def _assign_inputs(self, input_operables: Iterator[SpotAnalysisOperable]): self._prev_result = None self.image_processors[0].assign_inputs(self.input_stream) - def set_primary_images( - self, images: list[str] | list[np.ndarray] | vh.VideoHandler | ImagesStream - ): + def set_primary_images(self, images: list[str] | list[np.ndarray] | vh.VideoHandler | ImagesStream): """Assigns the images of the spot to be analyzed, in preparation for process_next(). See also: set_input_operables()""" @@ -224,21 +209,14 @@ def set_primary_images( self._assign_inputs(SpotAnalysisOperablesStream(images_stream)) def set_input_operables( - self, - input_operables: ( - SpotAnalysisOperablesStream - | list[SpotAnalysisOperable] - | Iterator[SpotAnalysisOperable] - ), + self, input_operables: SpotAnalysisOperablesStream | list[SpotAnalysisOperable] | Iterator[SpotAnalysisOperable] ): """Assigns primary and supporting images, and other necessary data, in preparation for process_next(). 
See also: set_primary_images()""" self._assign_inputs(input_operables) - def set_default_support_images( - self, support_images: dict[ImageType, CacheableImage] - ): + def set_default_support_images(self, support_images: dict[ImageType, CacheableImage]): """Provides extra support images for use during image processing, as a default for when the support images are not otherwise from the input operables. Note that this does not include the primary images or other @@ -249,9 +227,7 @@ def set_default_data(self, operable: SpotAnalysisOperable): """Provides extra data for use during image processing, as a default for when the data is not otherwise from the input operables. Note that this does not include the primary or supporting images.""" - self.input_stream.set_defaults( - self.input_stream.default_support_images, operable - ) + self.input_stream.set_defaults(self.input_stream.default_support_images, operable) def process_next(self): """Attempts to get the next processed image and results data from the @@ -268,9 +244,7 @@ def process_next(self): # Release memory from the previous result if self._prev_result != None: - self.image_processors[-1].cache_image_to_disk_as_necessary( - self._prev_result - ) + self.image_processors[-1].cache_image_to_disk_as_necessary(self._prev_result) self._prev_result = None # Attempt to get the next image. Raises StopIteration if there are no @@ -283,9 +257,7 @@ def process_next(self): self._prev_result = result return result - def _save_image( - self, save_path_name_ext: str, image: CacheableImage, description: str - ): + def _save_image(self, save_path_name_ext: str, image: CacheableImage, description: str): # check for overwrite if ft.file_exists(save_path_name_ext): if self.save_overwrite: @@ -349,9 +321,7 @@ def save_image( # Get the original file name orig_image_path_name = "" if operable.primary_image.source_path != None: - _, orig_image_path_name, _ = ft.path_components( - operable.primary_image.source_path - ) + _, orig_image_path_name, _ = ft.path_components(operable.primary_image.source_path) orig_image_path_name += "_" # Get the output name of the file to save to @@ -364,9 +334,7 @@ def save_image( image_path_name_ext = os.path.join(save_dir, f"{image_name}.{save_ext}") # Try to save the image - if not self._save_image( - image_path_name_ext, operable.primary_image, "primary image" - ): + if not self._save_image(image_path_name_ext, operable.primary_image, "primary image"): return # Save supporting images @@ -384,9 +352,7 @@ def save_image( # save the image self._save_image( - supporting_image_path_name_ext, - supporting_image, - f"{supporting_image_name} supporting image", + supporting_image_path_name_ext, supporting_image, f"{supporting_image_name} supporting image" ) # Save associated attributes @@ -429,13 +395,9 @@ def __next__(self): LogScaleImageProcessor(), FalseColorImageProcessor(), ] - sa = SpotAnalysis( - "BCS Test", image_processors, save_dir=outdir, save_overwrite=True - ) + sa = SpotAnalysis("BCS Test", image_processors, save_dir=outdir, save_overwrite=True) image_name_exts = ft.files_in_directory(indir) - image_path_name_exts = [ - os.path.join(indir, image_name_ext) for image_name_ext in image_name_exts - ] + image_path_name_exts = [os.path.join(indir, image_name_ext) for image_name_ext in image_name_exts] sa.set_primary_images(image_path_name_exts) # Iterating through the returned results causes the images to be processed @@ -445,9 +407,7 @@ def __next__(self): # save out the images and associated attributes save_path = 
sa.save_image(result) if save_path is None: - lt.warn( - f"Failed to save image. Maybe SpotAnalaysis.save_overwrite is False? ({sa.save_overwrite=})" - ) + lt.warn(f"Failed to save image. Maybe SpotAnalaysis.save_overwrite is False? ({sa.save_overwrite=})") else: lt.info(f"Saved image to {save_path}") diff --git a/opencsp/common/lib/cv/spot_analysis/ImagesIterable.py b/opencsp/common/lib/cv/spot_analysis/ImagesIterable.py index 7d6b6a3b..262381ef 100644 --- a/opencsp/common/lib/cv/spot_analysis/ImagesIterable.py +++ b/opencsp/common/lib/cv/spot_analysis/ImagesIterable.py @@ -10,9 +10,7 @@ class _IndexableIterable(Iterable[CacheableImage]): """A restartable iterable (via an iter() call) that piggybacks off of an indexable object.""" - def __init__( - self, src: list[str | CacheableImage] | Callable[[int], CacheableImage] - ): + def __init__(self, src: list[str | CacheableImage] | Callable[[int], CacheableImage]): self.src = src self.idx = 0 @@ -62,21 +60,12 @@ def _video_to_list(self): video.extract_frames() frame_format = video.get_extracted_frame_path_and_name_format() frame_dir, _, frame_ext = ft.path_components(frame_format) - frame_names: list[str] = ft.files_in_directory_by_extension( - frame_dir, [frame_ext] - )[frame_ext] + frame_names: list[str] = ft.files_in_directory_by_extension(frame_dir, [frame_ext])[frame_ext] return [os.path.join(frame_dir, frame_name) for frame_name in frame_names] class ImagesIterable(Iterable[CacheableImage]): - def __init__( - self, - stream: ( - Callable[[int], CacheableImage] - | list[str | CacheableImage] - | vh.VideoHandler - ), - ): + def __init__(self, stream: Callable[[int], CacheableImage] | list[str | CacheableImage] | vh.VideoHandler): """A restartable iterable that returns one image at a time, for as long as images are still available. Iterates over an iterator or callable that returns one image at a time. 
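The ImagesIterable hunks above describe a "restartable iterable": every iter() call starts over at the first image, and next() walks an index across either a list or an index-to-image callable. A minimal sketch of that pattern, independent of OpenCSP's CacheableImage type (the assumption that an exhausted callable raises IndexError is illustrative):

from typing import Callable, Union

class RestartableSource:
    """Restartable iterable over a list or an index-to-item callable."""

    def __init__(self, src: Union[list, Callable[[int], object]]):
        self.src = src
        self.idx = 0

    def __iter__(self):
        self.idx = 0                      # restart from the beginning on every iter() call
        return self

    def __next__(self):
        try:
            item = self.src[self.idx] if isinstance(self.src, list) else self.src(self.idx)
        except IndexError:
            raise StopIteration
        self.idx += 1
        return item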
diff --git a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py index a8278172..479ef9d6 100644 --- a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py +++ b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py @@ -47,9 +47,7 @@ def __init__( ) del other_iterators[ImageType.PRIMARY] - self.current_iterators: dict[ImageType, ImagesIterable | ImagesStream] = { - ImageType.PRIMARY: None - } + self.current_iterators: dict[ImageType, ImagesIterable | ImagesStream] = {ImageType.PRIMARY: None} def __iter__(self): self.current_iterators = {} diff --git a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py index ebb8a0c6..72694888 100644 --- a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py +++ b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py @@ -8,9 +8,7 @@ import opencsp.common.lib.cv.AbstractFiducial as af from opencsp.common.lib.cv.CacheableImage import CacheableImage from opencsp.common.lib.cv.spot_analysis.SpotAnalysisImagesStream import ImageType -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisPopulationStatistics import ( - SpotAnalysisPopulationStatistics, -) +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisPopulationStatistics import SpotAnalysisPopulationStatistics import opencsp.common.lib.tool.file_tools as ft @@ -96,20 +94,14 @@ def __post_init__(self): if requires_update: # use __init__ to update frozen values self.__init__( - primary_image, - primary_image_source_path=primary_image_source_path, - supporting_images=supporting_images, + primary_image, primary_image_source_path=primary_image_source_path, supporting_images=supporting_images ) def __sizeof__(self) -> int: - return sys.getsizeof(self.primary_image) + sum( - [sys.getsizeof(im) for im in self.supporting_images.values()] - ) + return sys.getsizeof(self.primary_image) + sum([sys.getsizeof(im) for im in self.supporting_images.values()]) def replace_use_default_values( - self, - supporting_images: dict[ImageType, CacheableImage] = None, - data: 'SpotAnalysisOperable' = None, + self, supporting_images: dict[ImageType, CacheableImage] = None, data: 'SpotAnalysisOperable' = None ) -> 'SpotAnalysisOperable': """Sets the supporting_images and other data for an operable where they are None for this instance. 
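The SpotAnalysisOperable hunks here center on one idiom: a frozen dataclass whose None fields are filled from defaults with dataclasses.replace, returning a new instance. A minimal sketch of that fill-from-defaults idiom on a hypothetical frozen dataclass (field names are illustrative, not SpotAnalysisOperable's):

from dataclasses import dataclass, replace

@dataclass(frozen=True)
class Sample:
    primary: str
    light_sources: tuple = None
    given_fiducials: tuple = None

def with_defaults(op: Sample, defaults: Sample) -> Sample:
    """Return a copy of op in which any None field is taken from defaults."""
    return replace(
        op,
        light_sources=defaults.light_sources if op.light_sources is None else op.light_sources,
        given_fiducials=defaults.given_fiducials if op.given_fiducials is None else op.given_fiducials,
    )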
Returns a new operable with the populated @@ -118,31 +110,19 @@ def replace_use_default_values( if supporting_images != None: for image_type in supporting_images: - if (image_type in ret.supporting_images) and ( - ret.supporting_images[image_type] != None - ): + if (image_type in ret.supporting_images) and (ret.supporting_images[image_type] != None): supporting_images[image_type] = ret.supporting_images[image_type] ret = replace(ret, supporting_images=supporting_images) if data != None: - given_fiducials = ( - data.given_fiducials - if self.given_fiducials == None - else self.given_fiducials - ) - found_fiducials = ( - data.found_fiducials - if self.found_fiducials == None - else self.found_fiducials - ) + given_fiducials = data.given_fiducials if self.given_fiducials == None else self.given_fiducials + found_fiducials = data.found_fiducials if self.found_fiducials == None else self.found_fiducials camera_intrinsics_characterization = ( data.camera_intrinsics_characterization if self.camera_intrinsics_characterization == None else self.camera_intrinsics_characterization ) - light_sources = ( - data.light_sources if self.light_sources == None else self.light_sources - ) + light_sources = data.light_sources if self.light_sources == None else self.light_sources ret = replace( ret, given_fiducials=given_fiducials, diff --git a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperableAttributeParser.py b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperableAttributeParser.py index fdb0edf3..5445a8e5 100644 --- a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperableAttributeParser.py +++ b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperableAttributeParser.py @@ -7,9 +7,7 @@ class SpotAnalysisOperableAttributeParser(iap.ImageAttributeParser): def __init__(self, operable: sao.SpotAnalysisOperable = None, spot_analysis=None): # get the current image source path, and initialize the parent - current_image_source = tt.default( - lambda: operable.primary_image.source_path, None - ) + current_image_source = tt.default(lambda: operable.primary_image.source_path, None) super().__init__(current_image_source=current_image_source) # set values based on inputs + retrieved attributes @@ -17,24 +15,18 @@ def __init__(self, operable: sao.SpotAnalysisOperable = None, spot_analysis=None lambda: spot_analysis.image_processors, [] ) self.spot_analysis: str = tt.default(lambda: spot_analysis.name, None) - self.image_processors: list[str] = [ - processor.name for processor in image_processors - ] + self.image_processors: list[str] = [processor.name for processor in image_processors] # retrieve any available attributes from the associated attributes file if self._previous_attr != None: - self.set_defaults( - self._previous_attr.get_parser(SpotAnalysisOperableAttributeParser) - ) + self.set_defaults(self._previous_attr.get_parser(SpotAnalysisOperableAttributeParser)) def attributes_key(self) -> str: return "spot analysis attributes" def set_defaults(self, other: 'SpotAnalysisOperableAttributeParser'): self.spot_analysis = tt.default(self.spot_analysis, other.spot_analysis) - self.image_processors = tt.default( - self.image_processors, other.image_processors - ) + self.image_processors = tt.default(self.image_processors, other.image_processors) super().set_defaults(other) def has_contents(self) -> bool: @@ -42,18 +34,13 @@ def has_contents(self) -> bool: return True return super().has_contents() - def parse_my_contents( - self, file_path_name_ext: str, raw_contents: str, my_contents: any - ): + def 
parse_my_contents(self, file_path_name_ext: str, raw_contents: str, my_contents: any): self.spot_analysis = my_contents['spot_analysis_name'] self.image_processors = my_contents['image_processors'] super().parse_my_contents(file_path_name_ext, raw_contents, my_contents) def my_contents_to_json(self, file_path_name_ext: str) -> any: - ret = { - 'spot_analysis_name': self.spot_analysis, - 'image_processors': self.image_processors, - } + ret = {'spot_analysis_name': self.spot_analysis, 'image_processors': self.image_processors} ret = {**ret, **super().my_contents_to_json(file_path_name_ext)} return ret diff --git a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperablesStream.py b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperablesStream.py index ada31604..541b687b 100644 --- a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperablesStream.py +++ b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperablesStream.py @@ -4,35 +4,20 @@ from opencsp.common.lib.cv.CacheableImage import CacheableImage from opencsp.common.lib.cv.spot_analysis.ImagesIterable import ImagesIterable from opencsp.common.lib.cv.spot_analysis.ImagesStream import ImagesStream -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisImagesStream import ( - ImageType, - SpotAnalysisImagesStream, -) -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import ( - SpotAnalysisOperable, -) +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisImagesStream import ImageType, SpotAnalysisImagesStream +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable class SpotAnalysisOperablesStream(Iterator[SpotAnalysisOperable]): def __init__( - self, - images: ( - ImagesIterable - | ImagesStream - | SpotAnalysisImagesStream - | Iterator[SpotAnalysisOperable] - ), + self, images: ImagesIterable | ImagesStream | SpotAnalysisImagesStream | Iterator[SpotAnalysisOperable] ): self.images = images self.images_iter = None self.default_support_images: dict[ImageType, CacheableImage] = None self.default_data: SpotAnalysisOperable = None - def set_defaults( - self, - default_support_images: dict[ImageType, CacheableImage], - default_data: SpotAnalysisOperable, - ): + def set_defaults(self, default_support_images: dict[ImageType, CacheableImage], default_data: SpotAnalysisOperable): self.default_support_images = default_support_images self.default_data = default_data @@ -47,16 +32,12 @@ def __next_operable(self): elif isinstance(val, dict): primary_image = val[ImageType.PRIMARY] supporting_images = copy.copy(val) - return SpotAnalysisOperable( - primary_image, supporting_images=supporting_images - ) + return SpotAnalysisOperable(primary_image, supporting_images=supporting_images) else: primary_image = val return SpotAnalysisOperable(primary_image, {}) def __next__(self): operable = self.__next_operable() - operable = operable.replace_use_default_values( - self.default_support_images, self.default_data - ) + operable = operable.replace_use_default_values(self.default_support_images, self.default_data) return operable diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractSpotAnalysisImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractSpotAnalysisImageProcessor.py index 5c73c2f2..1252fa6b 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractSpotAnalysisImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractSpotAnalysisImageProcessor.py @@ -5,15 +5,9 @@ from opencsp.common.lib.cv.CacheableImage import 
CacheableImage from opencsp.common.lib.cv.spot_analysis.ImagesIterable import ImagesIterable from opencsp.common.lib.cv.spot_analysis.ImagesStream import ImagesStream -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import ( - SpotAnalysisOperable, -) -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperablesStream import ( - SpotAnalysisOperablesStream, -) -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisImagesStream import ( - SpotAnalysisImagesStream, -) +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperablesStream import SpotAnalysisOperablesStream +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisImagesStream import SpotAnalysisImagesStream from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessorLeger import ( AbstractSpotAnalysisImagesProcessorLeger, ) @@ -21,9 +15,7 @@ import opencsp.common.lib.tool.typing_tools as tt -class AbstractSpotAnalysisImagesProcessor( - Iterator[SpotAnalysisOperable], AbstractSpotAnalysisImagesProcessorLeger -): +class AbstractSpotAnalysisImagesProcessor(Iterator[SpotAnalysisOperable], AbstractSpotAnalysisImagesProcessorLeger): """Class to perform one step of image processing before spot analysis is performed. This is an abstract class. Implementations can be found in the same @@ -56,11 +48,7 @@ def run( ImagesIterable | ImagesStream | SpotAnalysisImagesStream - | Union[ - 'AbstractSpotAnalysisImagesProcessor', - list[SpotAnalysisOperable], - Iterator[SpotAnalysisOperable], - ] + | Union['AbstractSpotAnalysisImagesProcessor', list[SpotAnalysisOperable], Iterator[SpotAnalysisOperable]] ), ) -> list[CacheableImage]: """Performs image processing on the input images.""" @@ -73,9 +61,7 @@ def run( pass return copy.copy(self.all_processed_results) - def process_image( - self, input_operable: SpotAnalysisOperable, is_last: bool = False - ) -> list[SpotAnalysisOperable]: + def process_image(self, input_operable: SpotAnalysisOperable, is_last: bool = False) -> list[SpotAnalysisOperable]: """Should probably not be called by external classes. Evaluate this instance as an iterator instead. Executes this instance's image processing on a single given input @@ -143,9 +129,7 @@ def process_image( return ret @abstractmethod - def _execute( - self, operable: SpotAnalysisOperable, is_last: bool - ) -> list[SpotAnalysisOperable]: + def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: """Evaluate an input primary image (and other images/data), and generate the output processed image(s) and data. The actual image processing method. Called from process_image(). @@ -291,9 +275,7 @@ def get_processed_image_save_callback( This method is designed to be used as a callback with self.on_image_processed(). 
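For the save-callback hunk in this file: the one-element idx_list acts as a mutable counter captured by the returned lambda, so each invocation numbers its output image and then advances the counter. Below is a simplified, self-contained sketch of that closure pattern; the function name and the PIL-style save() call are assumptions for illustration, not the OpenCSP API.

import os
from PIL import Image

def make_numbered_save_callback(out_dir: str, name_prefix: str = "SA", ext: str = "jpg"):
    idx_list = [0]  # one-element list: a mutable counter shared by every call to the closure

    def _on_image_processed(image: Image.Image) -> None:
        # Save the image under an incrementing index, then advance the shared counter.
        path = os.path.join(out_dir, f"{name_prefix}_{idx_list[0]}.{ext}")
        image.save(path)
        idx_list[0] += 1

    return _on_image_processed

# Usage sketch: pass the returned function wherever a per-processed-image callback is expected.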
""" idx_list = [0] - return lambda operable: self._save_image( - operable.primary_image, idx_list, dir, name_prefix, ext - ) + return lambda operable: self._save_image(operable.primary_image, idx_list, dir, name_prefix, ext) def on_image_processed(self, callback: Callable[[SpotAnalysisOperable], None]): """Registers the given callback, to be evaluated for each processed diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractSpotAnalysisImageProcessorLeger.py b/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractSpotAnalysisImageProcessorLeger.py index 007a33f9..bab8ad40 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractSpotAnalysisImageProcessorLeger.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractSpotAnalysisImageProcessorLeger.py @@ -6,9 +6,7 @@ import sys from opencsp.common.lib.cv.CacheableImage import CacheableImage -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import ( - SpotAnalysisOperable, -) +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable import opencsp.common.lib.opencsp_path.opencsp_root_path as orp import opencsp.common.lib.tool.log_tools as lt import opencsp.common.lib.tool.file_tools as ft @@ -69,25 +67,19 @@ def __sizeof__(self) -> int: elif len(self.cummulative_processed_results) == 0: return 0 else: - return sys.getsizeof(self.cummulative_processed_results[0]) * len( - self.cummulative_processed_results - ) + return sys.getsizeof(self.cummulative_processed_results[0]) * len(self.cummulative_processed_results) def __del__(self): # delete cached numpy files if ft.directory_exists(self._get_tmp_path()): - ft.delete_files_in_directory( - self._get_tmp_path(), "*.npy", error_on_dir_not_exists=False - ) + ft.delete_files_in_directory(self._get_tmp_path(), "*.npy", error_on_dir_not_exists=False) if ft.directory_is_empty(self._get_tmp_path()): os.rmdir(self._get_tmp_path()) # delete output png files if self._my_tmp_dir != None: if self._clear_tmp_on_deconstruct: - ft.delete_files_in_directory( - self._my_tmp_dir, "*.png", error_on_dir_not_exists=False - ) + ft.delete_files_in_directory(self._my_tmp_dir, "*.png", error_on_dir_not_exists=False) if ft.directory_is_empty(self._my_tmp_dir): os.rmdir(self._my_tmp_dir) self._my_tmp_dir = None @@ -111,12 +103,8 @@ def input_operables(self) -> list[SpotAnalysisOperable] | None: """The input operables that were given to this instance before it did any image processing. 
None if the input wasn't a list or image processor type.""" - if isinstance( - self._original_operables, AbstractSpotAnalysisImagesProcessorLeger - ): - predecessor: AbstractSpotAnalysisImagesProcessorLeger = ( - self._original_operables - ) + if isinstance(self._original_operables, AbstractSpotAnalysisImagesProcessorLeger): + predecessor: AbstractSpotAnalysisImagesProcessorLeger = self._original_operables if predecessor.finished: return predecessor.all_results elif isinstance(self._original_operables, list): @@ -151,9 +139,7 @@ def all_results(self): def assign_inputs( self, operables: Union[ - 'AbstractSpotAnalysisImagesProcessorLeger', - list[SpotAnalysisOperable], - Iterator[SpotAnalysisOperable], + 'AbstractSpotAnalysisImagesProcessorLeger', list[SpotAnalysisOperable], Iterator[SpotAnalysisOperable] ], ): """Register the input operables to be processed either with the run() method, or as an iterator.""" @@ -164,10 +150,7 @@ def assign_inputs( self.finished_processing = False def initialize_cummulative_processed_results(self): - if ( - self.cummulative_processed_results != None - and len(self.cummulative_processed_results) > 0 - ): + if self.cummulative_processed_results != None and len(self.cummulative_processed_results) > 0: lt.error_and_raise( RuntimeError, f"Programmer error: initialized cummulative_processed_results at incorrect time. There are current {len(self.cummulative_processed_results)} in-flight results when there should be 0.", @@ -207,9 +190,7 @@ def cache_image_to_disk_as_necessary(self, operable: SpotAnalysisOperable): def _get_save_dir(self): """Finds a temporary directory to save to for the processed output images from this instance.""" if self._my_tmp_dir == None: - scratch_dir = os.path.join( - orp.opencsp_scratch_dir(), "spot_analysis_image_processing" - ) + scratch_dir = os.path.join(orp.opencsp_scratch_dir(), "spot_analysis_image_processing") i = 0 while True: dirname = self.name + str(i) @@ -234,9 +215,7 @@ def _get_tmp_path(self) -> str: Where to save the image. """ # get the path - path_name_ext = os.path.join( - self._get_save_dir(), f"{self._tmp_images_saved}.npy" - ) + path_name_ext = os.path.join(self._get_save_dir(), f"{self._tmp_images_saved}.npy") self._tmp_images_saved += 1 return path_name_ext @@ -251,8 +230,7 @@ def __len__(self) -> int: if self.all_processed_results != None: return len(self.all_processed_results) lt.error_and_raise( - RuntimeError, - "Can't get the length of this instance until all input images have been processed.", + RuntimeError, "Can't get the length of this instance until all input images have been processed." ) def save_processed_images(self, dir: str, name_prefix: str = None, ext="jpg"): @@ -266,18 +244,9 @@ def save_processed_images(self, dir: str, name_prefix: str = None, ext="jpg"): for idx, operable in enumerate(self.all_processed_results): self._save_image(operable.primary_image, [idx], dir, name_prefix, ext) - def _save_image( - self, - im: CacheableImage, - idx_list: list[int], - dir: str, - name_prefix: str = None, - ext="jpg", - ): + def _save_image(self, im: CacheableImage, idx_list: list[int], dir: str, name_prefix: str = None, ext="jpg"): idx = idx_list[0] - image_name = ( - "" if name_prefix == None else f"{name_prefix}_" - ) + f"SA_preprocess_{self.name}{idx}" + image_name = ("" if name_prefix == None else f"{name_prefix}_") + f"SA_preprocess_{self.name}{idx}" image_path_name_ext = os.path.join(dir, image_name + "." 
+ ext) lt.debug("Saving SpotAnalysis processed image to " + image_path_name_ext) im.to_image().save(image_path_name_ext) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py index e3ccecd1..c09ca61b 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py @@ -1,6 +1,4 @@ -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import ( - SpotAnalysisOperable, -) +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( AbstractSpotAnalysisImagesProcessor, ) @@ -12,8 +10,6 @@ class EchoImageProcessor(AbstractSpotAnalysisImagesProcessor): def __init__(self): super().__init__(self.__class__.__name__) - def _execute( - self, operable: SpotAnalysisOperable, is_last: bool - ) -> list[SpotAnalysisOperable]: + def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: lt.debug(f"Processing image {operable.primary_image_name_for_logs}") return [operable] diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/FalseColorImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/FalseColorImageProcessor.py index 3f17c223..19758899 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/FalseColorImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/FalseColorImageProcessor.py @@ -2,9 +2,7 @@ import dataclasses import numpy as np -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import ( - SpotAnalysisOperable, -) +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( AbstractSpotAnalysisImagesProcessor, ) @@ -88,11 +86,7 @@ def _map_jet_human_rgb(input_color: int): elif input_color <= 255 * 3 + 128: # yellow to red ret = [255, 2 * ((255 * 3 + 128) - input_color), 0] else: # red to white - ret = [ - 255, - 2 * (input_color - (255 * 3 + 128)), - 2 * (input_color - (255 * 3 + 128)), - ] + ret = [255, 2 * (input_color - (255 * 3 + 128)), 2 * (input_color - (255 * 3 + 128))] return (ret[0] << 16) + (ret[1] << 8) + ret[2] def apply_mapping_jet_custom(self, operable: SpotAnalysisOperable, map_type: str): @@ -111,9 +105,7 @@ def apply_mapping_jet_custom(self, operable: SpotAnalysisOperable, map_type: str # red_to_white = 127/255 representable_colors = 255 * 6 if map_type == 'large' else 255 * 4 max_value = operable.max_popf - new_image: np.ndarray = operable.primary_image.nparray * ( - (representable_colors - 1) / max_value - ) + new_image: np.ndarray = operable.primary_image.nparray * ((representable_colors - 1) / max_value) new_image = np.clip(new_image, 0, representable_colors - 1).astype(np.int32) if len(new_image.shape) == 3: new_image = np.squeeze(new_image, axis=2) @@ -126,9 +118,7 @@ def apply_mapping_jet_custom(self, operable: SpotAnalysisOperable, map_type: str assert it.dims_and_nchannels(color_image)[1] == 3 # apply the mapping - map_func = ( - self._map_jet_large_rgb if map_type == 'large' else self._map_jet_human_rgb - ) + map_func = self._map_jet_large_rgb if map_type == 'large' else self._map_jet_human_rgb mapping = {k: map_func(k) for k in range(representable_colors)} new_image = 
np.vectorize(mapping.__getitem__)(new_image) color_image[:, :, 0] = new_image >> 16 @@ -153,9 +143,7 @@ def apply_mapping_jet(self, operable: SpotAnalysisOperable): # rescale to the number of representable colors representable_colors = 256 max_value = operable.max_popf - new_image: np.ndarray = operable.primary_image.nparray * ( - (representable_colors - 1) / max_value - ) + new_image: np.ndarray = operable.primary_image.nparray * ((representable_colors - 1) / max_value) new_image = np.clip(new_image, 0, representable_colors - 1) new_image = new_image.astype(np.uint8) @@ -164,9 +152,7 @@ def apply_mapping_jet(self, operable: SpotAnalysisOperable): return dataclasses.replace(operable, primary_image=ret) - def _execute( - self, operable: SpotAnalysisOperable, is_last: bool - ) -> list[SpotAnalysisOperable]: + def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: image = operable.primary_image.nparray # verify that this is a grayscale image diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/LogScaleImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/LogScaleImageProcessor.py index 8760ca48..8b61d41e 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/LogScaleImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/LogScaleImageProcessor.py @@ -2,23 +2,14 @@ import numpy as np import opencsp.common.lib.tool.image_tools as it -from opencsp.common.lib.cv.spot_analysis.image_processor import ( - AbstractSpotAnalysisImagesProcessor, -) -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import ( - SpotAnalysisOperable, -) +from opencsp.common.lib.cv.spot_analysis.image_processor import AbstractSpotAnalysisImagesProcessor +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable class LogScaleImageProcessor(AbstractSpotAnalysisImagesProcessor): """Converts the input images into a log scale.""" - def __init__( - self, - max_value_input=0, - cummulative_max_value_input=False, - max_value_output=65535, - ): + def __init__(self, max_value_input=0, cummulative_max_value_input=False, max_value_output=65535): """A video processor that adjusts the scale of input images to a log scale. 
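The LogScaleImageProcessor hunks in this file boil down to one mapping: log-compress the image, then rescale so its peak lands at max_value_output weighted by the ratio of this image's peak to the running population peak. A stripped-down sketch of that arithmetic on a bare NumPy array follows; the function name and the running_max argument are simplifications of the class's internal bookkeeping.

import numpy as np

def log_rescale(image: np.ndarray, running_max: float, max_value_output: int = 65535) -> np.ndarray:
    # Log-compress; the +1 keeps zero-valued pixels finite (assumes a non-empty, non-all-zero image).
    log_image = np.log(image.astype(np.float64) + 1)
    # Rescale so this image's peak maps to max_value_output * (its own peak / running population peak).
    current_max = float(image.max())
    target_max = max_value_output * (current_max / running_max)
    return log_image * (target_max / np.max(log_image))

# e.g. log_rescale(img, running_max=img.max()) maps the brightest pixel of img to max_value_output.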
Args:: @@ -46,9 +37,7 @@ def __init__( self.cummulative_max_value_input = cummulative_max_value_input self.max_value_output = max_value_output - def _execute( - self, operable: SpotAnalysisOperable, is_last: bool - ) -> list[np.ndarray]: + def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[np.ndarray]: primary_image: np.ndarray = operable.primary_image.nparray current_max_value_input = operable.max_popf @@ -58,9 +47,7 @@ def _execute( if operable.population_statistics != None: self.max_value_input = current_max_value_input else: - self.max_value_input = np.max( - [self.max_value_input, current_max_value_input] - ) + self.max_value_input = np.max([self.max_value_input, current_max_value_input]) else: self.max_value_input = current_max_value_input @@ -74,9 +61,7 @@ def _execute( # log and rescale the image log_image = np.log(primary_image + 1) log_max = np.max(log_image) - target_max_val = self.max_value_output * ( - current_max_value_input / self.max_value_input - ) + target_max_val = self.max_value_output * (current_max_value_input / self.max_value_input) scalar = target_max_val / log_max processed_image = scalar * log_image diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py index 4ed91a16..296a545d 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py @@ -2,12 +2,8 @@ import numpy as np from opencsp.common.lib.cv.CacheableImage import CacheableImage -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import ( - SpotAnalysisOperable, -) -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisPopulationStatistics import ( - SpotAnalysisPopulationStatistics, -) +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisPopulationStatistics import SpotAnalysisPopulationStatistics from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( AbstractSpotAnalysisImagesProcessor, ) @@ -82,9 +78,7 @@ def _calculate_rolling_window( image = operable.primary_image.nparray dims, _ = it.dims_and_nchannels(image) sum_per_color = np.sum(image, axis=(0, 1)) - self._rolling_window_stats.append( - _RollingWindowOperableStats(operable, sum_per_color, dims) - ) + self._rolling_window_stats.append(_RollingWindowOperableStats(operable, sum_per_color, dims)) # calculate statistics first_stat = self._rolling_window_stats[0] @@ -100,11 +94,7 @@ def _calculate_rolling_window( return ret - def _calculate_cummulative( - self, - curr_stats: SpotAnalysisPopulationStatistics, - operable: SpotAnalysisOperable, - ): + def _calculate_cummulative(self, curr_stats: SpotAnalysisPopulationStatistics, operable: SpotAnalysisOperable): """Analyze the given operable and update the cummulative statistics.""" ret: SpotAnalysisPopulationStatistics = dataclasses.replace(curr_stats) @@ -125,18 +115,14 @@ def _calculate_cummulative( return ret - def _execute( - self, operable: SpotAnalysisOperable, is_last: bool - ) -> list[SpotAnalysisOperable]: + def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: ret: list[SpotAnalysisOperable] = [] self.rolling_window_operables.append(operable) if self.curr_stats == None: # We haven't hit the minimum population size 
yet as of the previous call to _execute(). # Maybe we'll reach the minimum population size this time? - if (len(self.initial_operables) < self.min_pop_size - 1) or ( - self.min_pop_size == -1 - ): + if (len(self.initial_operables) < self.min_pop_size - 1) or (self.min_pop_size == -1): if not is_last: # We still haven't reached the minimum population size. self.initial_operables.append(operable) @@ -150,19 +136,11 @@ def _execute( self.curr_stats = self._calculate_rolling_window( self.curr_stats, prior_operable, self.rolling_window_operables ) - self.curr_stats = self._calculate_cummulative( - self.curr_stats, prior_operable - ) - ret.append( - dataclasses.replace( - prior_operable, population_statistics=self.curr_stats - ) - ) + self.curr_stats = self._calculate_cummulative(self.curr_stats, prior_operable) + ret.append(dataclasses.replace(prior_operable, population_statistics=self.curr_stats)) # do some calculations - self.curr_stats = self._calculate_rolling_window( - self.curr_stats, operable, self.rolling_window_operables - ) + self.curr_stats = self._calculate_rolling_window(self.curr_stats, operable, self.rolling_window_operables) self.curr_stats = self._calculate_cummulative(self.curr_stats, operable) ret.append(dataclasses.replace(operable, population_statistics=self.curr_stats)) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py index 321f3190..a173135e 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py @@ -1,15 +1,9 @@ from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( AbstractSpotAnalysisImagesProcessor, ) -from opencsp.common.lib.cv.spot_analysis.image_processor.LogScaleImageProcessor import ( - LogScaleImageProcessor, -) -from opencsp.common.lib.cv.spot_analysis.image_processor.EchoImageProcessor import ( - EchoImageProcessor, -) -from opencsp.common.lib.cv.spot_analysis.image_processor.FalseColorImageProcessor import ( - FalseColorImageProcessor, -) +from opencsp.common.lib.cv.spot_analysis.image_processor.LogScaleImageProcessor import LogScaleImageProcessor +from opencsp.common.lib.cv.spot_analysis.image_processor.EchoImageProcessor import EchoImageProcessor +from opencsp.common.lib.cv.spot_analysis.image_processor.FalseColorImageProcessor import FalseColorImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.PopulationStatisticsImageProcessor import ( PopulationStatisticsImageProcessor, ) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestFalseColorImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestFalseColorImageProcessor.py index 439f6b1f..90d56770 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestFalseColorImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestFalseColorImageProcessor.py @@ -3,12 +3,8 @@ import os from PIL import Image import unittest -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import ( - SpotAnalysisOperable, -) -from opencsp.common.lib.cv.spot_analysis.image_processor.FalseColorImageProcessor import ( - FalseColorImageProcessor, -) +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.image_processor.FalseColorImageProcessor import FalseColorImageProcessor import opencsp.common.lib.tool.file_tools as 
ft import opencsp.common.lib.tool.image_tools as it @@ -32,9 +28,7 @@ def test_jet_large(self): self.assertEqual(large_grayscale_image[1529, 1529], 1529) processor = FalseColorImageProcessor(map_type='large') - operable = processor.process_image(SpotAnalysisOperable(large_grayscale_image))[ - 0 - ] + operable = processor.process_image(SpotAnalysisOperable(large_grayscale_image))[0] actual_result = operable.primary_image.nparray actual_path_name_ext = os.path.join(self.out_dir, "test_jet_large.png") it.numpy_to_image(actual_result, 'clip').save(actual_path_name_ext) @@ -56,9 +50,7 @@ def test_jet_human(self): self.assertEqual(large_grayscale_image[1019, 1019], 1019) processor = FalseColorImageProcessor(map_type='human') - operable = processor.process_image(SpotAnalysisOperable(large_grayscale_image))[ - 0 - ] + operable = processor.process_image(SpotAnalysisOperable(large_grayscale_image))[0] actual_result = operable.primary_image.nparray actual_path_name_ext = os.path.join(self.out_dir, "test_jet_human.png") it.numpy_to_image(actual_result, 'clip').save(actual_path_name_ext) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestPopulationStatisticsImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestPopulationStatisticsImageProcessor.py index a3fd961c..2ff0de2b 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestPopulationStatisticsImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestPopulationStatisticsImageProcessor.py @@ -3,9 +3,7 @@ import os import unittest from opencsp.common.lib.cv.CacheableImage import CacheableImage -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import ( - SpotAnalysisOperable, -) +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable from opencsp.common.lib.cv.spot_analysis.image_processor.PopulationStatisticsImageProcessor import ( PopulationStatisticsImageProcessor, ) @@ -16,12 +14,8 @@ class TestPopulationStatisticsImageProcessor(unittest.TestCase): def setUp(self) -> None: path, _, _ = ft.path_components(__file__) - self.data_dir = os.path.join( - path, "data", "input", "PopulationStatisticsImageProcessor" - ) - self.out_dir = os.path.join( - path, "data", "output", "PopulationStatisticsImageProcessor" - ) + self.data_dir = os.path.join(path, "data", "input", "PopulationStatisticsImageProcessor") + self.out_dir = os.path.join(path, "data", "output", "PopulationStatisticsImageProcessor") im1 = np.array([[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]]) im2 = np.array([[[2, 2, 2], [2, 2, 2]], [[2, 2, 2], [2, 2, 2]]]) @@ -33,21 +27,15 @@ def setUp(self) -> None: self.processor._allowed_memory_footprint = pow(2, 30) def test_min_max_pop1(self): - stats = self.processor.process_image(self.operables[0], is_last=False)[ - 0 - ].population_statistics + stats = self.processor.process_image(self.operables[0], is_last=False)[0].population_statistics nptest.assert_array_equal(stats.minf, np.array([1, 1, 1])) nptest.assert_array_equal(stats.maxf, np.array([1, 1, 1])) - stats = self.processor.process_image(self.operables[1], is_last=False)[ - 0 - ].population_statistics + stats = self.processor.process_image(self.operables[1], is_last=False)[0].population_statistics nptest.assert_array_equal(stats.minf, np.array([1, 1, 1])) nptest.assert_array_equal(stats.maxf, np.array([2, 2, 2])) - stats = self.processor.process_image(self.operables[2], is_last=True)[ - 0 - ].population_statistics + stats = 
self.processor.process_image(self.operables[2], is_last=True)[0].population_statistics nptest.assert_array_equal(stats.minf, np.array([0, 1, 1])) nptest.assert_array_equal(stats.maxf, np.array([3, 4, 5])) @@ -97,19 +85,13 @@ def test_is_last(self): self.assertEqual(len(operables), 3) def test_avg_pop1(self): - stats = self.processor.process_image(self.operables[0], is_last=False)[ - 0 - ].population_statistics + stats = self.processor.process_image(self.operables[0], is_last=False)[0].population_statistics nptest.assert_array_almost_equal(stats.avgf_rolling_window, np.array([1, 1, 1])) - stats = self.processor.process_image(self.operables[1], is_last=False)[ - 0 - ].population_statistics + stats = self.processor.process_image(self.operables[1], is_last=False)[0].population_statistics nptest.assert_array_almost_equal(stats.avgf_rolling_window, np.array([2, 2, 2])) - stats = self.processor.process_image(self.operables[2], is_last=True)[ - 0 - ].population_statistics + stats = self.processor.process_image(self.operables[2], is_last=True)[0].population_statistics expected = np.array([0 + 1 + 2 + 3, 1 + 2 + 3 + 4, 2 + 3 + 4 + 5]) / 4 nptest.assert_array_almost_equal(stats.avgf_rolling_window, expected) @@ -122,12 +104,8 @@ def test_avg_pop3(self): operables = self.processor.process_image(self.operables[2], is_last=True) stats = [operable.population_statistics for operable in operables] - nptest.assert_array_almost_equal( - stats[0].avgf_rolling_window, np.array([1, 1, 1]) - ) - nptest.assert_array_almost_equal( - stats[1].avgf_rolling_window, np.array([1.5, 1.5, 1.5]) - ) + nptest.assert_array_almost_equal(stats[0].avgf_rolling_window, np.array([1, 1, 1])) + nptest.assert_array_almost_equal(stats[1].avgf_rolling_window, np.array([1.5, 1.5, 1.5])) expected = np.array([3 + 4 + 5 + 6, 4 + 5 + 6 + 7, 5 + 6 + 7 + 8]) / (3 * 4) nptest.assert_array_almost_equal(stats[2].avgf_rolling_window, expected) diff --git a/opencsp/common/lib/cv/test/test_OpticalFlow.py b/opencsp/common/lib/cv/test/test_OpticalFlow.py index e298f168..13dcce48 100644 --- a/opencsp/common/lib/cv/test/test_OpticalFlow.py +++ b/opencsp/common/lib/cv/test/test_OpticalFlow.py @@ -18,24 +18,10 @@ def setUpClass(cls) -> None: ret = super().setUpClass() cls.src_img_dir = os.path.join( - orp.opencsp_code_dir(), - 'common', - 'lib', - 'cv', - 'test', - 'data', - 'input', - 'OpticalFlow', + orp.opencsp_code_dir(), 'common', 'lib', 'cv', 'test', 'data', 'input', 'OpticalFlow' ) cls.dst_dir = os.path.join( - orp.opencsp_code_dir(), - 'common', - 'lib', - 'cv', - 'test', - 'data', - 'output', - 'OpticalFlow', + orp.opencsp_code_dir(), 'common', 'lib', 'cv', 'test', 'data', 'output', 'OpticalFlow' ) cls.src_img_file = os.path.join(cls.src_img_dir, "20210513F08f9800_w400.jpg") cls.tmp_img_dir = os.path.join(cls.dst_dir, "tmp") @@ -58,9 +44,7 @@ def setUp(self) -> None: return ret - def _prep_ref_img( - self, travel: int, frame_motion_dir="up", secondary_frame_motion_dir=None - ): + def _prep_ref_img(self, travel: int, frame_motion_dir="up", secondary_frame_motion_dir=None): """Crop and save image files for testing. 
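The OpticalFlow tests reflowed in this file assert on the average magnitude and median angle of a dense flow field. The class's internals are not shown in this patch; as a reference point only, OpenCV's Farneback dense flow is one standard way to obtain such magnitude/angle fields. This is a sketch, not the class's actual implementation.

import cv2
import numpy as np

def dense_mag_ang(img1_gray: np.ndarray, img2_gray: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # Farneback dense optical flow: per-pixel (dx, dy) displacement from img1 to img2.
    flow = cv2.calcOpticalFlowFarneback(img1_gray, img2_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    # Convert to polar form: magnitude in pixels, angle in radians in [0, 2*pi).
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    return mag, ang

# Content translated left by ~10 px should give average mag near 10 and average ang near pi,
# matching the kind of assertions made in the test hunks of this file.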
Args: @@ -130,13 +114,7 @@ def _prep_ref_img( def _prep_limit_flow(self, magvals, angvals): img1_name_ext, img2_name_ext = self._prep_ref_img(10, "right") - flow = of.OpticalFlow( - self.tmp_img_dir, - img1_name_ext, - self.tmp_img_dir, - img2_name_ext, - cache=self.can_cache, - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=self.can_cache) flow.mag = np.array(magvals) flow.ang = np.array(angvals) return flow @@ -144,9 +122,7 @@ def _prep_limit_flow(self, magvals, angvals): def test_left(self): """Frame has moved left, image content has translated right""" img1_name_ext, img2_name_ext = self._prep_ref_img(10, "left") - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext) mag, ang = flow.dense() # left is a special case, because some of the angles will be ~0 and some will be ~2pi ang = (ang + np.pi) % (2 * np.pi) - np.pi @@ -155,106 +131,79 @@ def test_left(self): # bonus test! can we save without error? Image.fromarray(flow.to_img()).save( - os.path.join( - self.dst_dir, "frame_translate_left__image_data_translate_right.jpg" - ) + os.path.join(self.dst_dir, "frame_translate_left__image_data_translate_right.jpg") ) def test_left_down(self): """Frame has moved left and down, image content has translated right and up""" img1_name_ext, img2_name_ext = self._prep_ref_img(10, "left", "down") - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext) mag, ang = flow.dense() self.assertAlmostEqual(np.pi * (1 / 4), np.median(ang), delta=np.pi / 15) self.assertAlmostEqual(math.sqrt(10**2 + 10**2), np.average(mag), delta=0.3) # bonus test! can we save without error? Image.fromarray(flow.to_img()).save( - os.path.join( - self.dst_dir, - "frame_translate_left_down__image_data_translate_right_up.jpg", - ) + os.path.join(self.dst_dir, "frame_translate_left_down__image_data_translate_right_up.jpg") ) def test_down(self): """Frame has moved down, image content has translated up""" img1_name_ext, img2_name_ext = self._prep_ref_img(10, "down") - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext) mag, ang = flow.dense() self.assertAlmostEqual(10, np.average(mag), delta=0.3) self.assertAlmostEqual(np.pi * (1 / 2), np.average(ang), delta=np.pi / 15) # bonus test! can we save without error? Image.fromarray(flow.to_img()).save( - os.path.join( - self.dst_dir, "frame_translate_down__image_data_translate_up.jpg" - ) + os.path.join(self.dst_dir, "frame_translate_down__image_data_translate_up.jpg") ) def test_right(self): """Frame has moved right, image content has translated left""" img1_name_ext, img2_name_ext = self._prep_ref_img(10, "right") - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext) mag, ang = flow.dense() self.assertAlmostEqual(10, np.average(mag), delta=0.3) self.assertAlmostEqual(np.pi * (2 / 2), np.average(ang), delta=np.pi / 15) # bonus test! can we save without error? 
Image.fromarray(flow.to_img()).save( - os.path.join( - self.dst_dir, "frame_translate_right__image_data_translate_left.jpg" - ) + os.path.join(self.dst_dir, "frame_translate_right__image_data_translate_left.jpg") ) def test_up(self): """Frame has moved up, image content has translated down""" img1_name_ext, img2_name_ext = self._prep_ref_img(10, "up") - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext) mag, ang = flow.dense() self.assertAlmostEqual(10, np.average(mag), delta=0.3) self.assertAlmostEqual(np.pi * (3 / 2), np.average(ang), delta=np.pi / 15) # bonus test! can we save without error? Image.fromarray(flow.to_img()).save( - os.path.join( - self.dst_dir, "frame_translate_up__image_data_translate_down.jpg" - ) + os.path.join(self.dst_dir, "frame_translate_up__image_data_translate_down.jpg") ) def test_cache_1(self): if not self.can_cache: return img1_name_ext, img2_name_ext = self._prep_ref_img(10, "right") - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext) flow.clear_cache() # check that the cache doesn't exist - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True) cache_file, cache_idx = flow._get_existing_cache_file() self.assertEqual(None, cache_file) # generate the cache files - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True) flow.dense() # check that the cache exists - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True) cache_file, cache_idx = flow._get_existing_cache_file() self.assertEqual(0, cache_idx) @@ -263,54 +212,36 @@ def test_cache_2(self): return img1_name_ext, img2_name_ext = self._prep_ref_img(10, "right") img3, img4 = self._prep_ref_img(10, "left") - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True) flow.clear_cache() # check that the cache doesn't exist - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True) cache_file, cache_idx = flow._get_existing_cache_file() self.assertEqual(None, cache_file) - flow = of.OpticalFlow( - self.tmp_img_dir, img3, self.tmp_img_dir, img4, cache=True - ) + flow = of.OpticalFlow(self.tmp_img_dir, img3, self.tmp_img_dir, img4, cache=True) cache_file, cache_idx = flow._get_existing_cache_file() self.assertEqual(None, cache_file) # generate the cache file for A - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True) flow.dense() # check that the cache exists/doesn't exist - flow = of.OpticalFlow( - self.tmp_img_dir, img3, self.tmp_img_dir, img4, 
cache=True - ) + flow = of.OpticalFlow(self.tmp_img_dir, img3, self.tmp_img_dir, img4, cache=True) cache_file, cache_idx = flow._get_existing_cache_file() self.assertEqual(None, cache_file) - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True) cache_file, cache_idx = flow._get_existing_cache_file() self.assertEqual(0, cache_idx) # generate the cache file for B - flow = of.OpticalFlow( - self.tmp_img_dir, img3, self.tmp_img_dir, img4, cache=True - ) + flow = of.OpticalFlow(self.tmp_img_dir, img3, self.tmp_img_dir, img4, cache=True) flow.dense() # check that the cache files exist - flow = of.OpticalFlow( - self.tmp_img_dir, img3, self.tmp_img_dir, img4, cache=True - ) + flow = of.OpticalFlow(self.tmp_img_dir, img3, self.tmp_img_dir, img4, cache=True) cache_file, cache_idx = flow._get_existing_cache_file() self.assertEqual(0, cache_idx) - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext, cache=True) cache_file, cache_idx = flow._get_existing_cache_file() self.assertEqual(1, cache_idx) @@ -384,9 +315,7 @@ def test_limit_by_angle_inside_bothneg(self): def test_save_load(self): """When we save and load, do we get the same results back?""" img1_name_ext, img2_name_ext = self._prep_ref_img(10, "left") - flow = of.OpticalFlow( - self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext - ) + flow = of.OpticalFlow(self.tmp_img_dir, img1_name_ext, self.tmp_img_dir, img2_name_ext) flow.dense() # convert to integers to enable perfect comparisons before/after save @@ -413,20 +342,10 @@ def test_save_load(self): # load, verify equal mag2, ang2 = flow.load(dir, name_ext) - self.assertTrue( - np.array_equal(_mag, flow._mag), - "_Magnitude (private) matrices not equal after load+save", - ) - self.assertTrue( - np.array_equal(_ang, flow._ang), - "_Angle (private) matrices not equal after load+save", - ) - self.assertTrue( - np.array_equal(mag, mag2), "Magnitude matrices not equal after load+save" - ) - self.assertTrue( - np.array_equal(ang, ang2), "Angle matrices not equal after load+save" - ) + self.assertTrue(np.array_equal(_mag, flow._mag), "_Magnitude (private) matrices not equal after load+save") + self.assertTrue(np.array_equal(_ang, flow._ang), "_Angle (private) matrices not equal after load+save") + self.assertTrue(np.array_equal(mag, mag2), "Magnitude matrices not equal after load+save") + self.assertTrue(np.array_equal(ang, ang2), "Angle matrices not equal after load+save") if __name__ == '__main__': diff --git a/opencsp/common/lib/deflectometry/CalibrationCameraPosition.py b/opencsp/common/lib/deflectometry/CalibrationCameraPosition.py index 4747fe42..df6ab939 100644 --- a/opencsp/common/lib/deflectometry/CalibrationCameraPosition.py +++ b/opencsp/common/lib/deflectometry/CalibrationCameraPosition.py @@ -30,11 +30,7 @@ class CalibrationCameraPosition: """ def __init__( - self, - camera: Camera, - pts_xyz_corners: Vxyz, - ids_corners: ndarray, - cal_image: ndarray, + self, camera: Camera, pts_xyz_corners: Vxyz, ids_corners: ndarray, cal_image: ndarray ) -> 'CalibrationCameraPosition': """Instantiates class @@ -83,10 +79,8 @@ def collect_corner_xyz_locations(self) -> None: # Get index of current marker index = ids_corners_list.index(marker_id * 4) # Extract calibrated corner locations (4 corners 
per marker) - self.pts_xyz_active_corner_locations = ( - self.pts_xyz_active_corner_locations.concatenate( - self.pts_xyz_corners[index : index + 4] - ) + self.pts_xyz_active_corner_locations = self.pts_xyz_active_corner_locations.concatenate( + self.pts_xyz_corners[index : index + 4] ) def calculate_camera_pose(self) -> None: @@ -96,10 +90,7 @@ def calculate_camera_pose(self) -> None: # Calculate rvec/tvec ret, rvec, tvec = cv.solvePnP( - self.pts_xyz_active_corner_locations.data.T, - pts_img, - self.camera.intrinsic_mat, - self.camera.distortion_coef, + self.pts_xyz_active_corner_locations.data.T, pts_img, self.camera.intrinsic_mat, self.camera.distortion_coef ) if not ret: raise ValueError('Camera calibration was not successful.') @@ -109,9 +100,7 @@ def calculate_camera_pose(self) -> None: self.rot_screen_cam = Rotation.from_rotvec(rvec) self.v_cam_screen_cam = Vxyz(tvec) - self.v_cam_screen_screen = self.v_cam_screen_cam.rotate( - self.rot_screen_cam.inv() - ) + self.v_cam_screen_screen = self.v_cam_screen_cam.rotate(self.rot_screen_cam.inv()) lt.info('Camera pose calculated:') lt.info(f'rvec: {self.rot_screen_cam.as_rotvec()}') @@ -143,9 +132,7 @@ def calculate_reprojection_error(self) -> None: """Calculates reprojection error""" # Project points self.pts_xy_marker_corners_reprojected = self.camera.project( - self.pts_xyz_active_corner_locations, - self.rot_screen_cam, - self.v_cam_screen_cam, + self.pts_xyz_active_corner_locations, self.rot_screen_cam, self.v_cam_screen_cam ) # Calculate errors @@ -178,24 +165,11 @@ def plot_reprojection_error(self) -> None: pts_img = np.vstack(self.pts_xy_marker_corners_list) ax.imshow(self.image, cmap='gray') - ax.scatter( - *pts_img.T, edgecolor='green', facecolor='none', label='Image Points' - ) - ax.scatter( - *self.pts_xy_marker_corners_reprojected.data, - marker='.', - color='blue', - label='Reprojected', - ) + ax.scatter(*pts_img.T, edgecolor='green', facecolor='none', label='Image Points') + ax.scatter(*self.pts_xy_marker_corners_reprojected.data, marker='.', color='blue', label='Reprojected') dx = self.errors_reprojection_xy.x dy = -self.errors_reprojection_xy.y - ax.quiver( - *self.pts_xy_marker_corners_reprojected.data, - dx, - dy, - label='Error', - color='red', - ) + ax.quiver(*self.pts_xy_marker_corners_reprojected.data, dx, dy, label='Error', color='red') ax.legend() ax.axis('off') diff --git a/opencsp/common/lib/deflectometry/ImageProjection.py b/opencsp/common/lib/deflectometry/ImageProjection.py index 8c214ff8..6c7105e2 100644 --- a/opencsp/common/lib/deflectometry/ImageProjection.py +++ b/opencsp/common/lib/deflectometry/ImageProjection.py @@ -44,12 +44,8 @@ def __init__(self, size_x: int, size_y: int) -> 'CalParams': y_ax = np.linspace(dy, size_y - dy, ny).astype(int) # pixels self.x_pixel_axis: np.ndarray[int] = x_ax # pixels self.y_pixel_axis: np.ndarray[int] = y_ax # pixels - self.x_screen_axis: np.ndarray[float] = 1 - ( - self.x_pixel_axis.astype(float) + 0.5 - ) / float(size_x) - self.y_screen_axis: np.ndarray[float] = 1 - ( - self.y_pixel_axis.astype(float) + 0.5 - ) / float(size_y) + self.x_screen_axis: np.ndarray[float] = 1 - (self.x_pixel_axis.astype(float) + 0.5) / float(size_x) + self.y_screen_axis: np.ndarray[float] = 1 - (self.y_pixel_axis.astype(float) + 0.5) / float(size_y) # Fiducial x/y locations in image, pixels x_mat_pixel, y_mat_pixel = np.meshgrid(self.x_pixel_axis, self.y_pixel_axis) self.x_pixel: np.ndarray[int] = x_mat_pixel.flatten() @@ -97,10 +93,7 @@ def __init__(self, root: tkinter.Tk, display_data: 
dict): # Create black image image = self._format_image( - np.zeros( - (self.win_size_y, self.win_size_x, 3), - dtype=self.display_data['projector_data_type'], - ) + np.zeros((self.win_size_y, self.win_size_x, 3), dtype=self.display_data['projector_data_type']) ) self.canvas_image = self.canvas.create_image(0, 0, image=image, anchor='nw') @@ -172,12 +165,7 @@ def upate_window(self, display_data: dict) -> None: # Resize window self.root.geometry( - '{:d}x{:d}+{:d}+{:d}'.format( - self.win_size_x, - self.win_size_y, - self.win_position_x, - self.win_position_y, - ) + '{:d}x{:d}+{:d}+{:d}'.format(self.win_size_x, self.win_size_y, self.win_position_x, self.win_position_y) ) # Resize canvas size @@ -189,13 +177,7 @@ def show_crosshairs(self) -> None: """ # Add white active region - array = ( - np.ones( - (self.size_y, self.size_x, 3), - dtype=self.display_data['projector_data_type'], - ) - * self.max_int - ) + array = np.ones((self.size_y, self.size_x, 3), dtype=self.display_data['projector_data_type']) * self.max_int # Add crosshairs vertical array[:, self.x_active_mid, :] = 0 @@ -221,13 +203,7 @@ def show_axes(self) -> None: """ # Add white active region - array = ( - np.ones( - (self.size_y, self.size_x, 3), - dtype=self.display_data['projector_data_type'], - ) - * self.max_int - ) + array = np.ones((self.size_y, self.size_x, 3), dtype=self.display_data['projector_data_type']) * self.max_int # Add arrows width = int(np.min([self.size_x, self.size_y]) / 4) @@ -261,13 +237,7 @@ def show_axes(self) -> None: # Add Y text font = cv.FONT_HERSHEY_PLAIN array = cv.putText( - array, - 'Y', - (self.x_active_mid + 20, self.y_active_mid + width + 20), - font, - 6, - (0, int(self.max_int), 0), - 2, + array, 'Y', (self.x_active_mid + 20, self.y_active_mid + width + 20), font, 6, (0, int(self.max_int), 0), 2 ) # Display image @@ -284,9 +254,7 @@ def show_calibration_image(self): pattern_params = CalParams(self.size_x, self.size_y) # Add fiducials - for x_loc, y_loc, idx in zip( - pattern_params.x_pixel, pattern_params.y_pixel, pattern_params.index - ): + for x_loc, y_loc, idx in zip(pattern_params.x_pixel, pattern_params.y_pixel, pattern_params.index): # Place fiducial array[y_loc, x_loc, 1] = self.max_int # Place label (offset so label is in view) @@ -301,14 +269,7 @@ def show_calibration_image(self): else: dy = -10 # Draw text - cv.putText( - array, - f'{idx:d}', - (x_loc + dx, y_loc + dy), - cv.FONT_HERSHEY_PLAIN, - 1, - (0, 255, 0), - ) + cv.putText(array, f'{idx:d}', (x_loc + dx, y_loc + dy), cv.FONT_HERSHEY_PLAIN, 1, (0, 255, 0)) # Display with black border self.display_image_in_active_area(array) @@ -326,9 +287,7 @@ def display_image(self, array: np.ndarray) -> None: """ # Check image is RGB if np.ndim(array) != 3 or array.shape[2] != 3: - raise ValueError( - 'Input array must have 3 dimensions and dimension 2 must be length 3.' - ) + raise ValueError('Input array must have 3 dimensions and dimension 2 must be length 3.') # Check array is correct xy shape if array.shape[0] != self.win_size_y or array.shape[1] != self.win_size_x: @@ -359,9 +318,7 @@ def display_image_in_active_area(self, array: np.ndarray) -> None: """ # Check image is RGB if np.ndim(array) != 3 or array.shape[2] != 3: - raise ValueError( - 'Input array must have 3 dimensions and dimension 2 must be length 3.' 
- ) + raise ValueError('Input array must have 3 dimensions and dimension 2 must be length 3.') # Check array is correct xy shape if array.shape[0] != self.size_y or array.shape[1] != self.size_x: @@ -372,13 +329,8 @@ def display_image_in_active_area(self, array: np.ndarray) -> None: ) # Create black image and place array in correct position - array_out = np.zeros( - (self.win_size_y, self.win_size_x, 3), - dtype=self.display_data['projector_data_type'], - ) - array_out[ - self.y_active_1 : self.y_active_2, self.x_active_1 : self.x_active_2, : - ] = array + array_out = np.zeros((self.win_size_y, self.win_size_x, 3), dtype=self.display_data['projector_data_type']) + array_out[self.y_active_1 : self.y_active_2, self.x_active_1 : self.x_active_2, :] = array # Display image self.display_image(array_out) diff --git a/opencsp/common/lib/deflectometry/ImageProjectionSetupGUI.py b/opencsp/common/lib/deflectometry/ImageProjectionSetupGUI.py index d209e00a..a18da57e 100644 --- a/opencsp/common/lib/deflectometry/ImageProjectionSetupGUI.py +++ b/opencsp/common/lib/deflectometry/ImageProjectionSetupGUI.py @@ -73,25 +73,7 @@ def __init__(self): 'Blue Shift Y', 'GUI X Position', ] - self.data_types = [ - str, - int, - int, - int, - int, - int, - int, - int, - int, - str, - int, - int, - int, - int, - int, - int, - int, - ] + self.data_types = [str, int, int, int, int, int, int, int, int, str, int, int, int, int, int, int, int] # Declare variables self.projector: ImageProjection @@ -135,60 +117,42 @@ def create_layout(self): self.data_cells.append(e) # Show projector button - self.btn_show_proj = tkinter.Button( - self.root, text='Show Display', command=self.show_projector - ) + self.btn_show_proj = tkinter.Button(self.root, text='Show Display', command=self.show_projector) r += 1 self.btn_show_proj.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') # Update projector button - self.btn_update_proj = tkinter.Button( - self.root, text='Update All', command=self.update_windows - ) + self.btn_update_proj = tkinter.Button(self.root, text='Update All', command=self.update_windows) r += 1 self.btn_update_proj.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') # Close display button - self.btn_close_proj = tkinter.Button( - self.root, text='Close Display', command=self.close_projector - ) + self.btn_close_proj = tkinter.Button(self.root, text='Close Display', command=self.close_projector) r += 1 self.btn_close_proj.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') # Show crosshairs - self.btn_crosshairs = tkinter.Button( - self.root, text='Show Crosshairs', command=self.update_windows - ) + self.btn_crosshairs = tkinter.Button(self.root, text='Show Crosshairs', command=self.update_windows) r += 1 self.btn_crosshairs.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') # Show axes button - self.btn_axes = tkinter.Button( - self.root, text='Show Display Axes', command=self.show_axes - ) + self.btn_axes = tkinter.Button(self.root, text='Show Display Axes', command=self.show_axes) r += 1 self.btn_axes.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') # Show calibration image button - self.btn_calib = tkinter.Button( - self.root, - text='Show calibration image', - command=self.show_calibration_image, - ) + self.btn_calib = tkinter.Button(self.root, text='Show calibration image', command=self.show_calibration_image) r += 1 self.btn_calib.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') # Save as button - self.btn_save = tkinter.Button( - self.root, text='Save as HDF...', command=self.save_as - ) + 
self.btn_save = tkinter.Button(self.root, text='Save as HDF...', command=self.save_as) r += 1 self.btn_save.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') # Load button - self.btn_load = tkinter.Button( - self.root, text='Load from HDF...', command=self.load_from - ) + self.btn_load = tkinter.Button(self.root, text='Load from HDF...', command=self.load_from) r += 1 self.btn_load.grid(row=r, column=0, pady=2, padx=2, sticky='nesw') @@ -294,9 +258,7 @@ def save_as(self): """ # Get save file name - file = asksaveasfilename( - defaultextension='.h5', filetypes=[("HDF5 File", "*.h5")] - ) + file = asksaveasfilename(defaultextension='.h5', filetypes=[("HDF5 File", "*.h5")]) # Save file as HDF if file != '': @@ -308,9 +270,7 @@ def load_from(self): """ # Get file name - file = askopenfilename( - defaultextension='.h5', filetypes=[("HDF5 File", "*.h5")] - ) + file = askopenfilename(defaultextension='.h5', filetypes=[("HDF5 File", "*.h5")]) # Load file if file != '': @@ -327,9 +287,7 @@ def get_user_data(self): # Gets data from user input boxes and saves in class data = {} - for dtype, name, entry in zip( - self.data_types, self.data_names, self.data_cells - ): + for dtype, name, entry in zip(self.data_types, self.data_names, self.data_cells): data.update({name: dtype(entry.get())}) self.display_data = data @@ -344,15 +302,11 @@ def check_inputs(self) -> bool: """ # Checks inputs are correct - for name, dtype, entry in zip( - self.data_labels, self.data_types, self.data_cells - ): + for name, dtype, entry in zip(self.data_labels, self.data_types, self.data_cells): try: dtype(entry.get()) except ValueError: - messagebox.showerror( - 'Invalid input', f'Input for "{name:s}" must be {dtype}' - ) + messagebox.showerror('Invalid input', f'Input for "{name:s}" must be {dtype}') return False return True diff --git a/opencsp/common/lib/deflectometry/ParamsSlopeSolverAbstract.py b/opencsp/common/lib/deflectometry/ParamsSlopeSolverAbstract.py index 342a5a71..04232322 100644 --- a/opencsp/common/lib/deflectometry/ParamsSlopeSolverAbstract.py +++ b/opencsp/common/lib/deflectometry/ParamsSlopeSolverAbstract.py @@ -7,5 +7,6 @@ class ParamsSlopeSolverAbstract(ABC): """Abstract SlopeSolver input parameters class. Contains parameters common to all surface types. 
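The ParamsSlopeSolver* hunks in this and the following chunk are plain parameter dataclasses: the abstract base carries robust_least_squares and downsample, the paraboloid variant adds initial_focal_lengths_xy, and the plano variant adds nothing. A hypothetical instantiation sketch follows; the import paths mirror the file paths in this patch, the keyword-argument construction assumes the bases behave as ordinary dataclasses, and the numeric values are placeholders rather than recommended settings.

from opencsp.common.lib.deflectometry.ParamsSlopeSolverParaboloid import ParamsSlopeSolverParaboloid
from opencsp.common.lib.deflectometry.ParamsSlopeSolverPlano import ParamsSlopeSolverPlano

# Parabolic surface: the shared fields plus an initial focal-length guess in x and y.
params_parabolic = ParamsSlopeSolverParaboloid(
    robust_least_squares=True, downsample=10, initial_focal_lengths_xy=(100.0, 100.0)
)

# Plano (flat) surface: only the shared fields apply.
params_plano = ParamsSlopeSolverPlano(robust_least_squares=True, downsample=10)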
""" + robust_least_squares: bool downsample: int diff --git a/opencsp/common/lib/deflectometry/ParamsSlopeSolverParaboloid.py b/opencsp/common/lib/deflectometry/ParamsSlopeSolverParaboloid.py index 2add6c2e..b0f44d78 100644 --- a/opencsp/common/lib/deflectometry/ParamsSlopeSolverParaboloid.py +++ b/opencsp/common/lib/deflectometry/ParamsSlopeSolverParaboloid.py @@ -5,6 +5,6 @@ @dataclass class ParamsSlopeSolverParaboloid(ParamsSlopeSolverAbstract): - """SlopeSolver input parameters class for parabolic surface type - """ + """SlopeSolver input parameters class for parabolic surface type""" + initial_focal_lengths_xy: tuple[float, float] diff --git a/opencsp/common/lib/deflectometry/ParamsSlopeSolverPlano.py b/opencsp/common/lib/deflectometry/ParamsSlopeSolverPlano.py index 88e9c943..0bb0c461 100644 --- a/opencsp/common/lib/deflectometry/ParamsSlopeSolverPlano.py +++ b/opencsp/common/lib/deflectometry/ParamsSlopeSolverPlano.py @@ -5,5 +5,4 @@ @dataclass class ParamsSlopeSolverPlano(ParamsSlopeSolverAbstract): - """SlopeSolver input parameters class for plano (perfectly flat) surface type - """ + """SlopeSolver input parameters class for plano (perfectly flat) surface type""" diff --git a/opencsp/common/lib/deflectometry/SlopeSolver.py b/opencsp/common/lib/deflectometry/SlopeSolver.py index b88579ea..b6701cdf 100644 --- a/opencsp/common/lib/deflectometry/SlopeSolver.py +++ b/opencsp/common/lib/deflectometry/SlopeSolver.py @@ -128,9 +128,7 @@ def fit_surface(self) -> None: self._plot_debug_plots(idx1, idx2) # Calculate measure point intersection point with existing fitting function - v_meas_pts_surf_int_optic = self.surface.intersect( - u_measure_pixel_pointing_optic, v_optic_cam_optic - ) + v_meas_pts_surf_int_optic = self.surface.intersect(u_measure_pixel_pointing_optic, v_optic_cam_optic) # Calculate design normal at alignment point n_design = self.surface.normal_design_at_align_point() @@ -182,38 +180,28 @@ def solve_slopes(self) -> None: """ # Check alignment has been completed if self._data.trans_alignment is None: - raise ValueError( - 'Initial alignment needs to be completed before final slope fitting (self.fit_surface).' 
- ) + raise ValueError('Initial alignment needs to be completed before final slope fitting (self.fit_surface).') # Apply alignment transforms about alignment point trans_shift_1 = TransformXYZ.from_V(-self.surface.v_align_point_optic) trans_shift_2 = TransformXYZ.from_V(self.surface.v_align_point_optic) trans: TransformXYZ = trans_shift_2 * self._data.trans_alignment * trans_shift_1 - u_active_pixel_pointing_optic = self.u_active_pixel_pointing_optic.rotate( - trans.R - ) + u_active_pixel_pointing_optic = self.u_active_pixel_pointing_optic.rotate(trans.R) v_screen_points_facet = trans.apply(self.v_screen_points_facet) # Calculate intersection points on optic surface - v_surf_points_facet = self.surface.intersect( - u_active_pixel_pointing_optic, self.surface.v_optic_cam_optic - ) + v_surf_points_facet = self.surface.intersect(u_active_pixel_pointing_optic, self.surface.v_optic_cam_optic) # Calculate pixel slopes (assuming parabolic surface intersection) - slopes_facet_xy = sf2.calc_slopes( - v_surf_points_facet, self.surface.v_optic_cam_optic, v_screen_points_facet - ) + slopes_facet_xy = sf2.calc_slopes(v_surf_points_facet, self.surface.v_optic_cam_optic, v_screen_points_facet) self._data.v_surf_points_facet = v_surf_points_facet self._data.slopes_facet_xy = slopes_facet_xy def _plot_debug_plots(self, idx1: int, idx2: int): # Create figure and axes - if self.debug.slope_solver_single_plot and isinstance( - self.debug.slope_solver_figures, list - ): + if self.debug.slope_solver_single_plot and isinstance(self.debug.slope_solver_figures, list): # Create first figure if needed fig = plt.figure() axes = fig.add_subplot(projection='3d') diff --git a/opencsp/common/lib/deflectometry/Surface2DAbstract.py b/opencsp/common/lib/deflectometry/Surface2DAbstract.py index 34ebc5d9..928e5612 100644 --- a/opencsp/common/lib/deflectometry/Surface2DAbstract.py +++ b/opencsp/common/lib/deflectometry/Surface2DAbstract.py @@ -98,59 +98,33 @@ def plot_intersection_points( """ # Plot intersection points surface axes.plot_trisurf( - *self.v_surf_int_pts_optic[::downsample].data, - edgecolor='none', - alpha=0.5, - linewidth=0, - antialiased=False + *self.v_surf_int_pts_optic[::downsample].data, edgecolor='none', alpha=0.5, linewidth=0, antialiased=False ) # Plot camera rays if camera_ray_length != 0: for ray in self.u_active_pixel_pointing_optic[::downsample]: - x = [ - self.v_optic_cam_optic.x, - self.v_optic_cam_optic.x + ray.x * camera_ray_length, - ] - y = [ - self.v_optic_cam_optic.y, - self.v_optic_cam_optic.y + ray.y * camera_ray_length, - ] - z = [ - self.v_optic_cam_optic.z, - self.v_optic_cam_optic.z + ray.z * camera_ray_length, - ] + x = [self.v_optic_cam_optic.x, self.v_optic_cam_optic.x + ray.x * camera_ray_length] + y = [self.v_optic_cam_optic.y, self.v_optic_cam_optic.y + ray.y * camera_ray_length] + z = [self.v_optic_cam_optic.z, self.v_optic_cam_optic.z + ray.z * camera_ray_length] axes.plot(x, y, z, color='gray', alpha=0.3) # Plot fit normal at align point v_fit = self.normal_fit_at_align_point() pt1 = self.v_align_point_optic pt2 = self.v_align_point_optic + v_fit - axes.plot( - [pt1.x, pt2.x], [pt1.y, pt2.y], [pt1.z, pt2.z], color='k', linestyle='-' - ) + axes.plot([pt1.x, pt2.x], [pt1.y, pt2.y], [pt1.z, pt2.z], color='k', linestyle='-') # Plot design normal at align point v_des = self.normal_design_at_align_point() pt1 = self.v_align_point_optic pt2 = self.v_align_point_optic + v_des - axes.plot( - [pt1.x, pt2.x], [pt1.y, pt2.y], [pt1.z, pt2.z], color='k', linestyle='--' - ) + 
axes.plot([pt1.x, pt2.x], [pt1.y, pt2.y], [pt1.z, pt2.z], color='k', linestyle='--') # Plot other points - axes.scatter( - *self.v_align_point_optic.data, marker='o', color='r', label='Align Point' - ) + axes.scatter(*self.v_align_point_optic.data, marker='o', color='r', label='Align Point') if plot_camera_screen_points: - axes.scatter( - *self.v_optic_cam_optic.data, marker='*', color='k', label='Camera' - ) - axes.scatter( - *self.v_optic_screen_optic.data, - marker='+', - color='b', - label='Screen Center' - ) + axes.scatter(*self.v_optic_cam_optic.data, marker='*', color='k', label='Camera') + axes.scatter(*self.v_optic_screen_optic.data, marker='+', color='b', label='Screen Center') # Format axes.axis('equal') diff --git a/opencsp/common/lib/deflectometry/Surface2DParabolic.py b/opencsp/common/lib/deflectometry/Surface2DParabolic.py index bcb8801b..78b93e49 100644 --- a/opencsp/common/lib/deflectometry/Surface2DParabolic.py +++ b/opencsp/common/lib/deflectometry/Surface2DParabolic.py @@ -9,12 +9,7 @@ class Surface2DParabolic(Surface2DAbstract): - def __init__( - self, - initial_focal_lengths_xy: tuple[float, float], - robust_least_squares: bool, - downsample: int, - ): + def __init__(self, initial_focal_lengths_xy: tuple[float, float], robust_least_squares: bool, downsample: int): """ Representation of 2D fit parabolic surface. @@ -35,15 +30,7 @@ def __init__( self.slope_fit_poly_order = 1 self.initial_focal_lengths_xy = initial_focal_lengths_xy self.surf_coefs = np.array( - [ - 0, - 0, - 1 / 4 / initial_focal_lengths_xy[0], - 0, - 0, - 1 / 4 / initial_focal_lengths_xy[1], - ], - dtype=float, + [0, 0, 1 / 4 / initial_focal_lengths_xy[0], 0, 0, 1 / 4 / initial_focal_lengths_xy[1]], dtype=float ) self.slope_coefs = np.zeros((2, 3)) @@ -80,9 +67,7 @@ def set_spatial_data( """ # Downsample and save measurement data - self.u_active_pixel_pointing_optic = u_active_pixel_pointing_optic[ - :: self.downsample - ] + self.u_active_pixel_pointing_optic = u_active_pixel_pointing_optic[:: self.downsample] self.v_screen_points_optic = v_screen_points_optic[:: self.downsample] # Save position data @@ -133,15 +118,7 @@ def intersect(self, u_pixel_pointing: Uxyz, v_origin: Vxyz) -> Vxyz: # Solve quadratic formula for ray intersections with parabola a = (A * q**2) + (B * r**2) + (C * q * r) - b = ( - (2 * A * Xc * q) - + (2 * B * Yc * r) - + (C * Xc * r) - + (C * Yc * q) - + (D * q) - + (E * r) - - s - ) + b = (2 * A * Xc * q) + (2 * B * Yc * r) + (C * Xc * r) + (C * Yc * q) + (D * q) + (E * r) - s c = (A * Xc**2) + (B * Yc**2) + (C * Xc * Yc) + (D * Xc) + (E * Yc) + F - Zc a[mask] = np.nan @@ -154,9 +131,7 @@ def intersect(self, u_pixel_pointing: Uxyz, v_origin: Vxyz) -> Vxyz: mean_2 = scale_2.mean() if mean_1 < 0 and mean_2 < 0: - raise ValueError( - 'Camera ray intersection points with parabolic surface are found to be behind camera.' 
- ) + raise ValueError('Camera ray intersection points with parabolic surface are found to be behind camera.') elif mean_1 > 0 and mean_2 > 0: # Default to use scale 1 scale = scale_1 elif mean_2 > 0: @@ -174,9 +149,7 @@ def intersect(self, u_pixel_pointing: Uxyz, v_origin: Vxyz) -> Vxyz: scale[mask] = z_scale # Calculate intersection points - int_pts = ( - v_origin + u_pixel_pointing.as_Vxyz() * scale[np.newaxis, :] - ) # optic coordinates + int_pts = v_origin + u_pixel_pointing.as_Vxyz() * scale[np.newaxis, :] # optic coordinates return int_pts def normal_design_at_align_point(self) -> Vxyz: @@ -189,12 +162,8 @@ def normal_design_at_align_point(self) -> Vxyz: Surface normal vector. """ - dzdx_design = ( - -self.v_align_point_optic.x[0] / 2 / self.initial_focal_lengths_xy[0] - ) - dzdy_design = ( - -self.v_align_point_optic.y[0] / 2 / self.initial_focal_lengths_xy[1] - ) + dzdx_design = -self.v_align_point_optic.x[0] / 2 / self.initial_focal_lengths_xy[0] + dzdy_design = -self.v_align_point_optic.y[0] / 2 / self.initial_focal_lengths_xy[1] return Uxyz([dzdx_design, dzdy_design, 1]) def normal_fit_at_align_point(self) -> Vxyz: @@ -225,20 +194,14 @@ def calculate_surface_intersect_points(self) -> None: """ # Calculate pixel intersection points with existing fitting function - self.v_surf_int_pts_optic = self.intersect( - self.u_active_pixel_pointing_optic, self.v_optic_cam_optic - ) + self.v_surf_int_pts_optic = self.intersect(self.u_active_pixel_pointing_optic, self.v_optic_cam_optic) def calculate_slopes(self) -> tuple[Vxyz, np.ndarray]: """ Calculate slopes of each measurement point. """ - self.slopes = sf2.calc_slopes( - self.v_surf_int_pts_optic, - self.v_optic_cam_optic, - self.v_screen_points_optic, - ) + self.slopes = sf2.calc_slopes(self.v_surf_int_pts_optic, self.v_optic_cam_optic, self.v_screen_points_optic) def fit_slopes(self) -> None: """ @@ -248,25 +211,15 @@ def fit_slopes(self) -> None: # Fit Nth order surfaces to slope distributions in X and Y if self.robust_least_squares: slope_coefs_x, weights_x = sf2.fit_slope_robust_ls( - self.slope_fit_poly_order, - self.slopes[0], - self.weights.copy(), - self.v_surf_int_pts_optic, + self.slope_fit_poly_order, self.slopes[0], self.weights.copy(), self.v_surf_int_pts_optic ) slope_coefs_y, weights_y = sf2.fit_slope_robust_ls( - self.slope_fit_poly_order, - self.slopes[1], - self.weights.copy(), - self.v_surf_int_pts_optic, + self.slope_fit_poly_order, self.slopes[1], self.weights.copy(), self.v_surf_int_pts_optic ) self.weights = np.array((weights_x, weights_y)).min(0) else: - slope_coefs_x = sf2.fit_slope_ls( - self.slope_fit_poly_order, self.slopes[0], self.v_surf_int_pts_optic - ) - slope_coefs_y = sf2.fit_slope_ls( - self.slope_fit_poly_order, self.slopes[1], self.v_surf_int_pts_optic - ) + slope_coefs_x = sf2.fit_slope_ls(self.slope_fit_poly_order, self.slopes[0], self.v_surf_int_pts_optic) + slope_coefs_y = sf2.fit_slope_ls(self.slope_fit_poly_order, self.slopes[1], self.v_surf_int_pts_optic) # Save slope coefficients self.slope_coefs = np.array((slope_coefs_x, slope_coefs_y)) @@ -284,9 +237,7 @@ def fit_slopes(self) -> None: ) # Calculate z coordinate - z_pt = self.v_align_point_optic.z[0] - sf2.coef_to_points( - self.v_align_point_optic, self.surf_coefs, 2 - ) + z_pt = self.v_align_point_optic.z[0] - sf2.coef_to_points(self.v_align_point_optic, self.surf_coefs, 2) self.surf_coefs[0] = z_pt def rotate_all(self, r_align_step: Rotation) -> None: @@ -299,21 +250,11 @@ def rotate_all(self, r_align_step: Rotation) -> None: Rotation 
object to rotate all vectors by. """ - self.v_optic_cam_optic = self.v_optic_cam_optic.rotate_about( - r_align_step, self.v_align_point_optic - ) - self.v_screen_points_optic = self.v_screen_points_optic.rotate_about( - r_align_step, self.v_align_point_optic - ) - self.u_active_pixel_pointing_optic = self.u_active_pixel_pointing_optic.rotate( - r_align_step - ) - self.u_measure_pixel_pointing_optic = ( - self.u_measure_pixel_pointing_optic.rotate(r_align_step) - ) - self.v_optic_screen_optic = self.v_optic_screen_optic.rotate_about( - r_align_step, self.v_align_point_optic - ) + self.v_optic_cam_optic = self.v_optic_cam_optic.rotate_about(r_align_step, self.v_align_point_optic) + self.v_screen_points_optic = self.v_screen_points_optic.rotate_about(r_align_step, self.v_align_point_optic) + self.u_active_pixel_pointing_optic = self.u_active_pixel_pointing_optic.rotate(r_align_step) + self.u_measure_pixel_pointing_optic = self.u_measure_pixel_pointing_optic.rotate(r_align_step) + self.v_optic_screen_optic = self.v_optic_screen_optic.rotate_about(r_align_step, self.v_align_point_optic) def shift_all(self, v_align_optic_step: Vxyz) -> None: """ @@ -330,12 +271,7 @@ def shift_all(self, v_align_optic_step: Vxyz) -> None: self.v_optic_screen_optic += v_align_optic_step def save_to_hdf(self, file: str, prefix: str = ''): - data = [ - self.initial_focal_lengths_xy, - self.robust_least_squares, - self.downsample, - 'parabolic' - ] + data = [self.initial_focal_lengths_xy, self.robust_least_squares, self.downsample, 'parabolic'] datasets = [ prefix + 'ParamsSurface/initial_focal_lengths_xy', prefix + 'ParamsSurface/robust_least_squares', @@ -349,8 +285,7 @@ def load_from_hdf(cls, file: str, prefix: str = ''): # Check surface type data = load_hdf5_datasets([prefix + 'ParamsSurface/surface_type'], file) if data['surface_type'] != 'parabolic': - raise ValueError( - f'Surface2DParabolic cannot load surface type, {data["surface_type"]:s}') + raise ValueError(f'Surface2DParabolic cannot load surface type, {data["surface_type"]:s}') # Load datasets = [ diff --git a/opencsp/common/lib/deflectometry/Surface2DPlano.py b/opencsp/common/lib/deflectometry/Surface2DPlano.py index 2a2a0080..000c9709 100644 --- a/opencsp/common/lib/deflectometry/Surface2DPlano.py +++ b/opencsp/common/lib/deflectometry/Surface2DPlano.py @@ -62,9 +62,7 @@ def set_spatial_data( """ # Downsample and save measurement data - self.u_active_pixel_pointing_optic = u_active_pixel_pointing_optic[ - :: self.downsample - ] + self.u_active_pixel_pointing_optic = u_active_pixel_pointing_optic[:: self.downsample] self.v_screen_points_optic = v_screen_points_optic[:: self.downsample] # Save position data @@ -131,20 +129,14 @@ def calculate_surface_intersect_points(self) -> None: """ # Calculate pixel intersection points with existing fitting function - self.v_surf_int_pts_optic = self.intersect( - self.u_active_pixel_pointing_optic, self.v_optic_cam_optic - ) + self.v_surf_int_pts_optic = self.intersect(self.u_active_pixel_pointing_optic, self.v_optic_cam_optic) def calculate_slopes(self) -> tuple[Vxyz, np.ndarray]: """ Calculate slopes of each measurement point. 
""" - self.slopes = sf2.calc_slopes( - self.v_surf_int_pts_optic, - self.v_optic_cam_optic, - self.v_screen_points_optic, - ) + self.slopes = sf2.calc_slopes(self.v_surf_int_pts_optic, self.v_optic_cam_optic, self.v_screen_points_optic) def fit_slopes(self) -> dict: """ @@ -154,25 +146,15 @@ def fit_slopes(self) -> dict: # Fit Nth order surfaces to slope distributions in X and Y if self.robust_least_squares: slope_coefs_x, weights_x = sf2.fit_slope_robust_ls( - self.slope_fit_poly_order, - self.slopes[0], - self.weights.copy(), - self.v_surf_int_pts_optic, + self.slope_fit_poly_order, self.slopes[0], self.weights.copy(), self.v_surf_int_pts_optic ) slope_coefs_y, weights_y = sf2.fit_slope_robust_ls( - self.slope_fit_poly_order, - self.slopes[1], - self.weights.copy(), - self.v_surf_int_pts_optic, + self.slope_fit_poly_order, self.slopes[1], self.weights.copy(), self.v_surf_int_pts_optic ) self.weights = np.array((weights_x, weights_y)).min(0) else: - slope_coefs_x = sf2.fit_slope_ls( - self.slope_fit_poly_order, self.slopes[0], self.v_surf_int_pts_optic - ) - slope_coefs_y = sf2.fit_slope_ls( - self.slope_fit_poly_order, self.slopes[1], self.v_surf_int_pts_optic - ) + slope_coefs_x = sf2.fit_slope_ls(self.slope_fit_poly_order, self.slopes[0], self.v_surf_int_pts_optic) + slope_coefs_y = sf2.fit_slope_ls(self.slope_fit_poly_order, self.slopes[1], self.v_surf_int_pts_optic) # Save slope coefficients self.slope_coefs = np.array([slope_coefs_x[0], slope_coefs_y[0]]) @@ -181,9 +163,7 @@ def fit_slopes(self) -> dict: self.surf_coefs = np.array([0, slope_coefs_x[0], slope_coefs_x[0]]) # Calculate z coordinate - z_pt = self.v_align_point_optic.z[0] - sf2.coef_to_points( - self.v_align_point_optic, self.surf_coefs, 1 - ) + z_pt = self.v_align_point_optic.z[0] - sf2.coef_to_points(self.v_align_point_optic, self.surf_coefs, 1) self.surf_coefs[0] = z_pt def rotate_all(self, r_align_step: Rotation) -> None: @@ -196,25 +176,15 @@ def rotate_all(self, r_align_step: Rotation) -> None: Rotation object to rotate all vectors by. 
""" - self.v_optic_cam_optic = self.v_optic_cam_optic.rotate_about( + self.v_optic_cam_optic = self.v_optic_cam_optic.rotate_about(r_align_step, self.v_align_point_optic) + self.v_screen_points_optic = self.v_screen_points_optic.rotate_about(r_align_step, self.v_align_point_optic) + self.u_active_pixel_pointing_optic = self.u_active_pixel_pointing_optic.rotate_about( r_align_step, self.v_align_point_optic ) - self.v_screen_points_optic = self.v_screen_points_optic.rotate_about( - r_align_step, self.v_align_point_optic - ) - self.u_active_pixel_pointing_optic = ( - self.u_active_pixel_pointing_optic.rotate_about( - r_align_step, self.v_align_point_optic - ) - ) - self.u_measure_pixel_pointing_optic = ( - self.u_measure_pixel_pointing_optic.rotate_about( - r_align_step, self.v_align_point_optic - ) - ) - self.v_optic_screen_optic = self.v_optic_screen_optic.rotate_about( + self.u_measure_pixel_pointing_optic = self.u_measure_pixel_pointing_optic.rotate_about( r_align_step, self.v_align_point_optic ) + self.v_optic_screen_optic = self.v_optic_screen_optic.rotate_about(r_align_step, self.v_align_point_optic) def shift_all(self, v_align_optic_step: Vxyz) -> None: """ @@ -233,11 +203,7 @@ def shift_all(self, v_align_optic_step: Vxyz) -> None: self.v_optic_screen_optic += v_align_optic_step def save_to_hdf(self, file: str, prefix: str = ''): - data = [ - self.robust_least_squares, - self.downsample, - 'parabolic' - ] + data = [self.robust_least_squares, self.downsample, 'parabolic'] datasets = [ prefix + 'ParamsSurface/robust_least_squares', prefix + 'ParamsSurface/downsample', @@ -250,13 +216,9 @@ def load_from_hdf(cls, file: str, prefix: str = ''): # Check surface type data = load_hdf5_datasets([prefix + 'ParamsSurface/surface_type'], file) if data['surface_type'] != 'parabolic': - raise ValueError( - f'Surface2DPlano cannot load surface type, {data["surface_type"]:s}') + raise ValueError(f'Surface2DPlano cannot load surface type, {data["surface_type"]:s}') # Load - datasets = [ - prefix + 'ParamsSurface/robust_least_squares', - prefix + 'ParamsSurface/downsample', - ] + datasets = [prefix + 'ParamsSurface/robust_least_squares', prefix + 'ParamsSurface/downsample'] data = load_hdf5_datasets(datasets, file) return cls(**data) diff --git a/opencsp/common/lib/deflectometry/slope_fitting_2d.py b/opencsp/common/lib/deflectometry/slope_fitting_2d.py index 524a538b..dab70052 100644 --- a/opencsp/common/lib/deflectometry/slope_fitting_2d.py +++ b/opencsp/common/lib/deflectometry/slope_fitting_2d.py @@ -4,9 +4,7 @@ from opencsp.common.lib.geometry.Vxyz import Vxyz -def propagate_rays_to_plane( - u_ray: Uxyz, v_origin: Vxyz, v_plane: Vxyz, u_plane: Uxyz -) -> Vxyz: +def propagate_rays_to_plane(u_ray: Uxyz, v_origin: Vxyz, v_plane: Vxyz, u_plane: Uxyz) -> Vxyz: """ Propagates rays to their intersection with a plane @@ -30,9 +28,7 @@ def propagate_rays_to_plane( if type(u_ray) is not Uxyz: raise TypeError('u_ray must be type {} not type {}.'.format(Uxyz, type(u_ray))) if type(u_plane) is not Uxyz: - raise TypeError( - 'u_plane must be type {} not type {}.'.format(Uxyz, type(u_plane)) - ) + raise TypeError('u_plane must be type {} not type {}.'.format(Uxyz, type(u_plane))) v_origin_plane = v_plane - v_origin w_dot = u_plane.dot(v_origin_plane) @@ -42,9 +38,7 @@ def propagate_rays_to_plane( return int_pts -def calc_slopes( - v_surf_int_pts_optic: Vxyz, v_optic_cam_optic: Vxyz, v_screen_points_optic: Vxyz -) -> np.ndarray: +def calc_slopes(v_surf_int_pts_optic: Vxyz, v_optic_cam_optic: Vxyz, 
v_screen_points_optic: Vxyz) -> np.ndarray: """ Calculate slopes of every measurement point. The normal of the surface is calculated sa the vector between the camera-to-optic vector and the @@ -78,10 +72,7 @@ def calc_slopes( def fit_slope_robust_ls( - slope_fit_poly_order: int, - slope: np.ndarray, - weights: np.ndarray, - v_surf_int_pts_optic: Vxyz, + slope_fit_poly_order: int, slope: np.ndarray, weights: np.ndarray, v_surf_int_pts_optic: Vxyz ) -> np.ndarray: # Check lengths match if slope.size != weights.size or slope.size != len(v_surf_int_pts_optic): @@ -93,9 +84,7 @@ def fit_slope_robust_ls( num_pts = len(v_surf_int_pts_optic) # Create terms - terms = poly_terms( - slope_fit_poly_order, v_surf_int_pts_optic.x, v_surf_int_pts_optic.y - ) + terms = poly_terms(slope_fit_poly_order, v_surf_int_pts_optic.x, v_surf_int_pts_optic.y) # Robust least squares fit c1 = 4.685 @@ -138,17 +127,13 @@ def fit_slope_robust_ls( return coefficients, weights -def fit_slope_ls( - slope_fit_poly_order: int, slope: np.ndarray, v_surf_int_pts_optic: Vxyz -) -> np.ndarray: +def fit_slope_ls(slope_fit_poly_order: int, slope: np.ndarray, v_surf_int_pts_optic: Vxyz) -> np.ndarray: """ Returns best fit slope coefficients to measured slope points using least squared fitting. """ # Create terms - terms = poly_terms( - slope_fit_poly_order, v_surf_int_pts_optic.x, v_surf_int_pts_optic.y - ) + terms = poly_terms(slope_fit_poly_order, v_surf_int_pts_optic.x, v_surf_int_pts_optic.y) # Simple least squares fit # a @ x = b @@ -256,9 +241,6 @@ def optic_screen_dist_error( v_align_point_cam = v_optic_cam_optic - v_align_point_optic dv_align_point_cam = v_align_point_cam * (scale - 1) v_optic_screen_new = v_optic_screen_optic + dv_align_point_cam - error = ( - np.linalg.norm((v_optic_screen_new - v_meas_pts_surf_int_optic).data) - - dist_meas - ) + error = np.linalg.norm((v_optic_screen_new - v_meas_pts_surf_int_optic).data) - dist_meas return np.abs(error) diff --git a/opencsp/common/lib/deflectometry/test/test_CalibrationCameraPosition.py b/opencsp/common/lib/deflectometry/test/test_CalibrationCameraPosition.py index d7ad19b2..925553a0 100644 --- a/opencsp/common/lib/deflectometry/test/test_CalibrationCameraPosition.py +++ b/opencsp/common/lib/deflectometry/test/test_CalibrationCameraPosition.py @@ -7,9 +7,7 @@ import numpy as np -from opencsp.common.lib.deflectometry.CalibrationCameraPosition import ( - CalibrationCameraPosition, -) +from opencsp.common.lib.deflectometry.CalibrationCameraPosition import CalibrationCameraPosition from opencsp.common.lib.camera.Camera import Camera from opencsp.common.lib.geometry.Vxyz import Vxyz from opencsp.common.lib.photogrammetry.photogrammetry import load_image_grayscale @@ -55,17 +53,13 @@ def setUpClass(cls, dir_input: str = None, dir_output: str = None): # Load input data camera = Camera.load_from_hdf(file_camera_sofast) - pts_marker_data = np.loadtxt( - file_point_locations, delimiter=',', dtype=float, skiprows=1 - ) + pts_marker_data = np.loadtxt(file_point_locations, delimiter=',', dtype=float, skiprows=1) pts_xyz_marker = Vxyz(pts_marker_data[:, 2:].T) corner_ids = pts_marker_data[:, 1] image = load_image_grayscale(file_cal_image) # Perform camera position calibraiton - cal_camera_position = CalibrationCameraPosition( - camera, pts_xyz_marker, corner_ids, image - ) + cal_camera_position = CalibrationCameraPosition(camera, pts_xyz_marker, corner_ids, image) cal_camera_position.verbose = verbose cal_camera_position.run_calibration() @@ -73,9 +67,7 @@ def setUpClass(cls, 
dir_input: str = None, dir_output: str = None): rvec, tvec = cal_camera_position.get_data() # Test data - cls.data_exp = np.loadtxt( - join(dir_output, 'camera_rvec_tvec.csv'), delimiter=',' - ) + cls.data_exp = np.loadtxt(join(dir_output, 'camera_rvec_tvec.csv'), delimiter=',') cls.data_meas = np.vstack((rvec, tvec)) def test_camera_rvec_tvec(self): diff --git a/opencsp/common/lib/deflectometry/test/test_SlopeSolver.py b/opencsp/common/lib/deflectometry/test/test_SlopeSolver.py index beb5e0dd..e7d5f72c 100644 --- a/opencsp/common/lib/deflectometry/test/test_SlopeSolver.py +++ b/opencsp/common/lib/deflectometry/test/test_SlopeSolver.py @@ -8,9 +8,7 @@ from scipy.spatial.transform import Rotation from opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display -from opencsp.app.sofast.lib.MeasurementSofastFringe import ( - MeasurementSofastFringe as Measurement, -) +from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement from opencsp.app.sofast.lib.SpatialOrientation import SpatialOrientation from opencsp.common.lib.deflectometry.SlopeSolver import SlopeSolver from opencsp.common.lib.deflectometry.Surface2DParabolic import Surface2DParabolic @@ -24,9 +22,7 @@ class TestSlopeSolver(unittest.TestCase): @classmethod def setUpClass(cls): # Get test data location - base_dir = os.path.join( - opencsp_code_dir(), 'test/data/measurements_sofast_fringe' - ) + base_dir = os.path.join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe') # Define test data files for single facet processing cls.data_file_facet = os.path.join(base_dir, 'calculations_facet/data.h5') @@ -79,38 +75,19 @@ def setUpClass(cls): cls.data_slope = ss.get_data() def test_transform_alignment(self): - data = load_hdf5_datasets( - ['DataSofastCalculation/facet/facet_000/trans_alignment'], - self.data_file_facet, - ) + data = load_hdf5_datasets(['DataSofastCalculation/facet/facet_000/trans_alignment'], self.data_file_facet) - np.testing.assert_allclose( - data['trans_alignment'], - self.data_slope.trans_alignment.matrix, - atol=1e-8, - rtol=0, - ) + np.testing.assert_allclose(data['trans_alignment'], self.data_slope.trans_alignment.matrix, atol=1e-8, rtol=0) def test_int_pts(self): - data = load_hdf5_datasets( - ['DataSofastCalculation/facet/facet_000/v_surf_points_facet'], - self.data_file_facet, - ) + data = load_hdf5_datasets(['DataSofastCalculation/facet/facet_000/v_surf_points_facet'], self.data_file_facet) np.testing.assert_allclose( - data['v_surf_points_facet'], - self.data_slope.v_surf_points_facet.data, - atol=1e-8, - rtol=0, + data['v_surf_points_facet'], self.data_slope.v_surf_points_facet.data, atol=1e-8, rtol=0 ) def test_slopes(self): - data = load_hdf5_datasets( - ['DataSofastCalculation/facet/facet_000/slopes_facet_xy'], - self.data_file_facet, - ) - np.testing.assert_allclose( - data['slopes_facet_xy'], self.data_slope.slopes_facet_xy, atol=1e-8, rtol=0 - ) + data = load_hdf5_datasets(['DataSofastCalculation/facet/facet_000/slopes_facet_xy'], self.data_file_facet) + np.testing.assert_allclose(data['slopes_facet_xy'], self.data_slope.slopes_facet_xy, atol=1e-8, rtol=0) if __name__ == '__main__': diff --git a/opencsp/common/lib/deflectometry/test/test_Surface2D.py b/opencsp/common/lib/deflectometry/test/test_Surface2D.py index d34c67eb..c34b1a9c 100644 --- a/opencsp/common/lib/deflectometry/test/test_Surface2D.py +++ b/opencsp/common/lib/deflectometry/test/test_Surface2D.py @@ -1,5 +1,6 @@ """Unit test suite to test Surface2D type classes """ + from os.path import 
dirname, join import unittest @@ -30,9 +31,7 @@ def test_intersect(self): surface: Surface2DAbstract = test[0] data_exp: Vxyz = test[1] - np.testing.assert_allclose( - surface.v_surf_int_pts_optic.data, data_exp.data - ) + np.testing.assert_allclose(surface.v_surf_int_pts_optic.data, data_exp.data) def test_calculate_slopes(self): """Tests slope calculations.""" @@ -110,9 +109,7 @@ def test_io(self): surf_cur.load_from_hdf(file, prefix) -def generate_2DParabolic() -> ( - tuple[Surface2DParabolic, Vxyz, np.ndarray, np.ndarray, np.ndarray, Uxyz, Uxyz] -): +def generate_2DParabolic() -> tuple[Surface2DParabolic, Vxyz, np.ndarray, np.ndarray, np.ndarray, Uxyz, Uxyz]: """ Generates data for 2DParabolic case """ @@ -120,20 +117,14 @@ def generate_2DParabolic() -> ( initial_focal_lengths_xy = (1.0, 1.0) robust_least_squares = False downsample = 1 - surface = Surface2DParabolic( - initial_focal_lengths_xy, robust_least_squares, downsample - ) + surface = Surface2DParabolic(initial_focal_lengths_xy, robust_least_squares, downsample) # Define reflection geometry x_int = 2 * (np.sqrt(2) - 1) z_int = 0.25 * x_int**2 - u_active_pixel_pointing_optic = Uxyz( - ([-1, 1, 0, 0, 0], [0, 0, -1, 1, 0], [-1, -1, -1, -1, -1]) - ) - v_screen_points_optic = Vxyz( - ([-x_int, x_int, 0, 0, 0], [0, 0, -x_int, x_int, 0], [1, 1, 1, 1, 1]) - ) + u_active_pixel_pointing_optic = Uxyz(([-1, 1, 0, 0, 0], [0, 0, -1, 1, 0], [-1, -1, -1, -1, -1])) + v_screen_points_optic = Vxyz(([-x_int, x_int, 0, 0, 0], [0, 0, -x_int, x_int, 0], [1, 1, 1, 1, 1])) v_optic_cam_optic = Vxyz((0, 0, 1)) u_measure_pixel_pointing_optic = Uxyz((0, 0, -1)) v_align_point_optic = Vxyz((0, 0, 0)) @@ -150,16 +141,8 @@ def generate_2DParabolic() -> ( ) # Define expected data - v_surf_int_pts_exp = Vxyz( - ( - [-x_int, x_int, 0, 0, 0], - [0, 0, -x_int, x_int, 0], - [z_int, z_int, z_int, z_int, 0], - ) - ) - slopes_exp = np.array( - ([-x_int / 2, x_int / 2, 0, 0, 0], [0, 0, -x_int / 2, x_int / 2, 0]) - ) + v_surf_int_pts_exp = Vxyz(([-x_int, x_int, 0, 0, 0], [0, 0, -x_int, x_int, 0], [z_int, z_int, z_int, z_int, 0])) + slopes_exp = np.array(([-x_int / 2, x_int / 2, 0, 0, 0], [0, 0, -x_int / 2, x_int / 2, 0])) slope_coefs_exp = np.array(([0, 0.5, 0], [0, 0, 0.5])) surf_coefs_exp = np.array([0, 0, 0.25, 0, 0, 0.25]) u_design_exp = Uxyz((0.0, 0.0, 1.0)) @@ -175,20 +158,10 @@ def generate_2DParabolic() -> ( surface.fit_slopes() # Pack data - return ( - surface, - v_surf_int_pts_exp, - slopes_exp, - slope_coefs_exp, - surf_coefs_exp, - u_design_exp, - u_fit_exp, - ) + return (surface, v_surf_int_pts_exp, slopes_exp, slope_coefs_exp, surf_coefs_exp, u_design_exp, u_fit_exp) -def generate_2DPlano() -> ( - tuple[Surface2DPlano, Vxyz, np.ndarray, np.ndarray, np.ndarray, Uxyz, Uxyz] -): +def generate_2DPlano() -> tuple[Surface2DPlano, Vxyz, np.ndarray, np.ndarray, np.ndarray, Uxyz, Uxyz]: """ Generates data for 2DPlano case """ @@ -201,16 +174,8 @@ def generate_2DPlano() -> ( x_int = 1 z_int = 0 - u_active_pixel_pointing_optic = Uxyz( - ([-1, 1, 0, 0, 0], [0, 0, -1, 1, 0], [-1, -1, -1, -1, -1]) - ) - v_screen_points_optic = Vxyz( - ( - [-2 * x_int, 2 * x_int, 0, 0, 0], - [0, 0, -2 * x_int, 2 * x_int, 0], - [1, 1, 1, 1, 1], - ) - ) + u_active_pixel_pointing_optic = Uxyz(([-1, 1, 0, 0, 0], [0, 0, -1, 1, 0], [-1, -1, -1, -1, -1])) + v_screen_points_optic = Vxyz(([-2 * x_int, 2 * x_int, 0, 0, 0], [0, 0, -2 * x_int, 2 * x_int, 0], [1, 1, 1, 1, 1])) v_optic_cam_optic = Vxyz((0, 0, 1)) u_measure_pixel_pointing_optic = Uxyz((0, 0, -1)) v_align_point_optic = Vxyz((0, 0, 0)) @@ 
-227,13 +192,7 @@ def generate_2DPlano() -> ( ) # Define expected data - v_surf_int_pts_exp = Vxyz( - ( - [-x_int, x_int, 0, 0, 0], - [0, 0, -x_int, x_int, 0], - [z_int, z_int, z_int, z_int, 0], - ) - ) + v_surf_int_pts_exp = Vxyz(([-x_int, x_int, 0, 0, 0], [0, 0, -x_int, x_int, 0], [z_int, z_int, z_int, z_int, 0])) slopes_exp = np.array(([0, 0, 0, 0, 0], [0, 0, 0, 0, 0]), dtype=float) slope_coefs_exp = np.array(([0, 0]), dtype=float) surf_coefs_exp = np.array([0, 0, 0], dtype=float) @@ -250,15 +209,7 @@ def generate_2DPlano() -> ( surface.fit_slopes() # Pack data - return ( - surface, - v_surf_int_pts_exp, - slopes_exp, - slope_coefs_exp, - surf_coefs_exp, - u_design_exp, - u_fit_exp, - ) + return (surface, v_surf_int_pts_exp, slopes_exp, slope_coefs_exp, surf_coefs_exp, u_design_exp, u_fit_exp) if __name__ == '__main__': diff --git a/opencsp/common/lib/file/AbstractAttributeParser.py b/opencsp/common/lib/file/AbstractAttributeParser.py index 478c5d64..26c58041 100644 --- a/opencsp/common/lib/file/AbstractAttributeParser.py +++ b/opencsp/common/lib/file/AbstractAttributeParser.py @@ -58,9 +58,7 @@ def has_contents(self) -> bool: pass @abstractmethod - def parse_my_contents( - self, file_path_name_ext: str, raw_contents: str, my_contents: any - ): + def parse_my_contents(self, file_path_name_ext: str, raw_contents: str, my_contents: any): """Parse this attribute parser's specific contents. Parameters @@ -172,9 +170,7 @@ def load(self, attributes_file_path_name_ext: str): attr = am.AttributesManager(self) attr.load(attributes_file_path_name_ext) - def append_contents_for_writing( - self, file_path_name_ext: str, contents: dict[str, any] - ) -> dict[str, any]: + def append_contents_for_writing(self, file_path_name_ext: str, contents: dict[str, any]) -> dict[str, any]: """Add the contents from this parser to be written to an attributes file. If has_contents() == False, then this parser will be skipped. 
diff --git a/opencsp/common/lib/file/AttributesManager.py b/opencsp/common/lib/file/AttributesManager.py index 3e773999..31f2ead6 100644 --- a/opencsp/common/lib/file/AttributesManager.py +++ b/opencsp/common/lib/file/AttributesManager.py @@ -42,12 +42,8 @@ class AttributesManager: def __init__(self, *parsers: aap.AbstractAttributeParser): input_parsers = {parser.__class__: parser for parser in parsers} - self.specific_parsers: dict[ - type[aap.AbstractAttributeParser], aap.AbstractAttributeParser - ] = input_parsers - self.generic_parsers: dict[ - type[aap.AbstractAttributeParser], aap.AbstractAttributeParser - ] = {} + self.specific_parsers: dict[type[aap.AbstractAttributeParser], aap.AbstractAttributeParser] = input_parsers + self.generic_parsers: dict[type[aap.AbstractAttributeParser], aap.AbstractAttributeParser] = {} # add parsers whose instance's haven't been passed in yet for parser_class in _registered_parser_classes: @@ -103,9 +99,7 @@ def get_parser( return None # more than one subclass found, just return the first one - lt.debug( - f"In AttributesManager.get_parser(): found more than one parser matching parser class {parser_class}" - ) + lt.debug(f"In AttributesManager.get_parser(): found more than one parser matching parser class {parser_class}") return subclass_parsers[0] def set_parser(self, parser: aap.AbstractAttributeParser): @@ -133,9 +127,7 @@ def get_attributes_dict(self, attributes_file_path_name_ext: str = None): for parser in self.parsers: if not parser.has_contents(): continue - contents = parser.append_contents_for_writing( - attributes_file_path_name_ext, contents - ) + contents = parser.append_contents_for_writing(attributes_file_path_name_ext, contents) return contents def save(self, attributes_file_path_name_ext: str, overwrite=False): @@ -164,8 +156,7 @@ def save(self, attributes_file_path_name_ext: str, overwrite=False): if not overwrite: lt.error_and_raise( FileExistsError, - f"Error in AttributesManager.save(): " - + f"file {attributes_file_path_name_ext} already exists!", + f"Error in AttributesManager.save(): " + f"file {attributes_file_path_name_ext} already exists!", ) # collect the contents to be saved @@ -192,7 +183,9 @@ def load(self, attributes_file_path_name_ext: str): # get the raw string value of the file str_contents = "" if not ft.file_exists(attributes_file_path_name_ext): - errstr = f"Error in AttributesManager.load(): attributes file '{attributes_file_path_name_ext}' does not exist!" + errstr = ( + f"Error in AttributesManager.load(): attributes file '{attributes_file_path_name_ext}' does not exist!" 
+ ) lt.debug(errstr) raise FileExistsError(errstr) with open(attributes_file_path_name_ext, 'r') as fin: @@ -202,11 +195,7 @@ def load(self, attributes_file_path_name_ext: str): try: json_contents: dict[str, any] = json.loads(str_contents) except json.decoder.JSONDecodeError: - lt.info( - f"In AttributesManager.load(): failed to parse attributes file {attributes_file_path_name_ext}" - ) + lt.info(f"In AttributesManager.load(): failed to parse attributes file {attributes_file_path_name_ext}") raise for parser in self.parsers: - json_contents = parser.parse_attributes_file( - attributes_file_path_name_ext, str_contents, json_contents - ) + json_contents = parser.parse_attributes_file(attributes_file_path_name_ext, str_contents, json_contents) diff --git a/opencsp/common/lib/file/CsvColumns.py b/opencsp/common/lib/file/CsvColumns.py index 753e4670..4cb7a6f9 100644 --- a/opencsp/common/lib/file/CsvColumns.py +++ b/opencsp/common/lib/file/CsvColumns.py @@ -45,9 +45,7 @@ def parse_data_row(self, data_row: list[str], row_idx=-1): if last_matched_idx < len(data_row) - 1: if row_idx > -1: - lt.debug( - f"Found {len(data_row)-last_matched_idx-1} extra values in row {row_idx}" - ) + lt.debug(f"Found {len(data_row)-last_matched_idx-1} extra values in row {row_idx}") last_column = sorted(self.columns.values(), key=lambda c: c.idx)[-1] cnt = 2 for i in range(last_matched_idx + 1, len(data_row)): @@ -100,9 +98,7 @@ def parse_header( if column.idx < 0: dbg_msg += f" {column.name}: -1\n" else: - dbg_msg += ( - f" {column.name}: {header_row[column.idx]} ({column.idx})\n" - ) + dbg_msg += f" {column.name}: {header_row[column.idx]} ({column.idx})\n" lt.debug(dbg_msg) # check that we found all the columns @@ -122,10 +118,7 @@ def parse_header( found = False others = alternatives[column.name] for alternative in others: - if ( - alternative in self.columns - and self.columns[alternative] != -1 - ): + if alternative in self.columns and self.columns[alternative] != -1: found = True break if found: diff --git a/opencsp/common/lib/file/test/test_AttributesManager.py b/opencsp/common/lib/file/test/test_AttributesManager.py index e5d74570..bb654e11 100644 --- a/opencsp/common/lib/file/test/test_AttributesManager.py +++ b/opencsp/common/lib/file/test/test_AttributesManager.py @@ -24,9 +24,7 @@ def attributes_key(self) -> str: def has_contents(self) -> bool: return True - def parse_my_contents( - self, file_path_name_ext: str, raw_contents: str, my_contents: any - ): + def parse_my_contents(self, file_path_name_ext: str, raw_contents: str, my_contents: any): self.strval = my_contents["strval"] self.listval = my_contents["listval"] diff --git a/opencsp/common/lib/geo/lon_lat_nsttf.py b/opencsp/common/lib/geo/lon_lat_nsttf.py index 55d7cc51..700b7098 100644 --- a/opencsp/common/lib/geo/lon_lat_nsttf.py +++ b/opencsp/common/lib/geo/lon_lat_nsttf.py @@ -10,9 +10,7 @@ # COORDINATE SYSTEM ORIGIN -LON_NSTTF_ORIGIN_DEG = ( - -106.509606 -) # Six decimal places correspond to about 11 cm resolution. +LON_NSTTF_ORIGIN_DEG = -106.509606 # Six decimal places correspond to about 11 cm resolution. 
LAT_NSTTF_ORIGIN_DEG = 34.962276 # LON_NSTTF_ORIGIN: float = np.deg2rad(LON_NSTTF_ORIGIN_DEG) diff --git a/opencsp/common/lib/geometry/EdgeXY.py b/opencsp/common/lib/geometry/EdgeXY.py index 09003d1e..36acddaa 100644 --- a/opencsp/common/lib/geometry/EdgeXY.py +++ b/opencsp/common/lib/geometry/EdgeXY.py @@ -4,9 +4,7 @@ class EdgeXY: - def __init__( - self, vertices: Vxy, curve_data: dict = {'type': 'line'}, closed: bool = False - ): + def __init__(self, vertices: Vxy, curve_data: dict = {'type': 'line'}, closed: bool = False): """ Representation of a 2D edge. @@ -26,13 +24,9 @@ def __init__( """ # Check inputs if len(vertices) != 2: - raise ValueError( - 'Input vertices must have length 2, not {:d}.'.format(len(vertices)) - ) + raise ValueError('Input vertices must have length 2, not {:d}.'.format(len(vertices))) if closed is True: - raise NotImplementedError( - 'Curves that are not closed are not currently supported.' - ) + raise NotImplementedError('Curves that are not closed are not currently supported.') # Save properties self._vertices = vertices @@ -43,9 +37,7 @@ def __init__( if curve_data['type'] == 'line': self._curve = LineXY.from_two_points(vertices[0], vertices[1]) else: - raise ValueError( - 'Curve type {:s} not currently supported.'.format(curve_data['type']) - ) + raise ValueError('Curve type {:s} not currently supported.'.format(curve_data['type'])) @property def vertices(self) -> Vxy: diff --git a/opencsp/common/lib/geometry/FunctionXYContinuous.py b/opencsp/common/lib/geometry/FunctionXYContinuous.py index c08a3e77..b7b8bf19 100644 --- a/opencsp/common/lib/geometry/FunctionXYContinuous.py +++ b/opencsp/common/lib/geometry/FunctionXYContinuous.py @@ -69,12 +69,7 @@ def draw(self, view: View3d, functionXY_style): for iy, y in enumerate(sorted(self.y_domain)): arr[iy, ix] = self.value_at(x, y) # A = self.as_callable()(X,Y) - extent = [ - min(self.x_domain), - max(self.x_domain), - min(self.y_domain), - max(self.y_domain), - ] + extent = [min(self.x_domain), max(self.x_domain), min(self.y_domain), max(self.y_domain)] # view.pcolormesh(list(self.x_domain), list(self.y_domain), arr, colorbar=True, cmap='jet', ) view.imshow(arr, colorbar=True, cmap='jet') @@ -90,9 +85,7 @@ def from_array(cls, x_domain: np.ndarray, y_domain: np.ndarray, values: np.ndarr values: 2d array, the values that will be returned when x and y are used """ if len(values) != len(y_domain) or len(values[0]) != len(x_domain): - raise ValueError( - "Size of the domain does not match size of the value array." - ) + raise ValueError("Size of the domain does not match size of the value array.") else: d = dict() for iy, y in enumerate(y_domain): diff --git a/opencsp/common/lib/geometry/FunctionXYDiscrete.py b/opencsp/common/lib/geometry/FunctionXYDiscrete.py index 6c769c7d..6a558abf 100644 --- a/opencsp/common/lib/geometry/FunctionXYDiscrete.py +++ b/opencsp/common/lib/geometry/FunctionXYDiscrete.py @@ -24,9 +24,7 @@ def __init__(self, values: dict[tuple[float, float], float]) -> None: # def interpolate() -> FunctionXYAnalytic: # ... 
- def value_at( - self, x: float | Iterable[float], y: float | Iterable[float] - ) -> float | np.ndarray[float]: + def value_at(self, x: float | Iterable[float], y: float | Iterable[float]) -> float | np.ndarray[float]: if isinstance(x, Iterable) and isinstance(y, Iterable): if len(x) != len(y): raise ValueError( @@ -51,12 +49,7 @@ def draw(self, view: View3d, functionXY_style): for iy, y in enumerate(sorted(self.y_domain)): arr[iy, ix] = self.value_at(x, y) # A = self.as_callable()(X,Y) - extent = [ - min(self.x_domain), - max(self.x_domain), - min(self.y_domain), - max(self.y_domain), - ] + extent = [min(self.x_domain), max(self.x_domain), min(self.y_domain), max(self.y_domain)] # view.pcolormesh(list(self.x_domain), list(self.y_domain), arr, colorbar=True, cmap='jet', ) view.imshow(arr, colorbar=True, cmap='jet') @@ -72,9 +65,7 @@ def from_array(cls, x_domain: np.ndarray, y_domain: np.ndarray, values: np.ndarr values: 2d array, the values that will be returned when x and y are used """ if len(values) != len(y_domain) or len(values[0]) != len(x_domain): - raise ValueError( - "Size of the domain does not match size of the value array." - ) + raise ValueError("Size of the domain does not match size of the value array.") else: d = dict() for iy, y in enumerate(y_domain): diff --git a/opencsp/common/lib/geometry/FunctionXYGrid.py b/opencsp/common/lib/geometry/FunctionXYGrid.py index fb331fec..11811b39 100644 --- a/opencsp/common/lib/geometry/FunctionXYGrid.py +++ b/opencsp/common/lib/geometry/FunctionXYGrid.py @@ -21,9 +21,7 @@ class FunctionXYGrid(FunctionXYAbstract): in the form (smallest x, largest x, smallest y, largest y) """ - def __init__( - self, values: np.ndarray, limits: tuple[float, float, float, float] = None - ) -> None: + def __init__(self, values: np.ndarray, limits: tuple[float, float, float, float] = None) -> None: """Represents a discrete function of equispaced points in its domain. Defined by an array and the location of the 4 corners of that array in the funciton space. @@ -55,9 +53,7 @@ def __init__( # ... 
# override - def value_at( - self, x: float | Iterable[float], y: float | Iterable[float] - ) -> float | np.ndarray[float]: + def value_at(self, x: float | Iterable[float], y: float | Iterable[float]) -> float | np.ndarray[float]: # array case if isinstance(x, Iterable) or isinstance(y, Iterable): if len(x) != len(y): @@ -85,16 +81,12 @@ def in_domain(self, x: float, y: float) -> bool: # override def __getstate__(self) -> dict: """Returns a serializable object for pickleing.""" - raise NotImplementedError( - "__getstate__ has not been implemented for FunctionXYGrid" - ) + raise NotImplementedError("__getstate__ has not been implemented for FunctionXYGrid") # override def __setstate__(self, state: dict): """Takes in __getstate__(self)'s output to recreate the object `self` that was passed into __getstate__""" - raise NotImplementedError( - "__setstate__ has not been implemented for FunctionXYGrid" - ) + raise NotImplementedError("__setstate__ has not been implemented for FunctionXYGrid") def to_index_values(self, x: float, y: float) -> tuple[int, int]: x_index = (x - self.x0) / self.x_step @@ -103,9 +95,7 @@ def to_index_values(self, x: float, y: float) -> tuple[int, int]: return False return (int(x_index), int(y_index)) - def draw( - self, view: View3d, functionXY_style: rcfxy.RenderControlFunctionXY = None - ): + def draw(self, view: View3d, functionXY_style: rcfxy.RenderControlFunctionXY = None): if functionXY_style == None: functionXY_style = rcfxy.RenderControlFunctionXY() diff --git a/opencsp/common/lib/geometry/Intersection.py b/opencsp/common/lib/geometry/Intersection.py index 4fb17188..0f90462f 100644 --- a/opencsp/common/lib/geometry/Intersection.py +++ b/opencsp/common/lib/geometry/Intersection.py @@ -24,12 +24,8 @@ from opencsp.common.lib.geometry.Uxyz import Uxyz from opencsp.common.lib.geometry.Vxyz import Vxyz from opencsp.common.lib.render.View3d import View3d -from opencsp.common.lib.render_control.RenderControlPointSeq import ( - RenderControlPointSeq, -) -from opencsp.common.lib.render_control.RenderControlRayTrace import ( - RenderControlRayTrace, -) +from opencsp.common.lib.render_control.RenderControlPointSeq import RenderControlPointSeq +from opencsp.common.lib.render_control.RenderControlRayTrace import RenderControlRayTrace from opencsp.common.lib.tool.hdf5_tools import load_hdf5_datasets, save_hdf5_datasets from opencsp.common.lib.tool.typing_tools import strict_types @@ -42,9 +38,7 @@ def __init__(self, intersection_points: Pxyz): def plane_intersect_from_ray_trace( cls, ray_trace: RayTrace, - plane: tuple[ - Pxyz, Uxyz - ], # used to be --> plane_point: Pxyz, plane_normal_vector: Uxyz, + plane: tuple[Pxyz, Uxyz], # used to be --> plane_point: Pxyz, plane_normal_vector: Uxyz, epsilon: float = 1e-6, save_in_file: bool = False, save_name: str = None, @@ -103,9 +97,7 @@ def plane_intersect_from_ray_trace( # filter out points that miss the plane if verbose: print("filtering out missed vectors") - filtered_intersec_points = Pxyz.merge( - list(filter(lambda vec: not vec.hasnan(), intersection_points)) - ) + filtered_intersec_points = Pxyz.merge(list(filter(lambda vec: not vec.hasnan(), intersection_points))) # if verbose: # print("Rotating.") @@ -137,9 +129,7 @@ def plane_intersect_from_ray_trace( # TODO tjlarki: for maddie, make this better def _from_ray_trace_vec_maddie( lines: tuple[Pxyz, Vxyz], - plane: tuple[ - Pxyz, Uxyz - ], # used to be --> plane_point: Pxyz, plane_normal_vector: Uxyz, + plane: tuple[Pxyz, Uxyz], # used to be --> plane_point: Pxyz, 
plane_normal_vector: Uxyz, epsilon: float = 1e-6, verbose: bool = False, ): @@ -182,7 +172,9 @@ def _from_ray_trace_vec_maddie( # filter out points that miss the plane if verbose: print("filtering out missed vectors") - filtered_intersec_points = intersection_points # Pxyz.merge(list(filter(lambda vec: not vec.hasnan(),intersection_points))) + filtered_intersec_points = ( + intersection_points # Pxyz.merge(list(filter(lambda vec: not vec.hasnan(),intersection_points))) + ) if verbose: print("Rotating.") @@ -202,11 +194,7 @@ def _from_ray_trace_vec_maddie( def from_hdf(cls, filename: str, intersection_name: str = "000"): # get the names of the batches to loop through intersection_points = Pxyz( - list( - load_hdf5_datasets( - [f"Intersection_{intersection_name}/Points"], filename - ).values() - )[0] + list(load_hdf5_datasets([f"Intersection_{intersection_name}/Points"], filename).values())[0] ) return Intersection(intersection_points) @@ -215,18 +203,13 @@ def empty_intersection(cls): return cls(Pxyz.empty()) def __add__(self, intersection: 'Intersection'): - return Intersection( - self.intersection_points.concatenate(intersection.intersection_points) - ) + return Intersection(self.intersection_points.concatenate(intersection.intersection_points)) def __len__(self): return len(self.intersection_points) def save_to_hdf(self, hdf_filename: str, intersection_name: str = "000"): - datasets = [ - f"Intersection_{intersection_name}/Points", - f"Intersection_{intersection_name}/Metatdata", - ] + datasets = [f"Intersection_{intersection_name}/Points", f"Intersection_{intersection_name}/Metatdata"] data = [self.intersection_points.data, "Placeholder"] save_hdf5_datasets(data, datasets, hdf_filename) @@ -251,9 +234,7 @@ def to_flux_mapYZ(self, bins: int, resolution_type: str = None) -> FunctionXYGri pyz = Pxy([self.intersection_points.y, self.intersection_points.z]) return Intersection._Pxy_to_flux_map(pyz, bins, resolution_type) - def _Pxy_to_flux_map( - points: Pxy, bins: int, resolution_type: str = "pixelX" - ) -> FunctionXYGrid: + def _Pxy_to_flux_map(points: Pxy, bins: int, resolution_type: str = "pixelX") -> FunctionXYGrid: xbins = bins x_low, x_high = min(points.x), max(points.x) y_low, y_high = min(points.y), max(points.y) @@ -278,8 +259,6 @@ def _Pxy_to_flux_map( def draw(self, view: View3d, style: RenderControlPointSeq = None): view.draw_single_Pxyz(self.intersection_points, style) - def draw_subset( - self, view: View3d, count: int, points_style: RenderControlPointSeq = None - ): + def draw_subset(self, view: View3d, count: int, points_style: RenderControlPointSeq = None): for i in np.floor(np.linspace(0, len(self.intersection_points) - 1, count)): view.draw_single_Pxyz(self.intersection_points[int(i)]) diff --git a/opencsp/common/lib/geometry/LineXY.py b/opencsp/common/lib/geometry/LineXY.py index 9c48dfc6..90d44bcf 100644 --- a/opencsp/common/lib/geometry/LineXY.py +++ b/opencsp/common/lib/geometry/LineXY.py @@ -37,14 +37,7 @@ def __init__(self, A: float, B: float, C: float): self.C = C / mag def __repr__(self): - return ( - '2D Line: ' - + self.A.__repr__() - + ', ' - + self.B.__repr__() - + ', ' - + self.C.__repr__() - ) + return '2D Line: ' + self.A.__repr__() + ', ' + self.B.__repr__() + ', ' + self.C.__repr__() @property def n_vec(self) -> Vxy: @@ -105,9 +98,7 @@ def fit_from_points(cls, Pxy: Vxy, seed: int = 1, neighbor_dist: float = 1.0): n = len(Pxy) thresh = int(0.99 * n) if n <= 15: - raise ValueError( - f'To fit line from points, must have > 15 points, but {n:d} were given.' 
- ) + raise ValueError(f'To fit line from points, must have > 15 points, but {n:d} were given.') # Fit from random combinations of points, keeping the best best_active = 0 @@ -115,10 +106,7 @@ def fit_from_points(cls, Pxy: Vxy, seed: int = 1, neighbor_dist: float = 1.0): # Find two separate points i1 = np.floor(rs.rand() * n).astype(int) i2 = np.floor(rs.rand() * n).astype(int) - while ( - Pxy.data[0, i1] == Pxy.data[0, i2] - and Pxy.data[1, i1] == Pxy.data[1, i2] - ): + while Pxy.data[0, i1] == Pxy.data[0, i2] and Pxy.data[1, i1] == Pxy.data[1, i2]: i2 = np.floor(rs.rand() * n).astype(int) # Fit line to two poins diff --git a/opencsp/common/lib/geometry/LoopXY.py b/opencsp/common/lib/geometry/LoopXY.py index 569e49aa..ff287175 100644 --- a/opencsp/common/lib/geometry/LoopXY.py +++ b/opencsp/common/lib/geometry/LoopXY.py @@ -79,12 +79,8 @@ def _vertex_to_vertex_angles(self): for idx1 in range(self.num_edges): idx2 = np.mod(idx1 + 1, self.num_edges) # Calcualte edge vectors - V_1 = ( - self._edges[idx1]._vertices[1] - self._edges[idx1]._vertices[0] - ).normalize() - V_2 = ( - self._edges[idx2]._vertices[1] - self._edges[idx2]._vertices[0] - ).normalize() + V_1 = (self._edges[idx1]._vertices[1] - self._edges[idx1]._vertices[0]).normalize() + V_2 = (self._edges[idx2]._vertices[1] - self._edges[idx2]._vertices[0]).normalize() # Calculate cross product cross_prod_data[idx2] = V_1.cross(V_2)[0] @@ -127,13 +123,7 @@ def from_lines(cls, lines: list[LineXY]): edges = [] for idx1 in range(len(vertices)): idx2 = np.mod(idx1 + 1, len(vertices)) - edges.append( - EdgeXY( - vertices=vertices[[idx1, idx2]], - curve_data={'type': 'line'}, - closed=False, - ) - ) + edges.append(EdgeXY(vertices=vertices[[idx1, idx2]], curve_data={'type': 'line'}, closed=False)) return cls(edges=edges) @@ -161,20 +151,12 @@ def from_vertices(cls, vertices: Vxy): edges = [] for idx1 in range(len(vertices)): idx2 = np.mod(idx1 + 1, len(vertices)) - edges.append( - EdgeXY( - vertices=vertices[[idx1, idx2]], - curve_data={'type': 'line'}, - closed=False, - ) - ) + edges.append(EdgeXY(vertices=vertices[[idx1, idx2]], curve_data={'type': 'line'}, closed=False)) return cls(edges=edges) @classmethod - def from_rectangle( - cls, x: float, y: float, width: float, height: float - ) -> 'LoopXY': + def from_rectangle(cls, x: float, y: float, width: float, height: float) -> 'LoopXY': """Returns rectangular loop Parameters @@ -451,15 +433,11 @@ def intersect_line(self, line: LineXY): intersect_xs.append(intersect_point.x[0]) intersect_ys.append(intersect_point.y[0]) else: - raise NotImplementedError( - "Intersections of non-line edges not yet supported in this method" - ) + raise NotImplementedError("Intersections of non-line edges not yet supported in this method") intersect_points = Vxy((intersect_xs, intersect_ys)) # Limit to internal (or border) intersections - intersect_points = intersect_points[ - self.is_inside_or_on_border(intersect_points) - ] + intersect_points = intersect_points[self.is_inside_or_on_border(intersect_points)] # De-duplicate intersections keep_xs: list[float] = [] diff --git a/opencsp/common/lib/geometry/ReferenceFrame.py b/opencsp/common/lib/geometry/ReferenceFrame.py index 64ac2e1a..77860bb5 100644 --- a/opencsp/common/lib/geometry/ReferenceFrame.py +++ b/opencsp/common/lib/geometry/ReferenceFrame.py @@ -3,9 +3,7 @@ class ReferenceFrame: Tracks the displacement and rotation of different reference frames """ - def __init__( - self, dx: float, dy: float, dz: float, rx: float, ry: float, rz: float - ) -> None: + 
def __init__(self, dx: float, dy: float, dz: float, rx: float, ry: float, rz: float) -> None: self.dx = dx self.dy = dy self.dz = dz diff --git a/opencsp/common/lib/geometry/RegionXY.py b/opencsp/common/lib/geometry/RegionXY.py index a247ebfa..76dfdab4 100644 --- a/opencsp/common/lib/geometry/RegionXY.py +++ b/opencsp/common/lib/geometry/RegionXY.py @@ -34,9 +34,7 @@ def add_loop(self, loop: LoopXY) -> None: Loop to append to the region. """ - raise NotImplementedError( - 'Cannot add more than one loop to a region currently.' - ) + raise NotImplementedError('Cannot add more than one loop to a region currently.') def as_mask(self, vx: np.ndarray, vy: np.ndarray): """ @@ -111,12 +109,7 @@ def edge_sample(self, count: int): """Returns a Vxy of count points per edge per loop defining the region""" return Vxy.merge([loop.edge_sample(count) for loop in self.loops]) - def points_sample( - self, - resolution: int, - resolution_type: str = 'pixelX', - random_seed: int | None = None, - ) -> Pxy: + def points_sample(self, resolution: int, resolution_type: str = 'pixelX', random_seed: int | None = None) -> Pxy: """Returns a Pxy object of points sampled from inside the region. Parameters @@ -152,12 +145,7 @@ def points_sample( x_vals = np.linspace(left + xedge, right - xedge, x_pixel_res) y_vals = np.linspace(bottom + yedge, top - yedge, y_pixel_res) # all_points is every combination of x and y - all_points = Pxy( - [ - [x for x in x_vals for _ in y_vals], - [y for _ in x_vals for y in y_vals], - ] - ) + all_points = Pxy([[x for x in x_vals for _ in y_vals], [y for _ in x_vals for y in y_vals]]) elif resolution_type == 'pixelX': x_pixel_res = resolution @@ -168,17 +156,10 @@ def points_sample( x_vals = np.linspace(left + xedge, right - xedge, x_pixel_res) y_vals = np.linspace(bottom + yedge, top - yedge, y_pixel_res) # all_points is every combination of x and y - all_points = Pxy( - [ - [x for x in x_vals for _ in y_vals], - [y for _ in x_vals for y in y_vals], - ] - ) + all_points = Pxy([[x for x in x_vals for _ in y_vals], [y for _ in x_vals for y in y_vals]]) else: - raise ValueError( - f'Given resolution_type, {resolution_type}, not supported.' - ) + raise ValueError(f'Given resolution_type, {resolution_type}, not supported.') filtered_points = self.filter_points(all_points) return filtered_points @@ -210,9 +191,7 @@ def axis_aligned_bounding_box(self) -> tuple[float, float, float, float]: if len(self.loops) == 1: return self.loops[0].axis_aligned_bounding_box() else: - raise NotImplementedError( - 'RegionXY.axis_aligned_bounding_box is only implemented for single loop regions.' - ) + raise NotImplementedError('RegionXY.axis_aligned_bounding_box is only implemented for single loop regions.') # alias for easy use axis_aligned_bounding_box() aabbox = axis_aligned_bounding_box diff --git a/opencsp/common/lib/geometry/TransformXYZ.py b/opencsp/common/lib/geometry/TransformXYZ.py index 647f47b6..7281b1bf 100644 --- a/opencsp/common/lib/geometry/TransformXYZ.py +++ b/opencsp/common/lib/geometry/TransformXYZ.py @@ -28,9 +28,7 @@ def __repr__(self): def __mul__(self, T): # Check input type if not isinstance(T, TransformXYZ): - raise TypeError( - f'Type, {type(self)}, cannot be multipled by type, {type(T)}.' 
- ) + raise TypeError(f'Type, {type(self)}, cannot be multipled by type, {type(T)}.') return TransformXYZ(self._matrix @ T._matrix) diff --git a/opencsp/common/lib/geometry/TranslationXYZ.py b/opencsp/common/lib/geometry/TranslationXYZ.py index e51545f5..36846452 100644 --- a/opencsp/common/lib/geometry/TranslationXYZ.py +++ b/opencsp/common/lib/geometry/TranslationXYZ.py @@ -6,11 +6,7 @@ class TranslationXYZ: def __init__(self) -> None: - warn( - 'TranslationXYZ is deprecated. Replace with Vxyz.', - DeprecationWarning, - stacklevel=2, - ) + warn('TranslationXYZ is deprecated. Replace with Vxyz.', DeprecationWarning, stacklevel=2) self.trans_mtrx = np.zeros((3, 1)) def from_vector(v: Vxyz): diff --git a/opencsp/common/lib/geometry/Vxy.py b/opencsp/common/lib/geometry/Vxy.py index df9dbef3..0e572497 100644 --- a/opencsp/common/lib/geometry/Vxy.py +++ b/opencsp/common/lib/geometry/Vxy.py @@ -22,9 +22,7 @@ def __init__(self, data, dtype=float): if np.ndim(data) not in [1, 2]: raise ValueError('Input data must have 1 or 2 dimensions if ndarray.') elif np.ndim(data) == 2 and data.shape[0] != 2: - raise ValueError( - 'First dimension of 2-dimensional data must be length 2 if ndarray.' - ) + raise ValueError('First dimension of 2-dimensional data must be length 2 if ndarray.') elif len(data) != 2: raise ValueError('Input data must have length 2.') @@ -85,9 +83,7 @@ def __mul__(self, data_in): elif type(data_in) is np.ndarray: return self._from_data(self._data * data_in) else: - raise TypeError( - 'Vxy cannot be multipled by type, {}.'.format(type(data_in)) - ) + raise TypeError('Vxy cannot be multipled by type, {}.'.format(type(data_in))) def __getitem__(self, key): # Check that only one dimension is being indexed @@ -212,9 +208,7 @@ def rotate_in_place(self, R: np.ndarray) -> None: if type(R) is not np.ndarray: raise TypeError('Rotation must be type ndarray, not {}'.format(type(R))) if R.shape != (2, 2): - raise ValueError( - 'Rotation matrix must be shape (2, 2), not {}'.format(R.shape) - ) + raise ValueError('Rotation matrix must be shape (2, 2), not {}'.format(R.shape)) self._data = R @ self._data diff --git a/opencsp/common/lib/geometry/Vxyz.py b/opencsp/common/lib/geometry/Vxyz.py index e4bf008e..b9bd8dc0 100644 --- a/opencsp/common/lib/geometry/Vxyz.py +++ b/opencsp/common/lib/geometry/Vxyz.py @@ -29,9 +29,7 @@ def __init__(self, data, dtype=float): if np.ndim(data) not in [1, 2]: raise ValueError('Input data must have 1 or 2 dimensions if ndarray.') elif np.ndim(data) == 2 and data.shape[0] != 3: - raise ValueError( - 'First dimension of 2-dimensional data must be length 3 if ndarray.' - ) + raise ValueError('First dimension of 2-dimensional data must be length 3 if ndarray.') elif len(data) != 3: raise ValueError('Input data must have length 3.') @@ -308,9 +306,7 @@ def cross(self, V): # Check inputs self._check_is_Vxyz(V) if not (len(self) == 1 or len(V) == 1 or len(self) == len(V)): - raise ValueError( - 'Operands must be same same length, or at least one must have length 1.' 
- ) + raise ValueError('Operands must be same same length, or at least one must have length 1.') # Calculate return self._from_data(np.cross(self._data.T, V.data.T).T) diff --git a/opencsp/common/lib/geometry/angle.py b/opencsp/common/lib/geometry/angle.py index bf8e771d..0433f72e 100644 --- a/opencsp/common/lib/geometry/angle.py +++ b/opencsp/common/lib/geometry/angle.py @@ -70,9 +70,7 @@ def normalize(angles: npt.NDArray[np.float_] | Iterable) -> npt.NDArray[np.float pass -def normalize( - angle_or_angles: float | npt.NDArray[np.float_] | Iterable, -) -> float | npt.NDArray[np.float_]: +def normalize(angle_or_angles: float | npt.NDArray[np.float_] | Iterable) -> float | npt.NDArray[np.float_]: """Adjusts the given angle_or_angles to be in the range 0-2Ï€. Note that because this function operates on floating point math, your answer is not guaranteed to be exact (for example, a value diff --git a/opencsp/common/lib/geometry/geometry_2d.py b/opencsp/common/lib/geometry/geometry_2d.py index 783f1c85..6d4ed568 100644 --- a/opencsp/common/lib/geometry/geometry_2d.py +++ b/opencsp/common/lib/geometry/geometry_2d.py @@ -18,11 +18,7 @@ def homogeneous_line(xy1, xy2): - warn( - 'geometry_2d.homogeneous_line is deprecated. Use LineXY instead.', - DeprecationWarning, - stacklevel=2, - ) + warn('geometry_2d.homogeneous_line is deprecated. Use LineXY instead.', DeprecationWarning, stacklevel=2) # Returns homogeneous line coeffcients, in normalized form. x1 = xy1[0] y1 = xy1[1] @@ -33,11 +29,7 @@ def homogeneous_line(xy1, xy2): C = (x2 * y1) - (x1 * y2) n = np.sqrt((A * A) + (B * B)) if n == 0: - print( - '\nERROR: In homogeneous_line, degenerate case encountered.', - DeprecationWarning, - stacklevel=2, - ) + print('\nERROR: In homogeneous_line, degenerate case encountered.', DeprecationWarning, stacklevel=2) print(' xy1 =', xy1) print(' xy2 =', xy2) print('\n') @@ -50,9 +42,7 @@ def homogeneous_line(xy1, xy2): def flip_homogeneous_line(line): warn( - 'geometry_2d.flip_homogeneous_line is deprecated. Use LineXY.flip() instead.', - DeprecationWarning, - stacklevel=2, + 'geometry_2d.flip_homogeneous_line is deprecated. Use LineXY.flip() instead.', DeprecationWarning, stacklevel=2 ) # Reverse the sense of the homogeneous line. return [-x for x in line] @@ -132,11 +122,7 @@ def intersect_lines(line1, line2): def shift_x(ray, dx): - warn( - 'geometry_2d.shift_x is deprecated. Use Vxy.__add__() instead.', - DeprecationWarning, - stacklevel=2, - ) + warn('geometry_2d.shift_x is deprecated. Use Vxy.__add__() instead.', DeprecationWarning, stacklevel=2) x0 = ray[0][0] y0 = ray[0][1] x1 = ray[1][0] @@ -173,9 +159,7 @@ def draw_clip_xy_box(view, clip_xy_box): p_max = xy_max[0] y_max = xy_max[1] xy_list = [[p_min, y_min], [p_max, y_min], [p_max, y_max], [p_min, y_max]] - view.draw_pq_list( - xy_list, close=True, style=rcps.outline(color='r'), label='Clip Box' - ) + view.draw_pq_list(xy_list, close=True, style=rcps.outline(color='r'), label='Clip Box') def clip_line_to_xy_box(line, clip_xy_box): @@ -214,9 +198,7 @@ def clip_line_to_xy_box(line, clip_xy_box): # Check result. if len(clip_points) != 2: print( - 'WARNING: In clip_line_to_xy_box(), unexpected result with ', - len(clip_points), - ' clip points encountered.', + 'WARNING: In clip_line_to_xy_box(), unexpected result with ', len(clip_points), ' clip points encountered.' 
) print(' line = ', line) print(' clip_xy_box = ', clip_xy_box) @@ -226,9 +208,7 @@ def clip_line_to_xy_box(line, clip_xy_box): def extend_ray(ray, clip_xy_box, fail_if_null_result=True): warn( - 'geometry_2d.extend_ray is deprecated. Should be migrated to another library.', - DeprecationWarning, - stacklevel=2, + 'geometry_2d.extend_ray is deprecated. Should be migrated to another library.', DeprecationWarning, stacklevel=2 ) xy0 = ray[0] xy1 = ray[1] @@ -328,12 +308,7 @@ def best_fit_line_segment_A(xy_seq): assert False if dx > dy: pfit, stats = np.polynomial.Polynomial.fit( - x_seq, - y_seq, - degree, # linear - full=True, - window=(x_min, x_max), - domain=(x_min, x_max), + x_seq, y_seq, degree, full=True, window=(x_min, x_max), domain=(x_min, x_max) # linear ) b, m = pfit x0 = x_min @@ -399,11 +374,7 @@ def rotate_about_origin(xy, theta): def rotate_about_center(xy, theta, center_xy): - warn( - 'geometry_2d.rotate_about_center is deprecated. Use TransformXY instead.', - DeprecationWarning, - stacklevel=2, - ) + warn('geometry_2d.rotate_about_center is deprecated. Use TransformXY instead.', DeprecationWarning, stacklevel=2) x = xy[0] y = xy[1] cx = center_xy[0] diff --git a/opencsp/common/lib/geometry/geometry_3d.py b/opencsp/common/lib/geometry/geometry_3d.py index 901a1c51..69f48fe1 100644 --- a/opencsp/common/lib/geometry/geometry_3d.py +++ b/opencsp/common/lib/geometry/geometry_3d.py @@ -12,9 +12,7 @@ from warnings import warn -def direction_uxyz_given_azimuth_elevation( - azimuth: float, elevation: float -): # Both radians. +def direction_uxyz_given_azimuth_elevation(azimuth: float, elevation: float): # Both radians. warn( 'geometry_3d.direction_uxyz_given_azimuth_elevation is deprecated. This function should be migrated to another library.', DeprecationWarning, @@ -54,9 +52,7 @@ def distance_between_xyz_points(xyz_1, xyz_2): def vector_3d_cross_product(vxyz_1, vxyz_2): warn( - 'geometry_3d.vector_3d_cross_product is deprecated. Use Vxyz.cross() instead.', - DeprecationWarning, - stacklevel=2, + 'geometry_3d.vector_3d_cross_product is deprecated. Use Vxyz.cross() instead.', DeprecationWarning, stacklevel=2 ) return list( np.cross(np.array(vxyz_1), np.array(vxyz_2)) @@ -64,19 +60,13 @@ def vector_3d_cross_product(vxyz_1, vxyz_2): def vector_3d_norm(vxyz): - warn( - 'geometry_3d.vector_3d_norm is deprecated. Use Vxyz.magnitude() instead.', - DeprecationWarning, - stacklevel=2, - ) + warn('geometry_3d.vector_3d_norm is deprecated. Use Vxyz.magnitude() instead.', DeprecationWarning, stacklevel=2) return np.sqrt(vxyz[0] ** 2 + vxyz[1] ** 2 + vxyz[2] ** 2) def normalize_vector_3d(vxyz): warn( - 'geometry_3d.normalize_vector_3d is deprecated. Use Vxyz.normalize() instead.', - DeprecationWarning, - stacklevel=2, + 'geometry_3d.normalize_vector_3d is deprecated. Use Vxyz.normalize() instead.', DeprecationWarning, stacklevel=2 ) norm = vector_3d_norm(vxyz) return [vxyz[0] / norm, vxyz[1] / norm, vxyz[2] / norm] @@ -232,14 +222,8 @@ def construct_line_3d_given_two_points( y2 = xyz_2[1] z2 = xyz_2[2] # Check input. - if ( - (abs(x1 - x2) <= tolerance) - and (abs(y1 - y2) <= tolerance) - and (abs(z1 - z2) <= tolerance) - ): - print( - 'ERROR: In construct_line_3d_given_two_points(), degenerate point pair encountered.' - ) + if (abs(x1 - x2) <= tolerance) and (abs(y1 - y2) <= tolerance) and (abs(z1 - z2) <= tolerance): + print('ERROR: In construct_line_3d_given_two_points(), degenerate point pair encountered.') assert False # Compute attributes. 
length = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2 + (z2 - z1) ** 2) @@ -257,29 +241,19 @@ def construct_line_3d_given_two_points( line_3d['xyz_1'] = xyz_1 # First example point. line_3d['xyz_2'] = xyz_2 # Second example point. line_3d['length'] = length # Euclidean distance between example points. - line_3d['length_xy'] = ( - length_xy # Euclidean distance between example points, projected onto the xy plane. - ) + line_3d['length_xy'] = length_xy # Euclidean distance between example points, projected onto the xy plane. line_3d['mid_xyz'] = mid_xyz # Point midway between the two example points. - line_3d['vxyz'] = ( - vxyz # Vector pointing from first example point to second example point. - ) - line_3d['uxyz'] = ( - uxyz # Unit vector pointing from first example point to second example point. - ) + line_3d['vxyz'] = vxyz # Vector pointing from first example point to second example point. + line_3d['uxyz'] = uxyz # Unit vector pointing from first example point to second example point. line_3d['theta'] = ( theta # Angle the line points, after projecting onto the xy plane, measured ccw about the z axis. ) - line_3d['eta'] = ( - eta # Angle the line points above the xy plane (negative values indicate below the xy plane). - ) + line_3d['eta'] = eta # Angle the line points above the xy plane (negative values indicate below the xy plane). # Return. return line_3d -def closest_point_on_line_3d( - xyz, line_3d -): # ?? SCAFFOLDING RCB -- THE 3-D LINE SHOULD BE A CLASS. +def closest_point_on_line_3d(xyz, line_3d): # ?? SCAFFOLDING RCB -- THE 3-D LINE SHOULD BE A CLASS. """ Returns the point on the infinite 3d line that is closest to the given point. """ @@ -306,9 +280,7 @@ def closest_point_on_line_3d( # Because (v1 dot v2) = |v1|*|v2|*cos(theta), where theta is the angle betwe the vectors, # and because uxyz is a unit vector, the dot product is the signed distance along the line, # measured from the mid point. - signed_distance_along_line = ( - (ux * mid_to_xyz_x) + (uy * mid_to_xyz_y) + (uz * mid_to_xyz_z) - ) + signed_distance_along_line = (ux * mid_to_xyz_x) + (uy * mid_to_xyz_y) + (uz * mid_to_xyz_z) # Construct the closest point. closest_x = mid_x + (ux * signed_distance_along_line) closest_y = mid_y + (uy * signed_distance_along_line) @@ -318,9 +290,7 @@ def closest_point_on_line_3d( return closest_xyz -def distance_to_line_3d( - xyz, line_3d -): # ?? SCAFFOLDING RCB -- THE 3-D LINE SHOULD BE A CLASS. +def distance_to_line_3d(xyz, line_3d): # ?? SCAFFOLDING RCB -- THE 3-D LINE SHOULD BE A CLASS. """ Returns the shortest distance from the given point to the infinite 3-d line. 
""" diff --git a/opencsp/common/lib/geometry/matrix_geometry_3d.py b/opencsp/common/lib/geometry/matrix_geometry_3d.py index 8fa0a27e..0a4e8a6e 100644 --- a/opencsp/common/lib/geometry/matrix_geometry_3d.py +++ b/opencsp/common/lib/geometry/matrix_geometry_3d.py @@ -33,6 +33,4 @@ def rotate(points: ndarray, rot_vecs: ndarray) -> ndarray: cos_theta = np.cos(theta) sin_theta = np.sin(theta) - return ( - cos_theta * points + sin_theta * np.cross(v, points) + dot * (1 - cos_theta) * v - ) + return cos_theta * points + sin_theta * np.cross(v, points) + dot * (1 - cos_theta) * v diff --git a/opencsp/common/lib/geometry/test/test_LineXY.py b/opencsp/common/lib/geometry/test/test_LineXY.py index 49a39e65..c8e4b4ea 100644 --- a/opencsp/common/lib/geometry/test/test_LineXY.py +++ b/opencsp/common/lib/geometry/test/test_LineXY.py @@ -19,27 +19,7 @@ def test_n_vec(self): def test_from_points(self): pts = Vxy( - [ - np.arange(16), - [ - 1.1, - 2.4, - 3.1, - 4.6, - 5.2, - 6.8, - 7.3, - 8.7, - 9.9, - 10.3, - 11.5, - 12.1, - 13.8, - 14.4, - 15.0, - 16.5, - ], - ] + [np.arange(16), [1.1, 2.4, 3.1, 4.6, 5.2, 6.8, 7.3, 8.7, 9.9, 10.3, 11.5, 12.1, 13.8, 14.4, 15.0, 16.5]] ) with np.testing.assert_raises(ValueError): diff --git a/opencsp/common/lib/geometry/transform_3d.py b/opencsp/common/lib/geometry/transform_3d.py index 4f1b06f0..245a0675 100644 --- a/opencsp/common/lib/geometry/transform_3d.py +++ b/opencsp/common/lib/geometry/transform_3d.py @@ -10,9 +10,7 @@ from warnings import warn -def axisrotation( - unit_vector, angle -): # ?? SCAFFOLDING RCB -- ADD UNDERSCORE BETWEEN "AXIS" AND "ROTATION" +def axisrotation(unit_vector, angle): # ?? SCAFFOLDING RCB -- ADD UNDERSCORE BETWEEN "AXIS" AND "ROTATION" warn( 'transform_3d.axisrotation is deprecated. Replace with scipy.spatial.transform.Rotation', DeprecationWarning, @@ -33,12 +31,7 @@ def axisrotation( # The input vector must be a unit vector. norm = np.sqrt(ux**2 + uy**2 + uz**2) if abs(norm - 1.0) > 1e-9: # tolerance - print( - 'ERROR: In axisrotation(), input unit_vector =', - unit_vector, - ' is not of unit length. Length =', - norm, - ) + print('ERROR: In axisrotation(), input unit_vector =', unit_vector, ' is not of unit length. Length =', norm) c = np.cos(lhr_angle) s = np.sin(lhr_angle) diff --git a/opencsp/common/lib/opencsp_path/__init__.py b/opencsp/common/lib/opencsp_path/__init__.py index fd0abc19..984d7405 100644 --- a/opencsp/common/lib/opencsp_path/__init__.py +++ b/opencsp/common/lib/opencsp_path/__init__.py @@ -8,11 +8,7 @@ import opencsp.common.lib.tool.log_tools as lt _orp_settings_key = "opencsp_root_path" -_orp_settings_default = { - "example_data_dir": None, - "scratch_dir": None, - "scratch_name": "scratch", -} +_orp_settings_default = {"example_data_dir": None, "scratch_dir": None, "scratch_name": "scratch"} """ example_data_dir: The directory containing the opencsp example data, for examples that have very large data inputs. scratch_dir: The directory containing the scratch folder, for use with HPC clusters. scratch_name: The name of the scratch directory. Default to "scratch". 
@@ -55,16 +51,16 @@ def __load_settings_files(): settings_file_name_path_ext = os.path.join(dir, 'settings.json') # would use file_tools.directory_exists() except that I don't want to depend on any other part of opencsp - if os.path.exists(settings_file_name_path_ext) and os.path.isfile( - settings_file_name_path_ext - ): + if os.path.exists(settings_file_name_path_ext) and os.path.isfile(settings_file_name_path_ext): with open(settings_file_name_path_ext, 'r') as fin: lines = fin.readlines() lines = map(lambda l: "" if l.strip().startswith("//") else l, lines) settings = json.loads("\n".join(lines)) # verify the types for the loaded settings - err_msg_preamble = f"Error in opencsp_path.__init__(): while parsing settings file {settings_file_name_path_ext}, " + err_msg_preamble = ( + f"Error in opencsp_path.__init__(): while parsing settings file {settings_file_name_path_ext}, " + ) found_err = False if not isinstance(settings, dict): lt.error( @@ -127,13 +123,9 @@ def __populate_settings_list() -> list[tuple[str, dict[str, any]]]: opencsp_path = os.path.dirname(inspect.getfile(opencsp)) for package_name in _opencsp_code_settings_packages: module_name = "opencsp_code." + package_name - package_dir = os.path.abspath( - os.path.join(opencsp_path, "..", package_name.replace(".", "/")) - ) + package_dir = os.path.abspath(os.path.join(opencsp_path, "..", package_name.replace(".", "/"))) if os.path.exists(package_dir): - spec = importlib.util.spec_from_file_location( - module_name, package_dir + "/__init__.py" - ) + spec = importlib.util.spec_from_file_location(module_name, package_dir + "/__init__.py") module = importlib.util.module_from_spec(spec) sys.modules[module_name] = module spec.loader.exec_module(module) diff --git a/opencsp/common/lib/opencsp_path/data_path_for_test.py b/opencsp/common/lib/opencsp_path/data_path_for_test.py index f9cceab8..a073d797 100644 --- a/opencsp/common/lib/opencsp_path/data_path_for_test.py +++ b/opencsp/common/lib/opencsp_path/data_path_for_test.py @@ -14,24 +14,13 @@ def __sandia_nsttf_test_definition_dir(): return os.path.join( - orp.opencsp_code_dir(), - 'common', - 'lib', - 'test', - 'data', - 'input', - 'sandia_nsttf_test_definition', + orp.opencsp_code_dir(), 'common', 'lib', 'test', 'data', 'input', 'sandia_nsttf_test_definition' ) def sandia_nsttf_test_heliostats_origin_file(): - return os.path.join( - __sandia_nsttf_test_definition_dir(), - 'NSTTF_Heliostats_origin_at_torque_tube.csv', - ) + return os.path.join(__sandia_nsttf_test_definition_dir(), 'NSTTF_Heliostats_origin_at_torque_tube.csv') def sandia_nsttf_test_facet_centroidsfile(): - return os.path.join( - __sandia_nsttf_test_definition_dir(), 'NSTTF_Facet_Centroids.csv' - ) + return os.path.join(__sandia_nsttf_test_definition_dir(), 'NSTTF_Facet_Centroids.csv') diff --git a/opencsp/common/lib/opencsp_path/opencsp_root_path.py b/opencsp/common/lib/opencsp_path/opencsp_root_path.py index 976d48c5..3662a8af 100644 --- a/opencsp/common/lib/opencsp_path/opencsp_root_path.py +++ b/opencsp/common/lib/opencsp_path/opencsp_root_path.py @@ -38,9 +38,7 @@ def opencsp_data_example_dir(): def opencsp_data_test_dir(): """This method deprecated. 
For most tests you can find the data in the neighboring \"data\" directory, inside the \"test\" directory.""" - lt.warn( - "Deprecation warning (opencsp_data_test_dir): " + opencsp_data_test_dir.__doc__ - ) + lt.warn("Deprecation warning (opencsp_data_test_dir): " + opencsp_data_test_dir.__doc__) def opencsp_scratch_dir(project_dir=None) -> str: @@ -53,53 +51,31 @@ def opencsp_scratch_dir(project_dir=None) -> str: shared directory between multiple computers (aka network file system).""" scratch_dir: str = opencsp_settings["opencsp_root_path"]["scratch_dir"] if scratch_dir != None and os.path.exists(scratch_dir): - actual_scratch_dir = os.path.join( - scratch_dir, opencsp_settings["opencsp_root_path"]["scratch_name"] - ) - return ( - actual_scratch_dir - if project_dir == None - else os.path.join(actual_scratch_dir, project_dir) - ) + actual_scratch_dir = os.path.join(scratch_dir, opencsp_settings["opencsp_root_path"]["scratch_name"]) + return actual_scratch_dir if project_dir == None else os.path.join(actual_scratch_dir, project_dir) if os.name == "nt": # Check for a scratch mirror directory on the user's computer. actual_scratch_dir = os.path.join( - opencsp_code_dir(), - '..', - opencsp_settings["opencsp_root_path"]["scratch_name"], + opencsp_code_dir(), '..', opencsp_settings["opencsp_root_path"]["scratch_name"] ) actual_scratch_dir = ( - actual_scratch_dir - if project_dir == None - else os.path.join(actual_scratch_dir, project_dir) + actual_scratch_dir if project_dir == None else os.path.join(actual_scratch_dir, project_dir) ) if os.path.isdir(actual_scratch_dir): return actual_scratch_dir # This is a directory on windows that we should be able to write to actual_scratch_dir = os.path.join( - os.path.expandvars("%LOCALAPPDATA%"), - "opencsp", - opencsp_settings["opencsp_root_path"]["scratch_name"], - ) - return ( - actual_scratch_dir - if project_dir == None - else os.path.join(actual_scratch_dir, project_dir) + os.path.expandvars("%LOCALAPPDATA%"), "opencsp", opencsp_settings["opencsp_root_path"]["scratch_name"] ) + return actual_scratch_dir if project_dir == None else os.path.join(actual_scratch_dir, project_dir) else: # On the cluster nodes, we should be writing to the scratch file system for multi-node programs. # Aka don't do this: # return os.path.join(os.path.expanduser('~'), ".opencsp/cache") - actual_scratch_dir = ( - f"/{opencsp_settings['opencsp_root_path']['scratch_name']}/" - ) - return ( - actual_scratch_dir - if project_dir == None - else os.path.join(actual_scratch_dir, project_dir) - ) + actual_scratch_dir = f"/{opencsp_settings['opencsp_root_path']['scratch_name']}/" + return actual_scratch_dir if project_dir == None else os.path.join(actual_scratch_dir, project_dir) def opencsp_cache_dir(): @@ -132,9 +108,7 @@ def _opencsp_settings_dirs(): # ret.append(os.path.join(os.path.expandvars("%LOCALAPPDATA%"), "opencsp", "settings")) # ret.append(os.path.join(os.path.expandvars("%APPDATA%"), "opencsp", "settings")) else: - ret.append( - os.path.join(os.path.expanduser("~"), ".config", "opencsp", "settings") - ) + ret.append(os.path.join(os.path.expanduser("~"), ".config", "opencsp", "settings")) if "OPENCSP_SETTINGS_DIRS" in os.environ: if os.environ["OPENCSP_SETTINGS_DIRS"] == "None": @@ -150,7 +124,4 @@ def _opencsp_settings_dirs(): def relative_opencsp_data_test_dir(): """This method deprecated. 
For most tests you can find the data in the neighboring \"data\" directory, inside the \"test\" directory.""" - lt.warn( - "Deprecation warning (relative_opencsp_data_test_dir): " - + relative_opencsp_data_test_dir.__doc__ - ) + lt.warn("Deprecation warning (relative_opencsp_data_test_dir): " + relative_opencsp_data_test_dir.__doc__) diff --git a/opencsp/common/lib/opencsp_path/test/test_opencsp_root_path.py b/opencsp/common/lib/opencsp_path/test/test_opencsp_root_path.py index 6dd09a76..d796edec 100644 --- a/opencsp/common/lib/opencsp_path/test/test_opencsp_root_path.py +++ b/opencsp/common/lib/opencsp_path/test/test_opencsp_root_path.py @@ -12,10 +12,10 @@ class test_opencsp_root_path(unittest.TestCase): @classmethod def setUpClass(cls) -> None: - cls.tmp_settings_file = os.path.join( - os.path.expanduser("~"), ".opencsp", "settings.json" + cls.tmp_settings_file = os.path.join(os.path.expanduser("~"), ".opencsp", "settings.json") + cls.tmp_settings_contents = ( + '{ "opencsp_root_path": { "example_data_dir": "e/f", "scratch_dir": "s/t", "scratch_name": "u" } }' ) - cls.tmp_settings_contents = '{ "opencsp_root_path": { "example_data_dir": "e/f", "scratch_dir": "s/t", "scratch_name": "u" } }' cls.did_create_settings_file = False if not ft.file_exists(cls.tmp_settings_file): path, _, _ = ft.path_components(cls.tmp_settings_file) @@ -38,9 +38,7 @@ def get_opencsp_path(self): if "opencsp" not in self_path_dirs: self.skipTest(f"Can't find directory 'opencsp' in {self_path}.") - opencsp_idx = len(self_path_dirs) - list(reversed(self_path_dirs)).index( - "opencsp" - ) + opencsp_idx = len(self_path_dirs) - list(reversed(self_path_dirs)).index("opencsp") root_to_opencsp_dirs = self_path_dirs[:opencsp_idx] return os.path.sep.join(root_to_opencsp_dirs) @@ -69,18 +67,13 @@ def test_opencsp_cache_dir(self): def test_opencsp_temporary_dir(self): """Just test that the opencsp_temporary_dir() method works. TODO actually test the returned value.""" - self.assertTrue( - ("temp" in orp.opencsp_temporary_dir()) - or ("tmp" in orp.opencsp_temporary_dir()) - ) + self.assertTrue(("temp" in orp.opencsp_temporary_dir()) or ("tmp" in orp.opencsp_temporary_dir())) def test__opencsp_settings_dirs(self): """Just test that the _opencsp_settings_dirs() method works. TODO actually test the returned value.""" orp._opencsp_settings_dirs() - @unittest.skip( - "Can't get this test to work. Maybe someone smarter than me can make it work? :(" - ) + @unittest.skip("Can't get this test to work. Maybe someone smarter than me can make it work? :(") @unittest.mock.patch.dict(os.environ, {"OPENCSP_SETTINGS_DIRS": "~/.opencsp/"}) def test_settings_file(self): """Creates a temporary "settings.json" file if one doesn't already diff --git a/opencsp/common/lib/photogrammetry/ImageMarker.py b/opencsp/common/lib/photogrammetry/ImageMarker.py index 33c34a33..c95da3fa 100644 --- a/opencsp/common/lib/photogrammetry/ImageMarker.py +++ b/opencsp/common/lib/photogrammetry/ImageMarker.py @@ -24,14 +24,7 @@ class ImageMarker: """Class to hold images of Aruco markers. Contains methods to process locations of Aruco markers.""" - def __init__( - self, - image: ndarray, - point_ids: ndarray[int], - pts_im_xy: ndarray, - img_id: int, - camera: Camera, - ): + def __init__(self, image: ndarray, point_ids: ndarray[int], pts_im_xy: ndarray, img_id: int, camera: Camera): """ Instantiates ImageMarker class. 
@@ -80,27 +73,17 @@ def plot_image_with_points(self) -> None: """Plots captured image with image point and reprojected point locations""" # Calculate reprojected points pts_obj_known = self.pts_obj_xyz[self.located_markers_mask] - pts_reproj = self.camera.project_mat( - pts_obj_known, self.rvec, self.tvec - ) # Nx2 points + pts_reproj = self.camera.project_mat(pts_obj_known, self.rvec, self.tvec) # Nx2 points # Plot ax = plt.axes() ax.imshow(self.image) - ax.scatter( - *pts_reproj.T, - marker='o', - facecolor='none', - edgecolor='red', - label='Reprojected', - ) + ax.scatter(*pts_reproj.T, marker='o', facecolor='none', edgecolor='red', label='Reprojected') ax.scatter(*self.pts_im_xy.T, marker='.', color='blue', label='Image points') ax.legend() @classmethod - def load_aruco_origin( - cls, file: str, img_id: int, camera: Camera, **kwargs - ) -> 'ImageMarker': + def load_aruco_origin(cls, file: str, img_id: int, camera: Camera, **kwargs) -> 'ImageMarker': """Loads an image file, finds Aruco markers, saves the origin point. Parameters @@ -151,9 +134,7 @@ def convert_to_four_corner(self, **kwargs) -> None: self.pts_im_xy = pts_im_xy mask = np.array([1, 0, 0, 0] * num_markers) - self.located_markers_mask = ( - np.repeat(self.located_markers_mask, 4) * mask - ).astype(bool) + self.located_markers_mask = (np.repeat(self.located_markers_mask, 4) * mask).astype(bool) mask = np.array([1, np.nan, np.nan, np.nan] * num_markers) self.pts_obj_xyz = np.repeat(self.pts_obj_xyz, 4, axis=0) * mask[:, None] @@ -180,9 +161,7 @@ def attempt_calculate_pose(self) -> int: pts_img = self.located_marker_points_image # Use SolvePNP - ret, rvec, tvec = cv.solvePnP( - pts_obj, pts_img, self.camera.intrinsic_mat, self.camera.distortion_coef - ) + ret, rvec, tvec = cv.solvePnP(pts_obj, pts_img, self.camera.intrinsic_mat, self.camera.distortion_coef) rvec: ndarray = rvec.squeeze() tvec: ndarray = tvec.squeeze() @@ -193,8 +172,7 @@ def attempt_calculate_pose(self) -> int: lt.debug(f'Camera pose {self.img_id:d} solved') # Check if pose is valid - valid = ph.valid_camera_pose( - self.camera, rvec, tvec, pts_img, pts_obj) + valid = ph.valid_camera_pose(self.camera, rvec, tvec, pts_img, pts_obj) if not valid: lt.debug(f'Camera pose {self.img_id:d} not valid') return -1 @@ -303,6 +281,4 @@ def calc_reprojection_errors(self) -> ndarray: pts_world = self.pts_obj_xyz[self.located_markers_mask] # Nx3 pts_im_reproj = self.camera.project_mat(pts_world, self.rvec, self.tvec) # Calculate error - return np.sqrt( - np.sum((self.pts_im_xy[self.located_markers_mask] - pts_im_reproj) ** 2, 1) - ) + return np.sqrt(np.sum((self.pts_im_xy[self.located_markers_mask] - pts_im_reproj) ** 2, 1)) diff --git a/opencsp/common/lib/photogrammetry/bundle_adjustment.py b/opencsp/common/lib/photogrammetry/bundle_adjustment.py index 1bf8ff69..6aaa6bb5 100644 --- a/opencsp/common/lib/photogrammetry/bundle_adjustment.py +++ b/opencsp/common/lib/photogrammetry/bundle_adjustment.py @@ -67,9 +67,7 @@ def bundle_adjust( """ # Check inputs if opt_type not in ['camera', 'points', 'both']: - raise ValueError( - f'Given opt_type must be one of ("camera", "points", "both"), not "{opt_type:s}"' - ) + raise ValueError(f'Given opt_type must be one of ("camera", "points", "both"), not "{opt_type:s}"') # Calculate number of cameras and points n_cameras = rvecs.shape[0] @@ -80,9 +78,7 @@ def bundle_adjust( x0 = np.hstack((params.ravel(), pts_obj.ravel())) # Create Jacobian sparsity structure - jac_sparsity = bundle_adjustment_sparsity( - n_cameras, n_points, 
camera_indices, point_indices, opt_type - ) + jac_sparsity = bundle_adjustment_sparsity(n_cameras, n_points, camera_indices, point_indices, opt_type) # Optimize res = least_squares( @@ -93,15 +89,7 @@ def bundle_adjust( x_scale='jac', ftol=1e-4, method='trf', - args=( - n_cameras, - n_points, - camera_indices, - point_indices, - pts_img, - intrinsic_mat, - dist_coefs, - ), + args=(n_cameras, n_points, camera_indices, point_indices, pts_img, intrinsic_mat, dist_coefs), ) lt.debug('Bundle adjustment finished: ' + res.message) @@ -109,7 +97,7 @@ def bundle_adjust( data = res.x[: n_cameras * 6].reshape((n_cameras, 6)) rvecs_opt = data[:, :3] tvecs_opt = data[:, 3:] - pts_obj_opt = res.x[n_cameras * 6:].reshape((n_points, 3)) + pts_obj_opt = res.x[n_cameras * 6 :].reshape((n_points, 3)) return rvecs_opt, tvecs_opt, pts_obj_opt @@ -127,17 +115,10 @@ def rotate(points: np.ndarray, rot_vecs: np.ndarray): cos_theta = np.cos(theta) sin_theta = np.sin(theta) - return ( - cos_theta * points + sin_theta * np.cross(v, points) + dot * (1 - cos_theta) * v - ) + return cos_theta * points + sin_theta * np.cross(v, points) + dot * (1 - cos_theta) * v -def project( - points: np.ndarray, - camera_params: np.ndarray, - intrinsic_mat: np.ndarray, - dist_coefs: np.ndarray, -): +def project(points: np.ndarray, camera_params: np.ndarray, intrinsic_mat: np.ndarray, dist_coefs: np.ndarray): """ Convert 3-D points to 2-D by projecting onto camera sensor. A ray with normal incidence has a (0, 0) coordinate. @@ -150,9 +131,7 @@ def project( # Project rvec = np.array([0.0, 0.0, 0.0]) tvec = np.array([0.0, 0.0, 0.0]) - points_proj = cv.projectPoints(points_cam, rvec, tvec, intrinsic_mat, dist_coefs)[ - 0 - ][:, 0, :] + points_proj = cv.projectPoints(points_cam, rvec, tvec, intrinsic_mat, dist_coefs)[0][:, 0, :] return points_proj @@ -172,22 +151,13 @@ def fun( """ camera_params = params[: n_cameras * 6].reshape((n_cameras, 6)) - points_3d = params[n_cameras * 6:].reshape((n_points, 3)) - points_proj = project( - points_3d[point_indices], - camera_params[camera_indices], - intrinsic_mat, - dist_coefs, - ) + points_3d = params[n_cameras * 6 :].reshape((n_points, 3)) + points_proj = project(points_3d[point_indices], camera_params[camera_indices], intrinsic_mat, dist_coefs) return (points_proj - points_2d).ravel() def bundle_adjustment_sparsity( - n_cameras: int, - n_points: int, - camera_indices: np.ndarray, - point_indices: np.ndarray, - opt_type: str, + n_cameras: int, n_points: int, camera_indices: np.ndarray, point_indices: np.ndarray, opt_type: str ): """ Returns Jacobian sparsity structure. diff --git a/opencsp/common/lib/photogrammetry/photogrammetry.py b/opencsp/common/lib/photogrammetry/photogrammetry.py index 4560641b..7308e496 100644 --- a/opencsp/common/lib/photogrammetry/photogrammetry.py +++ b/opencsp/common/lib/photogrammetry/photogrammetry.py @@ -33,9 +33,7 @@ def load_image_grayscale(file: str) -> ndarray: def find_aruco_marker( - image: ndarray, - adaptiveThreshConstant: float = 10, - minMarkerPerimeterRate: float = 0.01, + image: ndarray, adaptiveThreshConstant: float = 10, minMarkerPerimeterRate: float = 0.01 ) -> tuple[ndarray[int], list[ndarray]]: # , """ Finds aruco marker corners in given image to the nearest pixel. 
@@ -65,9 +63,7 @@ def find_aruco_marker( aruco_detect_params.minMarkerPerimeterRate = minMarkerPerimeterRate # Find targets - (corners, ids, _) = cv.aruco.detectMarkers( - image, aruco_dict, parameters=aruco_detect_params - ) + (corners, ids, _) = cv.aruco.detectMarkers(image, aruco_dict, parameters=aruco_detect_params) # Refine corner locations (inaccurate using cv.cornerSubPix) # criteria = (cv.TERM_CRITERIA_EPS + cv.TermCriteria_COUNT, max_iterations, precision) @@ -85,12 +81,7 @@ def find_aruco_marker( def valid_camera_pose( - camera: Camera, - rvec: ndarray, - tvec: ndarray, - pts_image: ndarray, - pts_object: ndarray, - reproj_thresh: float = 100.0, + camera: Camera, rvec: ndarray, tvec: ndarray, pts_image: ndarray, pts_object: ndarray, reproj_thresh: float = 100.0 ) -> bool: """ Returns image IDs that have points behind the camera or have high @@ -173,13 +164,7 @@ def reprojection_errors( return pts.reshape((-1, 2)) -def plot_pts_3d( - ax: plt.Axes, - pts_obj: ndarray, - rots: list[Rotation], - tvecs: Vxyz, - needle_length: float = 1, -) -> None: +def plot_pts_3d(ax: plt.Axes, pts_obj: ndarray, rots: list[Rotation], tvecs: Vxyz, needle_length: float = 1) -> None: """ Plots 3D points and camera poses (points with needles defined by rvec/tvec). @@ -221,9 +206,7 @@ def plot_pts_3d( ax.set_zlabel('z') -def align_points( - pts_obj: Vxyz, vals: Vxyz, scale: bool = False -) -> tuple[TransformXYZ, float, ndarray[float]]: +def align_points(pts_obj: Vxyz, vals: Vxyz, scale: bool = False) -> tuple[TransformXYZ, float, ndarray[float]]: """ Returns 2D homogeneous transform to apply to input data according to alignment criteria. Values are scaled (if applicable) FIRST, then @@ -324,9 +307,7 @@ def _ref_coord_error(pts_obj: Vxyz, pts_exp: Vxyz) -> np.ndarray: return np.array(error) -def scale_points( - pts_obj: Vxyz, point_ids: ndarray, point_pairs: ndarray, dists: ndarray -) -> ndarray[float]: +def scale_points(pts_obj: Vxyz, point_ids: ndarray, point_pairs: ndarray, dists: ndarray) -> ndarray[float]: """ Scales object points and tvecs. A list of point pairs is given, and the corresponding expected distance between them. The object points are scaled @@ -394,10 +375,7 @@ def dist_from_rays(v_pt: Vxyz, u_ray_dir: Vxyz | Uxyz, v_ray_ori: Vxyz) -> ndarr def triangulate( - cameras: list[Camera], - rots: list[Rotation], - tvecs: Vxyz | list[Vxyz], - pts_img: Vxy | list[Vxy], + cameras: list[Camera], rots: list[Rotation], tvecs: Vxyz | list[Vxyz], pts_img: Vxy | list[Vxy] ) -> tuple[Vxyz, ndarray]: """Triangulates position of unknown marker. @@ -422,9 +400,7 @@ def triangulate( u_rays = np.zeros((3, len(cameras))) # direction of rays # Collect rays/origins and convert to lab reference frame - for idx, (camera, rvec, tvec, pt_img) in enumerate( - zip(cameras, rots, tvecs, pts_img) - ): + for idx, (camera, rvec, tvec, pt_img) in enumerate(zip(cameras, rots, tvecs, pts_img)): # Calculate camera position and rays in object reference frame r_cam_world = rvec.inv() v_world_cam_world = -tvec.rotate(r_cam_world) @@ -445,9 +421,7 @@ def triangulate( return pt_int, dists -def nearest_ray_intersection( - p_origins: Vxyz, u_dirs: Vxyz | Uxyz -) -> tuple[Vxyz, ndarray]: +def nearest_ray_intersection(p_origins: Vxyz, u_dirs: Vxyz | Uxyz) -> tuple[Vxyz, ndarray]: """ Finds the least squares point of intersection between N skew rays. And calculates residuals. 
@@ -480,9 +454,7 @@ def nearest_ray_intersection( # x: 3d point -> (3 ,1) array i_mat = np.eye(3) p_int = np.linalg.lstsq( - a=(i_mat - u_dirs_mat).sum(axis=0), - b=((i_mat - u_dirs_mat) @ p_origins_mat).sum(axis=0), - rcond=None, + a=(i_mat - u_dirs_mat).sum(axis=0), b=((i_mat - u_dirs_mat) @ p_origins_mat).sum(axis=0), rcond=None )[0] # Calculate intersection errors (perpendicular distances to rays) diff --git a/opencsp/common/lib/photogrammetry/test/test_bundle_adjustment.py b/opencsp/common/lib/photogrammetry/test/test_bundle_adjustment.py index 0b0ae83b..f63c1c40 100644 --- a/opencsp/common/lib/photogrammetry/test/test_bundle_adjustment.py +++ b/opencsp/common/lib/photogrammetry/test/test_bundle_adjustment.py @@ -33,16 +33,7 @@ def test_bundle_adjust_points(): tvecs_in = tvecs.copy() rvecs_out, tvecs_out, pts_obj_out = ba.bundle_adjust( - rvecs_in, - tvecs_in, - pts_obj, - cam_indices, - point_indices, - pts_img, - int_mat, - dist_coefs, - 'points', - verbose=True, + rvecs_in, tvecs_in, pts_obj, cam_indices, point_indices, pts_img, int_mat, dist_coefs, 'points', verbose=True ) np.testing.assert_allclose(rvecs_out, rvecs, atol=1e-6, rtol=0) @@ -72,16 +63,7 @@ def test_bundle_adjust_camera(): tvecs_in = tvecs.copy() + np.random.randn(*tvecs.shape) * 0.01 rvecs_out, tvecs_out, pts_obj_out = ba.bundle_adjust( - rvecs_in, - tvecs_in, - pts_obj, - cam_indices, - point_indices, - pts_img, - int_mat, - dist_coefs, - 'camera', - verbose=True, + rvecs_in, tvecs_in, pts_obj, cam_indices, point_indices, pts_img, int_mat, dist_coefs, 'camera', verbose=True ) np.testing.assert_allclose(rvecs_out, rvecs, atol=1e-3, rtol=0) diff --git a/opencsp/common/lib/photogrammetry/test/test_photogrammetry.py b/opencsp/common/lib/photogrammetry/test/test_photogrammetry.py index 4f98cf02..c6da1749 100644 --- a/opencsp/common/lib/photogrammetry/test/test_photogrammetry.py +++ b/opencsp/common/lib/photogrammetry/test/test_photogrammetry.py @@ -75,17 +75,13 @@ def test_reprojection_errors(): # Perfect case points_2d = np.zeros((4, 2)) - errors = ph.reprojection_errors( - rvecs, tvecs, pts_obj, camera, camera_indices, point_indices, points_2d - ) + errors = ph.reprojection_errors(rvecs, tvecs, pts_obj, camera, camera_indices, point_indices, points_2d) np.testing.assert_equal(errors, np.zeros((4, 2))) # 1 pixel off in x on camera 0 points_2d = np.zeros((4, 2)) points_2d[:2, 0] = 1 - errors = ph.reprojection_errors( - rvecs, tvecs, pts_obj, camera, camera_indices, point_indices, points_2d - ) + errors = ph.reprojection_errors(rvecs, tvecs, pts_obj, camera, camera_indices, point_indices, points_2d) errors_exp = np.zeros((4, 2)) errors_exp[:2, 0] = -1 np.testing.assert_equal(errors, errors_exp) @@ -103,9 +99,7 @@ def test_align_points_no_scale(): np.testing.assert_allclose(error, np.zeros(3), atol=1e-6, rtol=0) np.testing.assert_equal(scale, 1.0) - np.testing.assert_allclose( - pts_obj_aligned.data, pts_obj_optimized.data, atol=1e-6, rtol=0 - ) + np.testing.assert_allclose(pts_obj_aligned.data, pts_obj_optimized.data, atol=1e-6, rtol=0) def test_align_points_with_scale(): @@ -120,9 +114,7 @@ def test_align_points_with_scale(): np.testing.assert_allclose(error, np.zeros(3), atol=1e-6, rtol=0) np.testing.assert_almost_equal(scale, 2.0, 6) - np.testing.assert_allclose( - pts_obj_aligned.data * 2, pts_obj_optimized.data, atol=1e-6, rtol=0 - ) + np.testing.assert_allclose(pts_obj_aligned.data * 2, pts_obj_optimized.data, atol=1e-6, rtol=0) def test_scale_points(): @@ -151,9 +143,7 @@ def test_triangulate(): pts_img = Vxy([[0, 
1], [0, 1]]) pt, dists = ph.triangulate(cameras, rots, tvecs, pts_img) - np.testing.assert_allclose( - pt.data.squeeze(), np.array([0, 0, 1]), rtol=0, atol=1e-6 - ) + np.testing.assert_allclose(pt.data.squeeze(), np.array([0, 0, 1]), rtol=0, atol=1e-6) np.testing.assert_allclose(dists, np.array([0.0, 0.0]), rtol=0, atol=1e-6) @@ -163,7 +153,5 @@ def test_nearest_ray_intersection(): pt, dists = ph.nearest_ray_intersection(p_origins, u_dirs) - np.testing.assert_allclose( - pt.data.squeeze(), np.array([0, 0, 1]), atol=1e-6, rtol=0 - ) + np.testing.assert_allclose(pt.data.squeeze(), np.array([0, 0, 1]), atol=1e-6, rtol=0) np.testing.assert_allclose(dists, np.array([0, 0]), atol=1e-6, rtol=0) diff --git a/opencsp/common/lib/process/MemoryMonitor.py b/opencsp/common/lib/process/MemoryMonitor.py index e8ac895b..f8acc4e9 100644 --- a/opencsp/common/lib/process/MemoryMonitor.py +++ b/opencsp/common/lib/process/MemoryMonitor.py @@ -40,9 +40,7 @@ def __init__( - print_on_new_min (bool): If true, then for each second, if the memory available has reached a new minimum print out the usage for that second. Defaults to True. - always_print (bool): If True, then print out the usage every second. Defaults to False. """ - partitioner = ppart.ParallelPartitioner( - server_index + 1, server_index, cpu_index + 1, cpu_index - ) + partitioner = ppart.ParallelPartitioner(server_index + 1, server_index, cpu_index + 1, cpu_index) self.identifier = partitioner.identifier() """ String used to uniquely identify this server/processor core. """ self._proc: multiprocessing.Process = None @@ -71,9 +69,7 @@ def start(self): return False # start the process - executor = concurrent.futures.ThreadPoolExecutor( - 1, "mem_monitor_" + self.identifier - ) + executor = concurrent.futures.ThreadPoolExecutor(1, "mem_monitor_" + self.identifier) self._future = executor.submit(self._run) while self._start_datetime == None: time.sleep(0.1) diff --git a/opencsp/common/lib/process/ParallelPartitioner.py b/opencsp/common/lib/process/ParallelPartitioner.py index ca8f6671..b66b07cb 100644 --- a/opencsp/common/lib/process/ParallelPartitioner.py +++ b/opencsp/common/lib/process/ParallelPartitioner.py @@ -15,14 +15,7 @@ class ParallelPartitioner: Dask. """ - def __init__( - self, - nservers: int, - server_idx: int, - ncpus: int, - cpu_idx: int, - npartitions_ceil: int = -1, - ): + def __init__(self, nservers: int, server_idx: int, ncpus: int, cpu_idx: int, npartitions_ceil: int = -1): """Helps portion out data to be excecuted for a single server+cpu instance. Typical usage is to get all partitioners for all cores with the generator:: @@ -44,17 +37,13 @@ def __init__( if nservers > 0: if server_idx < 0 or server_idx >= nservers: - raise ValueError( - f"server_idx {server_idx} is out of range for nservers={nservers}" - ) + raise ValueError(f"server_idx {server_idx} is out of range for nservers={nservers}") if ncpus > 0: if cpu_idx < 0 or cpu_idx >= ncpus: raise ValueError(f"cpu_idx {cpu_idx} is out of range for ncpus={ncpus}") @classmethod - def get_partitioners( - cls, nservers: int, server_idx: int, ncpus: int, npartitions_ceil: int = -1 - ): + def get_partitioners(cls, nservers: int, server_idx: int, ncpus: int, npartitions_ceil: int = -1): """Generate partitioners to split data into even chunks for each node and cpu core. 
There are some cases where a task can be parallelized across many server nodes @@ -70,17 +59,10 @@ def get_partitioners( Returns: - list[ParallelPartitioner]: A list of parallel partitioners, one for each core for the given server_idx. """ - return [ - cls(nservers, server_idx, ncpus, i, npartitions_ceil) for i in range(ncpus) - ] + return [cls(nservers, server_idx, ncpus, i, npartitions_ceil) for i in range(ncpus)] def _get_portion_range( - self, - count: int, - nworkers: int, - worker_idx: int, - partitions_per_worker: int, - npartitions: int = -1, + self, count: int, nworkers: int, worker_idx: int, partitions_per_worker: int, npartitions: int = -1 ): if npartitions < 0: npartitions = partitions_per_worker * nworkers @@ -103,16 +85,9 @@ def _get_portion_range( return rstart, rend def _get_portion( - self, - data: list[T], - nworkers: int, - worker_idx: int, - partitions_per_worker: int, - npartitions: int = -1, + self, data: list[T], nworkers: int, worker_idx: int, partitions_per_worker: int, npartitions: int = -1 ): - rstart, rend = self._get_portion_range( - len(data), nworkers, worker_idx, partitions_per_worker, npartitions - ) + rstart, rend = self._get_portion_range(len(data), nworkers, worker_idx, partitions_per_worker, npartitions) # check range bounds if rstart == -1 and rend == -1: @@ -147,9 +122,7 @@ def get_my_range(self, data: list[T], desc: str = None): # check max partitions (server) if self.npartitions_ceil > 0: - npartitions_per_server = int( - max(self.npartitions_ceil / self.nservers, 1.0) - ) + npartitions_per_server = int(max(self.npartitions_ceil / self.nservers, 1.0)) # get the portion of data that this server should operate on rstart_serv, rend_serv = self._get_portion_range( @@ -163,9 +136,7 @@ def get_my_range(self, data: list[T], desc: str = None): return -1, -1 # Get a subselection of the data for this cpu - rstart_cpu, rend_cpu = self._get_portion_range( - rend_serv - rstart_serv, self.ncpus, self.cpu_idx, 1 - ) + rstart_cpu, rend_cpu = self._get_portion_range(rend_serv - rstart_serv, self.ncpus, self.cpu_idx, 1) rstart_cpu += rstart_serv rend_cpu += rstart_serv diff --git a/opencsp/common/lib/process/ServerSynchronizer.py b/opencsp/common/lib/process/ServerSynchronizer.py index fc9324d8..ffa9493d 100644 --- a/opencsp/common/lib/process/ServerSynchronizer.py +++ b/opencsp/common/lib/process/ServerSynchronizer.py @@ -13,12 +13,7 @@ class ServerSynchronizer: path = os.path.join(orp.opencsp_temporary_dir(), "synchronize_servers_by_file") def __init__( - self, - num_servers: int, - server_index: int, - propagate_errors=True, - timeout: int = 1000, - do_initial_wait=True, + self, num_servers: int, server_index: int, propagate_errors=True, timeout: int = 1000, do_initial_wait=True ): """Helper class to forces all servers to wait at specified synchronization points. This is particularly useful for scatter-gather type workflows. 
@@ -84,9 +79,7 @@ def __init__( if server_index == 0: self._remove_all_stop_files() else: - time.sleep( - 5 - ) # have all other servers wait for server 0 to remove the stop and error files + time.sleep(5) # have all other servers wait for server 0 to remove the stop and error files # Let the system know that this server is executing lt.info(f"ServerSynchronizer @{self.server_index} started") @@ -96,19 +89,14 @@ def __init__( self._wait(check_for_stopped_servers=False) def _wait_on_files( - self, - wait_file_path_name_exts: list[str], - stop_file_path_name_exts: list[str] = None, - msg: str = None, + self, wait_file_path_name_exts: list[str], stop_file_path_name_exts: list[str] = None, msg: str = None ): """Wait for all of the given "wait" indicator files (or their corresponding "stop" indicator files) to exist.""" alternates: dict[str, list[str]] = {} if stop_file_path_name_exts != None: for i in range(len(wait_file_path_name_exts)): alternates[wait_file_path_name_exts[i]] = [stop_file_path_name_exts[i]] - pft.wait_on_files( - wait_file_path_name_exts, self.timeout, alternates=alternates, msg=msg - ) + pft.wait_on_files(wait_file_path_name_exts, self.timeout, alternates=alternates, msg=msg) lt.debug(f"Server @{self.server_index}: found all files") # wait for all servers to see that the files exist time.sleep(5) @@ -165,9 +153,7 @@ def get_stopped_servers(self): -------- server_idxs (list[int]): List of all the stopped server indexes.""" ret: list[int] = [] - all_file_path_name_exts = [ - (i, self._get_file_stopped(i)) for i in range(self.num_servers) - ] + all_file_path_name_exts = [(i, self._get_file_stopped(i)) for i in range(self.num_servers)] for other_server_index, file_path_name_ext in all_file_path_name_exts: if ft.file_exists(file_path_name_ext): ret.append(other_server_index) @@ -180,9 +166,7 @@ def get_errored_servers(self): -------- ret (list[int]): List of all the errored server indexes.""" ret: list[tuple(int, str, str)] = [] - all_file_path_name_exts = [ - (i, self._get_file_error(i)) for i in range(self.num_servers) - ] + all_file_path_name_exts = [(i, self._get_file_error(i)) for i in range(self.num_servers)] for other_server_index, file_path_name_ext in all_file_path_name_exts: if ft.file_exists(file_path_name_ext): try: @@ -201,9 +185,7 @@ def _check_for_other_server_errors(self, method_name): error_msg (str|None): None if no errored servers, or a message indicating the type of error for the first errored server. """ errored_servers = self.get_errored_servers() - errored_servers = list( - filter(lambda es: es[0] != self.server_index, errored_servers) - ) + errored_servers = list(filter(lambda es: es[0] != self.server_index, errored_servers)) if len(errored_servers) > 0: errored_server, err_type, err_msg = errored_servers[0] return f"Error: in ServerSynchronizer.{method_name}(), server {errored_server} encountered a {err_type} with the message \"{err_msg}\"" @@ -228,9 +210,7 @@ def _wait(self, check_for_stopped_servers=True): return # create my file (1) - my_file_path_name_ext = self._get_file_waiting( - self.server_index, self._synchronization_index - ) + my_file_path_name_ext = self._get_file_waiting(self.server_index, self._synchronization_index) if not ft.file_exists(my_file_path_name_ext): ft.create_file(my_file_path_name_ext) else: @@ -243,9 +223,7 @@ def _wait(self, check_for_stopped_servers=True): # Getting ready for the _next_ wait(): # Make sure the next synchronization files don't exist. 
for i in range(self.num_servers): - next_file_path_name_ext = self._get_file_waiting( - i, self._synchronization_index + 1 - ) + next_file_path_name_ext = self._get_file_waiting(i, self._synchronization_index + 1) if ft.file_exists(next_file_path_name_ext): lt.warn( f"Warning: in ServerSynchronizer.wait(), next synchronization file {next_file_path_name_ext} " @@ -258,21 +236,12 @@ def _wait(self, check_for_stopped_servers=True): stop_file_path_name_exts = None if check_for_stopped_servers: stopped_idxs = self.get_stopped_servers() - running_idxs = list( - filter(lambda i: i not in stopped_idxs, range(self.num_servers)) - ) - wait_file_path_name_exts = [ - self._get_file_waiting(i, self._synchronization_index) - for i in running_idxs - ] + running_idxs = list(filter(lambda i: i not in stopped_idxs, range(self.num_servers))) + wait_file_path_name_exts = [self._get_file_waiting(i, self._synchronization_index) for i in running_idxs] if check_for_stopped_servers: - stop_file_path_name_exts = [ - self._get_file_stopped(i) for i in running_idxs - ] + stop_file_path_name_exts = [self._get_file_stopped(i) for i in running_idxs] self._wait_on_files( - wait_file_path_name_exts, - stop_file_path_name_exts, - msg=f"step {self._synchronization_index}", + wait_file_path_name_exts, stop_file_path_name_exts, msg=f"step {self._synchronization_index}" ) # propagate errors (4) @@ -305,9 +274,7 @@ def gather(self, value: str): sum (list[str]): All server values, in order. """ value_sync_index = self._synchronization_index - my_file_path_name_ext = self._get_file_value( - self.server_index, value_sync_index - ) + my_file_path_name_ext = self._get_file_value(self.server_index, value_sync_index) my_file_path_name_ext_tmp = my_file_path_name_ext + ".tmp" # remove the stale file, if any @@ -339,9 +306,7 @@ def gather(self, value: str): self.wait() if self._synchronization_index == value_sync_index: if self.num_servers > 1: - lt.warn( - "Huh, I expected the _synchronization_index to have incremented..." - ) + lt.warn("Huh, I expected the _synchronization_index to have incremented...") self._synchronization_index += 1 # gather the results @@ -352,9 +317,7 @@ def gather(self, value: str): with open(other_file_path_name_ext, "r") as fin: ret.append(fin.read()) else: - lt.warn( - f"Warning: in ServerSynchronizer.gather(), value file {other_file_path_name_ext} is missing!" - ) + lt.warn(f"Warning: in ServerSynchronizer.gather(), value file {other_file_path_name_ext} is missing!") return ret def _remove_all_stop_files(self): @@ -362,17 +325,12 @@ def _remove_all_stop_files(self): for i in range(self.num_servers): stop_file_path_name_ext = self._get_file_stopped(i) error_file_path_name_ext = self._get_file_error(i) - for file_path_name_ext in [ - stop_file_path_name_ext, - error_file_path_name_ext, - ]: + for file_path_name_ext in [stop_file_path_name_ext, error_file_path_name_ext]: if ft.file_exists(file_path_name_ext): try: os.remove(file_path_name_ext) except Exception as ex: - if isinstance(ex, FileNotFoundError) or isinstance( - ex, PermissionError - ): + if isinstance(ex, FileNotFoundError) or isinstance(ex, PermissionError): # Probably just attempted to delete a file at the same time as another server. # Randomly backoff to reduce the likelihood of this happening again. 
time.sleep(random.randint(1, 10) / 10) diff --git a/opencsp/common/lib/process/parallel_file_tools.py b/opencsp/common/lib/process/parallel_file_tools.py index 5da9ae0f..47cfbd95 100644 --- a/opencsp/common/lib/process/parallel_file_tools.py +++ b/opencsp/common/lib/process/parallel_file_tools.py @@ -15,12 +15,7 @@ def _file_or_alternate_existing_file(path_name_ext: str, alternates: list[str]): return None -def wait_on_files( - files: list[str], - timeout: float = 1000, - alternates: dict[str, list[str]] = None, - msg: str = None, -): +def wait_on_files(files: list[str], timeout: float = 1000, alternates: dict[str, list[str]] = None, msg: str = None): """Waits up to 'timeout' seconds for all the files to exist. Note: there is no guarantee that all files exist when this function completes. diff --git a/opencsp/common/lib/process/parallel_video_tools.py b/opencsp/common/lib/process/parallel_video_tools.py index bd4eabd4..0fa23587 100644 --- a/opencsp/common/lib/process/parallel_video_tools.py +++ b/opencsp/common/lib/process/parallel_video_tools.py @@ -80,10 +80,7 @@ def parallel_frames_to_videos( def parallel_video_to_frames( - num_servers: int, - server_index: int, - video_handler: vh.VideoHandler, - server_synchronizer: ss.ServerSynchronizer, + num_servers: int, server_index: int, video_handler: vh.VideoHandler, server_synchronizer: ss.ServerSynchronizer ): """Extract all frames from the given video, where each server extracts the frames for part of the video. To extract all frames, execute this method on each server with that server's server_index. @@ -113,26 +110,15 @@ def parallel_video_to_frames( frame_control = video_handler.frame_control # build the extraction directories for this server - dst_frames_dir_serv = os.path.join( - dst_frames_dir, f"extraction_server_{server_index}" - ) + dst_frames_dir_serv = os.path.join(dst_frames_dir, f"extraction_server_{server_index}") ft.create_directories_if_necessary(dst_frames_dir_serv) if dst_example_frames_dir != None: - dst_example_frames_dir_serv = os.path.join( - dst_example_frames_dir, f"extraction_server_{server_index}" - ) + dst_example_frames_dir_serv = os.path.join(dst_example_frames_dir, f"extraction_server_{server_index}") ft.create_directories_if_necessary(dst_example_frames_dir_serv) - frame_name_format = frame_control.get_outframe_name( - src_video_dir_name_ext, is_example_frames=False - ) - frame_name_format_example = frame_control.get_outframe_name( - src_video_dir_name_ext, is_example_frames=True - ) + frame_name_format = frame_control.get_outframe_name(src_video_dir_name_ext, is_example_frames=False) + frame_name_format_example = frame_control.get_outframe_name(src_video_dir_name_ext, is_example_frames=True) video_handler = vh.VideoHandler.VideoExtractor( - src_video_dir_name_ext, - dst_frames_dir_serv, - dst_example_frames_dir_serv, - frame_control, + src_video_dir_name_ext, dst_frames_dir_serv, dst_example_frames_dir_serv, frame_control ) # determine the number of frames in the video, and which ones this server should extract @@ -151,12 +137,8 @@ def parallel_video_to_frames( video_handler.extract_frames(start_time=rstart, end_time=rend) # remove any duplicates - duplicates_handler = vh.VideoHandler.VideoCreator( - dst_frames_dir_serv, None, None, frame_control - ) - (non_duplicate_frame_files, duplicate_frame_files) = ( - duplicates_handler.identify_duplicate_frames(0, 0) - ) + duplicates_handler = vh.VideoHandler.VideoCreator(dst_frames_dir_serv, None, None, frame_control) + (non_duplicate_frame_files, 
duplicate_frame_files) = duplicates_handler.identify_duplicate_frames(0, 0) for dup_frame in duplicate_frame_files: dup_frame = os.path.join(dst_frames_dir_serv, dup_frame) ft.delete_file(dup_frame) @@ -174,10 +156,7 @@ def parallel_video_to_frames( server_synchronizer.wait() if server_index == 0: - for dst_dir, fnf in [ - (dst_frames_dir, frame_name_format), - (dst_example_frames_dir, frame_name_format_example), - ]: + for dst_dir, fnf in [(dst_frames_dir, frame_name_format), (dst_example_frames_dir, frame_name_format_example)]: if dst_dir == None: continue diff --git a/opencsp/common/lib/process/subprocess_tools.py b/opencsp/common/lib/process/subprocess_tools.py index 82657c17..e4b83325 100644 --- a/opencsp/common/lib/process/subprocess_tools.py +++ b/opencsp/common/lib/process/subprocess_tools.py @@ -44,9 +44,7 @@ def get_executable_path(executable_name: str, dont_match: str = None) -> str: return executable_name -def filter_lines( - lines: list[pol.ProcessOutputLine], keep_stdout=True, keep_stderr=True -): +def filter_lines(lines: list[pol.ProcessOutputLine], keep_stdout=True, keep_stderr=True): ret: list[pol.ProcessOutputLine] = list(lines) if not keep_stdout: @@ -68,11 +66,7 @@ def print_lines(lines: list[pol.ProcessOutputLine]): def _collect_lines( - lines: list[pol.ProcessOutputLine], - curr_lineno: int, - buffer: str, - is_error=False, - collect_last=False, + lines: list[pol.ProcessOutputLine], curr_lineno: int, buffer: str, is_error=False, collect_last=False ): lineno = curr_lineno @@ -123,12 +117,7 @@ def _is_timed_out(proc: subprocess.Popen, start: float, timeout: float | None): def run( - cmd: str, - cwd: str = None, - stdout: str = None, - stderr: str = None, - ignore_return_code=False, - timeout: float = None, + cmd: str, cwd: str = None, stdout: str = None, stderr: str = None, ignore_return_code=False, timeout: float = None ): """ Runs the given command in the given directory, prints the output to the logger, and checks the return code. 
@@ -170,14 +159,7 @@ def run( lt.info("changing directory to " + cwd) lt.info("starting " + cmd) text_mode = True # converts stdout/stderr to strings instead of bytes - proc = subprocess.Popen( - cmd, - cwd=cwd, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=text_mode, - ) + proc = subprocess.Popen(cmd, cwd=cwd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=text_mode) start_time = time.time() new_lines: list[pol.ProcessOutputLine] = [] @@ -201,14 +183,8 @@ def run( # collect the read stdout/stderr bytes into lines if new_out == "" and new_err == "": # no new lines at the moment, take this time to filter & print existing lines - print_lines( - filter_lines( - new_lines, keep_stdout=print_stdout, keep_stderr=print_stderr - ) - ) - lines += filter_lines( - new_lines, keep_stdout=collect_stdout, keep_stderr=collect_stderr - ) + print_lines(filter_lines(new_lines, keep_stdout=print_stdout, keep_stderr=print_stderr)) + lines += filter_lines(new_lines, keep_stdout=collect_stdout, keep_stderr=collect_stderr) new_lines = [] # check timeout @@ -219,46 +195,30 @@ def run( time.sleep(0.1) else: # collect the read stdout/stderr bytes - outbuf, lineno = _collect_lines( - new_lines, lineno, outbuf + new_out, is_error=False - ) - errbuf, lineno = _collect_lines( - new_lines, lineno, errbuf + new_err, is_error=True - ) + outbuf, lineno = _collect_lines(new_lines, lineno, outbuf + new_out, is_error=False) + errbuf, lineno = _collect_lines(new_lines, lineno, errbuf + new_err, is_error=True) # finish collecting from stdout and stderr outbuf += proc.stdout.read() errbuf += proc.stderr.read() # parse any unparsed output - outbuf, lineno = _collect_lines( - new_lines, lineno, outbuf, is_error=False, collect_last=True - ) - errbuf, lineno = _collect_lines( - new_lines, lineno, errbuf, is_error=True, collect_last=True - ) + outbuf, lineno = _collect_lines(new_lines, lineno, outbuf, is_error=False, collect_last=True) + errbuf, lineno = _collect_lines(new_lines, lineno, errbuf, is_error=True, collect_last=True) proc.stdout.close() proc.stderr.close() # print any newly collected lines - print_lines( - filter_lines(new_lines, keep_stdout=print_stdout, keep_stderr=print_stderr) - ) - lines += filter_lines( - new_lines, keep_stdout=collect_stdout, keep_stderr=collect_stderr - ) + print_lines(filter_lines(new_lines, keep_stdout=print_stdout, keep_stderr=print_stderr)) + lines += filter_lines(new_lines, keep_stdout=collect_stdout, keep_stderr=collect_stderr) # throw an error code if the subprocess failed, or return the unprinted lines otherwise if proc.returncode != 0 and not ignore_return_code: stdout_str = None stderr_str = None try: - stdout_str = "\n".join( - [line.val for line in filter_lines(lines, True, False)] - ) - stderr_str = "\n".join( - [line.val for line in filter_lines(lines, False, True)] - ) + stdout_str = "\n".join([line.val for line in filter_lines(lines, True, False)]) + stderr_str = "\n".join([line.val for line in filter_lines(lines, False, True)]) except: pass raise cpe.CalledProcessError(proc.returncode, proc.args, stdout_str, stderr_str) diff --git a/opencsp/common/lib/process/test/lib/subprocess_test_helper.py b/opencsp/common/lib/process/test/lib/subprocess_test_helper.py index 046ba09c..ce79ac89 100644 --- a/opencsp/common/lib/process/test/lib/subprocess_test_helper.py +++ b/opencsp/common/lib/process/test/lib/subprocess_test_helper.py @@ -4,39 +4,17 @@ if __name__ == "__main__": import argparse - parser = argparse.ArgumentParser( - 
prog=__file__.rstrip(".py"), description='Tests the subprocess tools.' - ) - parser.add_argument( - '--simple_stdout', - action='store_true', - help="Outputs 'Hello\nworld!' on stdout.", - ) - parser.add_argument( - '--simple_stderr', - action='store_true', - help="Outputs 'Goodbye\nworld!' on stderr.", - ) + parser = argparse.ArgumentParser(prog=__file__.rstrip(".py"), description='Tests the subprocess tools.') + parser.add_argument('--simple_stdout', action='store_true', help="Outputs 'Hello\nworld!' on stdout.") + parser.add_argument('--simple_stderr', action='store_true', help="Outputs 'Goodbye\nworld!' on stderr.") parser.add_argument( '--mixed_stdout_stderr', action='store_true', help="Outputs 'foo', 'bar', and 'baz' on stdout, stderr, and stdout, respectively.", ) - parser.add_argument( - '--retcode', - type=int, - help="Causes this program to exit with the given retcode.", - ) - parser.add_argument( - '--delay_before', - type=float, - help="Sleeps for N seconds before outputing any values.", - ) - parser.add_argument( - '--delay_after', - type=float, - help="Sleeps for N seconds after outputing any values.", - ) + parser.add_argument('--retcode', type=int, help="Causes this program to exit with the given retcode.") + parser.add_argument('--delay_before', type=float, help="Sleeps for N seconds before outputing any values.") + parser.add_argument('--delay_after', type=float, help="Sleeps for N seconds after outputing any values.") args = parser.parse_args() if args.delay_before != None: diff --git a/opencsp/common/lib/process/test/test_MemoryMonitor.py b/opencsp/common/lib/process/test/test_MemoryMonitor.py index f6f664d7..fab7eba6 100644 --- a/opencsp/common/lib/process/test/test_MemoryMonitor.py +++ b/opencsp/common/lib/process/test/test_MemoryMonitor.py @@ -25,26 +25,17 @@ def test_max_lifetime_hours(self): monitor = mm.MemoryMonitor(max_lifetime_hours=(secs / (60 * 60))) monitor.start() time.sleep(1) - self.assertFalse( - monitor.done(), - "Monitor should not have exited on its own within one second", - ) + self.assertFalse(monitor.done(), "Monitor should not have exited on its own within one second") self._wait_done(monitor, 4) - self.assertTrue( - monitor.done(), "Monitor should have exited on its own within 5 seconds" - ) + self.assertTrue(monitor.done(), "Monitor should have exited on its own within 5 seconds") def test_zero_lifetime_hours(self): monitor = mm.MemoryMonitor(max_lifetime_hours=0) monitor.start() time.sleep(1) - self.assertEqual( - len(monitor._log), 0, "Monitor should not have had time to record any logs" - ) + self.assertEqual(len(monitor._log), 0, "Monitor should not have had time to record any logs") self._wait_done(monitor, 4) - self.assertTrue( - monitor.done(), "Monitor should have exited on its own within 5 seconds" - ) + self.assertTrue(monitor.done(), "Monitor should have exited on its own within 5 seconds") def test_stop(self): monitor = mm.MemoryMonitor() diff --git a/opencsp/common/lib/process/test/test_MultiprocessNonDaemonic.py b/opencsp/common/lib/process/test/test_MultiprocessNonDaemonic.py index 495306df..84a2c9c0 100644 --- a/opencsp/common/lib/process/test/test_MultiprocessNonDaemonic.py +++ b/opencsp/common/lib/process/test/test_MultiprocessNonDaemonic.py @@ -35,9 +35,7 @@ def test_single_process_retval(self): """Test that we get the expected return value.""" pool = mnd.MultiprocessNonDaemonic(1) results = pool.starmap(self.add_two, [[0]]) - self.assertEqual( - len(results), 1, "Should get one result back from the single process" - ) + 
self.assertEqual(len(results), 1, "Should get one result back from the single process") self.assertEqual(results[0], 2, "Subprocess should have added 2 to 0") def test_many_process_retval(self): @@ -46,12 +44,8 @@ def test_many_process_retval(self): outputs = [i + 2 for i in range(20)] pool = mnd.MultiprocessNonDaemonic(6) results = pool.starmap(self.add_two, inputs) - self.assertEqual( - len(results), 20, "Should get 20 results back from the 20 processes" - ) - self.assertEqual( - results, outputs, "Subprocess should have added 2 to each value" - ) + self.assertEqual(len(results), 20, "Should get 20 results back from the 20 processes") + self.assertEqual(results, outputs, "Subprocess should have added 2 to each value") def test_many_process_retval_randsleep(self): """Test that we get the expected return values, even when the subprocesses complete at random times.""" @@ -59,17 +53,9 @@ def test_many_process_retval_randsleep(self): outputs = [i + 2 for i in range(20)] pool = mnd.MultiprocessNonDaemonic(20) results = pool.starmap(self.add_two_randsleep, inputs) - self.assertEqual( - len(results), 20, "Should get 20 results back from the 20 processes" - ) - self.assertEqual( - sorted(results), outputs, "Subprocess should have added 2 to each value" - ) - self.assertEqual( - results, - outputs, - "Output order from pool.starmap() should be the same as input order", - ) + self.assertEqual(len(results), 20, "Should get 20 results back from the 20 processes") + self.assertEqual(sorted(results), outputs, "Subprocess should have added 2 to each value") + self.assertEqual(results, outputs, "Output order from pool.starmap() should be the same as input order") if __name__ == '__main__': diff --git a/opencsp/common/lib/process/test/test_ParallelPartitioner.py b/opencsp/common/lib/process/test/test_ParallelPartitioner.py index 69425b11..2b0de579 100644 --- a/opencsp/common/lib/process/test/test_ParallelPartitioner.py +++ b/opencsp/common/lib/process/test/test_ParallelPartitioner.py @@ -7,67 +7,51 @@ class TestParallelPartitioner(unittest.TestCase): def test_S1s0C1c0_list1(self): - partitioner = ppart.ParallelPartitioner( - nservers=1, server_idx=0, ncpus=1, cpu_idx=0 - ) + partitioner = ppart.ParallelPartitioner(nservers=1, server_idx=0, ncpus=1, cpu_idx=0) data = ['a'] portion = partitioner.get_my_portion(data) self.assertEqual(portion, data) def test_S1s0C1c0_list1000(self): - partitioner = ppart.ParallelPartitioner( - nservers=1, server_idx=0, ncpus=1, cpu_idx=0 - ) + partitioner = ppart.ParallelPartitioner(nservers=1, server_idx=0, ncpus=1, cpu_idx=0) data = ['a'] * 1000 portion = partitioner.get_my_portion(data) self.assertEqual(len(portion), 1000) self.assertEqual(portion, data) def test_S2s0C1c0_list1(self): - partitioner = ppart.ParallelPartitioner( - nservers=2, server_idx=0, ncpus=1, cpu_idx=0 - ) + partitioner = ppart.ParallelPartitioner(nservers=2, server_idx=0, ncpus=1, cpu_idx=0) data = ['a'] portion = partitioner.get_my_portion(data) self.assertEqual(portion, []) def test_S2s1C1c0_list1(self): - partitioner = ppart.ParallelPartitioner( - nservers=2, server_idx=1, ncpus=1, cpu_idx=0 - ) + partitioner = ppart.ParallelPartitioner(nservers=2, server_idx=1, ncpus=1, cpu_idx=0) data = ['a'] portion = partitioner.get_my_portion(data) self.assertEqual(portion, data) def test_S2s0C1c0_list2(self): - partitioner = ppart.ParallelPartitioner( - nservers=2, server_idx=0, ncpus=1, cpu_idx=0 - ) + partitioner = ppart.ParallelPartitioner(nservers=2, server_idx=0, ncpus=1, cpu_idx=0) data = ['a', 'b'] 
portion = partitioner.get_my_portion(data) self.assertEqual(portion, ['a']) def test_S2s1C1c0_list2(self): - partitioner = ppart.ParallelPartitioner( - nservers=2, server_idx=1, ncpus=1, cpu_idx=0 - ) + partitioner = ppart.ParallelPartitioner(nservers=2, server_idx=1, ncpus=1, cpu_idx=0) data = ['a', 'b'] portion = partitioner.get_my_portion(data) self.assertEqual(portion, ['b']) def test_S2s0C1c0_list1000(self): - partitioner = ppart.ParallelPartitioner( - nservers=2, server_idx=0, ncpus=1, cpu_idx=0 - ) + partitioner = ppart.ParallelPartitioner(nservers=2, server_idx=0, ncpus=1, cpu_idx=0) data = (['a'] * 500) + (['b'] * 500) portion = partitioner.get_my_portion(data) self.assertEqual(len(portion), 500) self.assertEqual(portion, ['a'] * 500) def test_S2s1C1c0_list1000(self): - partitioner = ppart.ParallelPartitioner( - nservers=2, server_idx=1, ncpus=1, cpu_idx=0 - ) + partitioner = ppart.ParallelPartitioner(nservers=2, server_idx=1, ncpus=1, cpu_idx=0) data = (['a'] * 500) + (['b'] * 500) portion = partitioner.get_my_portion(data) self.assertEqual(len(portion), 500) @@ -82,9 +66,7 @@ def test_S50ssC1c0_list25000(self): data += copy.deepcopy(alphabet) for s in range(50): - partitioner = ppart.ParallelPartitioner( - nservers=50, server_idx=s, ncpus=1, cpu_idx=0 - ) + partitioner = ppart.ParallelPartitioner(nservers=50, server_idx=s, ncpus=1, cpu_idx=0) portion = partitioner.get_my_portion(data) self.assertEqual(len(portion), 25000 / 50) self.assertEqual(portion, alphabet) @@ -107,9 +89,7 @@ def test_S1s0CNcc_list100(self): for N in range(1, 100 * 2): data_portioned = [] for c in range(N): - partitioner = ppart.ParallelPartitioner( - nservers=1, server_idx=0, ncpus=N, cpu_idx=c - ) + partitioner = ppart.ParallelPartitioner(nservers=1, server_idx=0, ncpus=N, cpu_idx=c) portion = partitioner.get_my_portion(data) data_portioned += portion self.assertLessEqual(len(portion), math.ceil(100 / N)) diff --git a/opencsp/common/lib/process/test/test_parallel_file_tools.py b/opencsp/common/lib/process/test/test_parallel_file_tools.py index e76db2f9..72c2991f 100644 --- a/opencsp/common/lib/process/test/test_parallel_file_tools.py +++ b/opencsp/common/lib/process/test/test_parallel_file_tools.py @@ -9,9 +9,7 @@ class TestSubprocess(unittest.TestCase): - path = os.path.join( - 'common', 'lib', 'process', 'test', 'data', 'output', 'parallel_file_tools' - ) + path = os.path.join('common', 'lib', 'process', 'test', 'data', 'output', 'parallel_file_tools') def setUp(self): super().setUp() diff --git a/opencsp/common/lib/process/test/test_subprocess_tools.py b/opencsp/common/lib/process/test/test_subprocess_tools.py index 23be4c44..0424dbb4 100644 --- a/opencsp/common/lib/process/test/test_subprocess_tools.py +++ b/opencsp/common/lib/process/test/test_subprocess_tools.py @@ -13,11 +13,7 @@ class TestSubprocess(unittest.TestCase): def test_echo(self): output = subt.run(f"echo 'hello, world!'") self.assertEqual(len(output), 1, f"Unexpected output:\n\t{output}") - self.assertEqual( - output[0].val.strip().strip("'"), - 'hello, world!', - f"Unexpected output:\n\t{output}", - ) + self.assertEqual(output[0].val.strip().strip("'"), 'hello, world!', f"Unexpected output:\n\t{output}") def test_success(self): subt.run(f"{sys.executable} {helper.__file__} --retcode=0") @@ -27,9 +23,7 @@ def test_failure(self): self.assertRaises(subprocess.CalledProcessError, func) def test_get_stdout(self): - output = subt.run( - f"{sys.executable} {helper.__file__} --simple_stdout", stdout="collect" - ) + output = 
subt.run(f"{sys.executable} {helper.__file__} --simple_stdout", stdout="collect") self.assertEqual(len(output), 2, f"Unexpected output:\n\t{output}") self.assertEqual(output[0].val, "Hello", f"Unexpected output:\n\t{output}") self.assertEqual(output[1].val, "world!", f"Unexpected output:\n\t{output}") @@ -38,9 +32,7 @@ def test_get_stdout(self): self.assertEqual(output[i].lineno, i) def test_get_stderr(self): - output = subt.run( - f"{sys.executable} {helper.__file__} --simple_stderr", stderr="collect" - ) + output = subt.run(f"{sys.executable} {helper.__file__} --simple_stderr", stderr="collect") self.assertEqual(len(output), 2, f"Unexpected output:\n\t{output}") self.assertEqual(output[0].val, "Goodbye", f"Unexpected output:\n\t{output}") self.assertEqual(output[1].val, "world!", f"Unexpected output:\n\t{output}") @@ -53,9 +45,7 @@ def test_get_mixed_stdout_stderr(self): # can't get mixed output on windows return output = subt.run( - f"{sys.executable} {helper.__file__} --mixed_stdout_stderr", - stdout="collect", - stderr="collect", + f"{sys.executable} {helper.__file__} --mixed_stdout_stderr", stdout="collect", stderr="collect" ) self.assertEqual(len(output), 3, f"Unexpected output:\n\t{output}") self.assertEqual(output[0].val, "foo", f"Unexpected output:\n\t{output}") @@ -78,20 +68,12 @@ def test_timeout_completes(self): ) self.assertLess(time.time() - start_time, 4.9) self.assertEqual(len(output), 2, f"Unexpected output:\n\t{output}") - self.assertEqual( - output[0].val.strip().strip("'"), 'Hello', f"Unexpected output:\n\t{output}" - ) - self.assertEqual( - output[1].val.strip().strip("'"), - 'world!', - f"Unexpected output:\n\t{output}", - ) + self.assertEqual(output[0].val.strip().strip("'"), 'Hello', f"Unexpected output:\n\t{output}") + self.assertEqual(output[1].val.strip().strip("'"), 'world!', f"Unexpected output:\n\t{output}") def test_timeout_timesout(self): """Test that a program that executes in 0.9 seconds is killed by a 0.4s timeout.""" - with self.assertRaises( - (subprocess.CalledProcessError, subprocess.TimeoutExpired) - ): + with self.assertRaises((subprocess.CalledProcessError, subprocess.TimeoutExpired)): start_time = time.time() output = subt.run( f"{sys.executable} {helper.__file__} --simple_stdout --simple_stderr --delay_before=0.5 --delay_after=0.4", diff --git a/opencsp/common/lib/render/ImageAttributeParser.py b/opencsp/common/lib/render/ImageAttributeParser.py index 8899d76b..2355fdaa 100644 --- a/opencsp/common/lib/render/ImageAttributeParser.py +++ b/opencsp/common/lib/render/ImageAttributeParser.py @@ -60,9 +60,7 @@ def __init__( except: pass if self._previous_attr != None: - prev_image_attr: ImageAttributeParser = self._previous_attr.get_parser( - ImageAttributeParser - ) + prev_image_attr: ImageAttributeParser = self._previous_attr.get_parser(ImageAttributeParser) # Sanity check: are we trying to overwrite the "original_image_source" value? if prev_image_attr != None: @@ -84,13 +82,9 @@ def set_defaults(self, other: 'ImageAttributeParser'): # Specifically for image attributes, for original image source, we # really want to maintain the absolute original image source throughout # all processing steps and files. 
- self.original_image_source = tt.default( - other.original_image_source, self.original_image_source - ) + self.original_image_source = tt.default(other.original_image_source, self.original_image_source) # the rest of these attributes can be set as normal - self.current_image_source = tt.default( - self.current_image_source, other.current_image_source - ) + self.current_image_source = tt.default(self.current_image_source, other.current_image_source) self.date_collected = tt.default(self.date_collected, other.date_collected) self.experiment_name = tt.default(self.experiment_name, other.experiment_name) self.notes = tt.default(self.notes, other.notes) @@ -104,16 +98,12 @@ def has_contents(self) -> bool: or (self.notes != None) ) - def parse_my_contents( - self, file_path_name_ext: str, raw_contents: str, my_contents: any - ): + def parse_my_contents(self, file_path_name_ext: str, raw_contents: str, my_contents: any): self.current_image_source = my_contents['current_image_source'] self.original_image_source = my_contents['original_image_source'] self.date_collected = None if my_contents['date_collected'] != None: - self.date_collected = datetime.datetime.fromisoformat( - my_contents['date_collected'] - ) + self.date_collected = datetime.datetime.fromisoformat(my_contents['date_collected']) self.experiment_name = my_contents['experiment_name'] self.notes = my_contents['notes'] diff --git a/opencsp/common/lib/render/PlotAnnotation.py b/opencsp/common/lib/render/PlotAnnotation.py index dd1ee924..c47294a0 100644 --- a/opencsp/common/lib/render/PlotAnnotation.py +++ b/opencsp/common/lib/render/PlotAnnotation.py @@ -33,9 +33,7 @@ def __init__( self.text = text self.style = style - def plot( - self, crop_box=None - ): # Crop box is [[x_min, y_min], [x_max, y_max]] or None. Only applies to 2-d points. + def plot(self, crop_box=None): # Crop box is [[x_min, y_min], [x_max, y_max]] or None. Only applies to 2-d points. """ Plot the annotation. assumes that desired plot is active. """ @@ -46,8 +44,7 @@ def plot( if len(pt0) == 2: cropped_pt_list = self.crop_pt_list_to_box(self.pt_list, crop_box) if (len(cropped_pt_list) < len(self.pt_list)) and ( - (isinstance(self.style, type(rcps.default()))) - and (self.style.linestyle != 'None') + (isinstance(self.style, type(rcps.default()))) and (self.style.linestyle != 'None') ): print( 'WARNING: In PlotAnnotation.plot(), current implementation of point cropping does not ensure proper drawing of lines connecting points.' @@ -128,17 +125,11 @@ def plot( # Error trap. else: - print( - 'ERROR: In PlotAnnotation.plot(), unexpected type="' - + str(type) - + '" encountered.' - ) + print('ERROR: In PlotAnnotation.plot(), unexpected type="' + str(type) + '" encountered.') assert False def image_draw( - self, - image, # Image to annotate. Modifies image as a side effect. - crop_box=None, + self, image, crop_box=None # Image to annotate. Modifies image as a side effect. ): # OpenCV drawing routines automatically crop to image boundaries. # Only use this if you want to crop to a box that is a subset of the image. # crop_box is [[x_min, y_min], [x_max, y_max]] or None. @@ -183,17 +174,14 @@ def image_draw( # Crop to box, if given. (Recall that OpenCV automatically crops to image boundary.) 
cropped_pt_list = self.crop_pt_list_to_box(self.pt_list, crop_box) if (len(cropped_pt_list) < len(self.pt_list)) and ( - (isinstance(self.style, type(rcps.default()))) - and (self.style.linestyle != 'None') + (isinstance(self.style, type(rcps.default()))) and (self.style.linestyle != 'None') ): print( 'WARNING: In PlotAnnotation.image_draw(), current implementation of point cropping does not ensure proper drawing of lines connecting points.' ) # Connecting lines. if (len(cropped_pt_list) > 1) and (self.style.linestyle != 'None'): - if (self.style.linestyle != '-') and ( - self.style.linestyle != 'solid' - ): + if (self.style.linestyle != '-') and (self.style.linestyle != 'solid'): print( 'WARNING: In PlotAnnotation.image_draw(), dashed or dotted lines are not implemented yet. Drawing a solid line instead.' ) @@ -207,14 +195,7 @@ def image_draw( if prev_int_pt is not None: connect_line_type = self.opencv_line_type() # Draw connecting line. - cv.line( - image, - prev_int_pt, - int_pt, - color, - thickness, - connect_line_type, - ) + cv.line(image, prev_int_pt, int_pt, color, thickness, connect_line_type) prev_int_pt = int_pt # Markers. if self.style.marker != 'None': @@ -246,14 +227,7 @@ def image_draw( color = self.opencv_color(plot_color) marker_line_type = self.opencv_line_type() # Draw marker. - cv.circle( - image, - center, - radius, - color, - thickness, - marker_line_type, - ) + cv.circle(image, center, radius, color, thickness, marker_line_type) # Label text. if (self.text is not None) and (len(self.text) > 0): print( @@ -263,9 +237,7 @@ def image_draw( # Text. elif self.type == 'text': if (self.pt_list != None) and (len(self.pt_list) > 0): - int_or_float_pt0 = self.pt_list[ - 0 - ] # Any points beyond first are ignored. + int_or_float_pt0 = self.pt_list[0] # Any points beyond first are ignored. if len(int_or_float_pt0) != 2: print( 'ERROR: In PlotAnnotation.image_draw(), when drawing a text string, unexpected point length len(int_or_float_pt0)="' @@ -280,9 +252,7 @@ def image_draw( color = self.opencv_color(self.style.color) font = self.opencv_font(self.style.fontstyle) font_scale = self.opencv_font_scale(self.style.fontsize) - font_thickness = self.opencv_font_thickness( - self.style.fontweight - ) + font_thickness = self.opencv_font_thickness(self.style.fontweight) font_line_type = self.opencv_font_line_type() origin = self.opencv_origin( self.text, @@ -294,24 +264,11 @@ def image_draw( self.style.verticalalignment, ) # Draw text. - cv.putText( - image, - self.text, - origin, - font, - font_scale, - color, - font_thickness, - font_line_type, - ) + cv.putText(image, self.text, origin, font, font_scale, color, font_thickness, font_line_type) # Error trap. else: - print( - 'ERROR: In PlotAnnotation.image_draw(), unexpected type="' - + str(type) - + '" encountered.' - ) + print('ERROR: In PlotAnnotation.image_draw(), unexpected type="' + str(type) + '" encountered.') assert False def crop_pt_list_to_box(self, input_point_list, crop_box): @@ -466,14 +423,7 @@ def opencv_font_thickness(self, plot_fontweight): return thickness def opencv_origin( - self, - text, - int_pt, - font, - font_scale, - font_thickness, - plot_horizontalalignment, - plot_verticalalignment, + self, text, int_pt, font, font_scale, font_thickness, plot_horizontalalignment, plot_verticalalignment ): # Determine size of text. text_box, baseline = cv.getTextSize(text, font, font_scale, font_thickness) @@ -521,36 +471,25 @@ def outline_annotation(point_list, color='k', linewidth=1): """ Outlines of physical objects. 
""" - return PlotAnnotation( - 'point_seq', point_list, None, rcps.outline(color=color, linewidth=linewidth) - ) + return PlotAnnotation('point_seq', point_list, None, rcps.outline(color=color, linewidth=linewidth)) def data_curve_annotation(point_list, color='b', linewidth=1): """ A data curve with data points identified. """ - return PlotAnnotation( - 'point_seq', point_list, None, rcps.data_curve(color=color, linewidth=linewidth) - ) + return PlotAnnotation('point_seq', point_list, None, rcps.data_curve(color=color, linewidth=linewidth)) def marker_annotation(point_list, marker='o', color='b', markersize=3): """ A data curve with data points identified. """ - return PlotAnnotation( - 'point_seq', point_list, None, rcps.marker(marker='o', color='b', markersize=3) - ) + return PlotAnnotation('point_seq', point_list, None, rcps.marker(marker='o', color='b', markersize=3)) def text_annotation( - point, - text_str, - fontsize='medium', - color='b', - horizontalalignment='center', - verticalalignment='center', + point, text_str, fontsize='medium', color='b', horizontalalignment='center', verticalalignment='center' ): """ A text annotation. @@ -560,9 +499,6 @@ def text_annotation( [point], text_str, rctxt.RenderControlText( - fontsize=fontsize, - color=color, - horizontalalignment=horizontalalignment, - verticalalignment=verticalalignment, + fontsize=fontsize, color=color, horizontalalignment=horizontalalignment, verticalalignment=verticalalignment ), ) diff --git a/opencsp/common/lib/render/PowerpointSlide.py b/opencsp/common/lib/render/PowerpointSlide.py index d81e97fd..28da4d7d 100644 --- a/opencsp/common/lib/render/PowerpointSlide.py +++ b/opencsp/common/lib/render/PowerpointSlide.py @@ -6,9 +6,7 @@ from opencsp.common.lib.render.lib.PowerpointImage import PowerpointImage from opencsp.common.lib.render.lib.PowerpointText import PowerpointText -from opencsp.common.lib.render_control.RenderControlPowerpointSlide import ( - RenderControlPowerpointSlide, -) +from opencsp.common.lib.render_control.RenderControlPowerpointSlide import RenderControlPowerpointSlide import opencsp.common.lib.tool.file_tools as ft import opencsp.common.lib.tool.log_tools as lt @@ -46,27 +44,19 @@ def __init__( self.texts[idx] = pps_text._replace(is_title=False) @classmethod - def template_title( - cls, title: str, authors: str, slide_control: RenderControlPowerpointSlide - ) -> "PowerpointSlide": + def template_title(cls, title: str, authors: str, slide_control: RenderControlPowerpointSlide) -> "PowerpointSlide": raise NotImplementedError @classmethod - def template_planning( - cls, slide_control: RenderControlPowerpointSlide = None - ) -> "PowerpointSlide": + def template_planning(cls, slide_control: RenderControlPowerpointSlide = None) -> "PowerpointSlide": raise NotImplementedError @classmethod - def template_overview( - cls, slide_control: RenderControlPowerpointSlide = None - ) -> "PowerpointSlide": + def template_overview(cls, slide_control: RenderControlPowerpointSlide = None) -> "PowerpointSlide": raise NotImplementedError @classmethod - def template_content_simple( - cls, slide_control: RenderControlPowerpointSlide = None - ) -> "PowerpointSlide": + def template_content_simple(cls, slide_control: RenderControlPowerpointSlide = None) -> "PowerpointSlide": """Information-containing slide with a bulleted description on the left.""" raise NotImplementedError @@ -129,13 +119,7 @@ def template_content_grid( height = slide_control.slide_size[1] - (top + bottom) content_dims = left, top, width, height get_cell_dims 
= lambda row_idx, col_idx: cls._get_cell_dims( - content_dims, - inter_cell_buffer, - inter_cell_buffer + caption_size, - nrows, - ncols, - row_idx, - col_idx, + content_dims, inter_cell_buffer, inter_cell_buffer + caption_size, nrows, ncols, row_idx, col_idx ) # generate texts @@ -180,12 +164,7 @@ def _next_empty_cell_index(self, which_shapes='any'): return idx - def add_image( - self, - image: PowerpointImage | typing.Any, - fit_or_stretch: str = None, - index: int = -1, - ): + def add_image(self, image: PowerpointImage | typing.Any, fit_or_stretch: str = None, index: int = -1): """Add an image to this slide. If this slide has predefined spaces still available, then fits this image into the first of those spaces. @@ -202,8 +181,7 @@ def add_image( image: PowerpointImage = copy.copy(image) elif image is None: lt.error_and_raise( - ValueError, - "Error: in PowerpointSlide.add_image(), can't add a 'None' type image to a slide.", + ValueError, "Error: in PowerpointSlide.add_image(), can't add a 'None' type image to a slide." ) else: image: PowerpointImage = PowerpointImage(image) @@ -238,13 +216,9 @@ def add_image( self.images[index] = image image.fit_to_cell_dimensions(old_image.cell_dims) - def add_text( - self, text: PowerpointText, index: int = -1, replace_or_shift="replace" - ): + def add_text(self, text: PowerpointText, index: int = -1, replace_or_shift="replace"): if replace_or_shift not in ["replace", "shift"]: - lt.error_and_raise( - ValueError, f"Invalid argument replace_or_shift=\"{replace_or_shift}\"" - ) + lt.error_and_raise(ValueError, f"Invalid argument replace_or_shift=\"{replace_or_shift}\"") # set this as the text's parent text = copy.copy(text) @@ -295,23 +269,11 @@ def set_title(self, title: str | PowerpointText): slide_dims = 0, 0, *self.slide_control.slide_size if title_text != None: dims = title_text.dims - text = PowerpointText( - title, - dims=dims, - cell_dims=slide_dims, - is_title=True, - parent_slide=self, - ) + text = PowerpointText(title, dims=dims, cell_dims=slide_dims, is_title=True, parent_slide=self) else: title_x, title_y = self.slide_control.title_location dims = title_x, title_y, slide_dims[2], 1 - text = PowerpointText( - title, - dims=dims, - cell_dims=slide_dims, - is_title=True, - parent_slide=self, - ) + text = PowerpointText(title, dims=dims, cell_dims=slide_dims, is_title=True, parent_slide=self) text.compute_and_assign_height(self.slide_control.title_size) else: text: PowerpointText = title @@ -319,9 +281,7 @@ def set_title(self, title: str | PowerpointText): # add the text instance if self.title_text_idx != None: - new_title_text = self.add_text( - text, self.title_text_idx, replace_or_shift="replace" - ) + new_title_text = self.add_text(text, self.title_text_idx, replace_or_shift="replace") else: new_title_text = self.add_text(text, 0, replace_or_shift="shift") @@ -332,10 +292,7 @@ def set_title(self, title: str | PowerpointText): def get_non_title_texts(self): if self.title_text_idx != None: - non_title_texts = ( - self.texts[: self.title_text_idx] - + self.texts[self.title_text_idx + 1 :] - ) + non_title_texts = self.texts[: self.title_text_idx] + self.texts[self.title_text_idx + 1 :] return non_title_texts return self.texts @@ -377,9 +334,7 @@ def clear_tmp_saved_images_files(self): # also remove all possible images for this slide's index if self.slide_control.slide_index >= 0: - pattern = PowerpointImage._get_save_dir_name_ext_pattern( - self.slide_control.slide_index, for_glob=True - ) + pattern = 
PowerpointImage._get_save_dir_name_ext_pattern(self.slide_control.slide_index, for_glob=True) dir, name, ext = ft.path_components(pattern) ft.delete_files_in_directory(dir, name + ext, error_on_dir_not_exists=False) @@ -506,12 +461,8 @@ def to_txt_file(self, file_path_name_ext: str): # save references to the images and texts to the save directory with open(file_path_name_ext, "w") as fout: non_null = lambda v: v != None - image_name_exts = list( - filter(non_null, [image.save() for image in self.images]) - ) - text_name_exts = list( - filter(non_null, [text.save() for text in self.texts]) - ) + image_name_exts = list(filter(non_null, [image.save() for image in self.images])) + text_name_exts = list(filter(non_null, [text.save() for text in self.texts])) fout.write("PowerpointSlide\n") fout.write("v1\n") @@ -523,9 +474,7 @@ def to_txt_file(self, file_path_name_ext: str): fout.write(text_name_ext + "\n") @classmethod - def from_txt_file( - cls, file_path_name_ext: str, slide_control: RenderControlPowerpointSlide = None - ): + def from_txt_file(cls, file_path_name_ext: str, slide_control: RenderControlPowerpointSlide = None): if slide_control == None: slide_control = RenderControlPowerpointSlide() diff --git a/opencsp/common/lib/render/VideoHandler.py b/opencsp/common/lib/render/VideoHandler.py index b1c462dd..acfeab4e 100644 --- a/opencsp/common/lib/render/VideoHandler.py +++ b/opencsp/common/lib/render/VideoHandler.py @@ -78,23 +78,15 @@ def __init__( self.dst_video_dir_name_ext = dst_video_dir_name_ext self.src_frames_dir = src_frames_dir self.dst_frames_dir = ( - dst_frames_dir - if dst_frames_dir != None - else os.path.join(ft.default_output_path(), "output_frames") + dst_frames_dir if dst_frames_dir != None else os.path.join(ft.default_output_path(), "output_frames") ) self.dst_example_frames_dir = ( dst_example_frames_dir if dst_example_frames_dir != None else os.path.join(ft.default_output_path(), "example_frames") ) - self.video_control = ( - video_control if video_control != None else rcv.RenderControlVideo.default() - ) - self.frame_control = ( - frame_control - if frame_control != None - else rcvf.RenderControlVideoFrames.default() - ) + self.video_control = video_control if video_control != None else rcv.RenderControlVideo.default() + self.frame_control = frame_control if frame_control != None else rcvf.RenderControlVideoFrames.default() @classmethod def VideoInspector(cls, src_video_dir_name_ext: str): @@ -118,18 +110,13 @@ def VideoCreator( @classmethod def VideoMerger(cls, src_videos_path, src_videos_ext, dst_video_dir_name_ext): return cls( - src_video_dir_name_ext=os.path.join( - src_videos_path, f"NA.{src_videos_ext}" - ), + src_video_dir_name_ext=os.path.join(src_videos_path, f"NA.{src_videos_ext}"), dst_video_dir_name_ext=dst_video_dir_name_ext, ) @classmethod def VideoTransformer( - cls, - src_video_dir_name_ext: str, - dst_video_dir_name_ext: str, - video_control: rcv.RenderControlVideo, + cls, src_video_dir_name_ext: str, dst_video_dir_name_ext: str, video_control: rcv.RenderControlVideo ): return cls( src_video_dir_name_ext=src_video_dir_name_ext, @@ -266,9 +253,7 @@ def extract_frames(self, start_time: float = None, end_time: float = None): if run == "output": frame_dir, _, frame_ext = ft.path_components(frame_path_name_ext) _, input_video_body, _ = ft.path_components(self.src_video_dir_name_ext) - n_frames = ft.count_items_in_directory( - frame_dir, name_prefix=input_video_body, name_suffix=frame_ext - ) + n_frames = ft.count_items_in_directory(frame_dir, 
name_prefix=input_video_body, name_suffix=frame_ext) # Return. return n_frames @@ -289,15 +274,11 @@ def get_extracted_frame_path_and_name_format(self, frame_type="output"): frame_dir = self.dst_frames_dir is_example_frames = False _, input_video_body, _ = ft.path_components(self.src_video_dir_name_ext) - return self.frame_control.get_outframe_path_name_ext( - frame_dir, input_video_body, is_example_frames - ) + return self.frame_control.get_outframe_path_name_ext(frame_dir, input_video_body, is_example_frames) # FILTERING DUPLICATE FRAMES # - def identify_duplicate_frames( - self, tolerance_image_size: int, tolerance_image_pixel: int - ): + def identify_duplicate_frames(self, tolerance_image_size: int, tolerance_image_pixel: int): """Finds all frame duplicates in self.src_frames_dir. Args: @@ -318,27 +299,19 @@ def identify_duplicate_frames( ) # Fetch list of all frame filenames. - input_frame_file_size_pair_list = ft.files_in_directory_with_associated_sizes( - self.src_frames_dir, sort=True - ) + input_frame_file_size_pair_list = ft.files_in_directory_with_associated_sizes(self.src_frames_dir, sort=True) n_input_frames = len(input_frame_file_size_pair_list) # Construct the sequence of frames without duplicates, and also the list of duplicate frames omitted. if n_input_frames < 2: # Then there cannot be any duplicates. - non_duplicate_frame_files = [ - ft.file_size_pair_name(pair) for pair in input_frame_file_size_pair_list - ] + non_duplicate_frame_files = [ft.file_size_pair_name(pair) for pair in input_frame_file_size_pair_list] duplicate_frame_files = [] else: # Loop through frame files, looking for duplicates. previous_frame_file_size_pair = input_frame_file_size_pair_list[0] - non_duplicate_frame_files = [ - ft.file_size_pair_name(previous_frame_file_size_pair) - ] - duplicate_frame_files: list[str] = ( - [] - ) # First frame is never a duplicate of preceding. + non_duplicate_frame_files = [ft.file_size_pair_name(previous_frame_file_size_pair)] + duplicate_frame_files: list[str] = [] # First frame is never a duplicate of preceding. for this_frame_file_size_pair in input_frame_file_size_pair_list[1:]: if self._this_frame_is_a_duplicate_of_previous( previous_frame_file_size_pair, @@ -347,19 +320,12 @@ def identify_duplicate_frames( tolerance_image_pixel, ): # Then this frame is a duplicate. - duplicate_frame_files.append( - ft.file_size_pair_name(this_frame_file_size_pair) - ) + duplicate_frame_files.append(ft.file_size_pair_name(this_frame_file_size_pair)) if len(duplicate_frame_files) == 1: - lt.info( - "Found at least one duplicate frame: " - + duplicate_frame_files[0] - ) + lt.info("Found at least one duplicate frame: " + duplicate_frame_files[0]) else: # This frame is not a duplicate. - non_duplicate_frame_files.append( - ft.file_size_pair_name(this_frame_file_size_pair) - ) + non_duplicate_frame_files.append(ft.file_size_pair_name(this_frame_file_size_pair)) previous_frame_file_size_pair = this_frame_file_size_pair # Return. @@ -399,9 +365,7 @@ def _this_frame_is_a_duplicate_of_previous( # if abs(this_size - previous_size) <= tolerance_image_size: # Frame JPEG files are the same size. They might be identical, so check content. - if self._frames_are_identical( - previous_file, this_file, tolerance_image_pixel - ): + if self._frames_are_identical(previous_file, this_file, tolerance_image_pixel): return True else: return False @@ -409,9 +373,7 @@ def _this_frame_is_a_duplicate_of_previous( # Frame JPEG files are different sizes. We conclude they cannot be identical. 
return False - def _frames_are_identical( - self, previous_frame_file: str, this_frame_file: str, tolerance_image_pixel: int - ): + def _frames_are_identical(self, previous_frame_file: str, this_frame_file: str, tolerance_image_pixel: int): """Determine if the given frames are identical. Args: @@ -426,17 +388,13 @@ def _frames_are_identical( previous_dir_body_ext = os.path.join( self.src_frames_dir, previous_frame_file ) # Already includes the extension. - this_dir_body_ext = os.path.join( - self.src_frames_dir, this_frame_file - ) # Already includes the extension. + this_dir_body_ext = os.path.join(self.src_frames_dir, this_frame_file) # Already includes the extension. lt.debug('\nIn frames_are_identical(), loading image:', previous_dir_body_ext) previous_img = cv.imread(previous_dir_body_ext) lt.debug('In frames_are_identical(), loading image:', this_dir_body_ext) this_img = cv.imread(this_dir_body_ext) lt.debug('In frames_are_identical(), comparing images...') - identical = it.images_are_identical( - previous_img, this_img, tolerance_image_pixel - ) + identical = it.images_are_identical(previous_img, this_img, tolerance_image_pixel) lt.debug('In frames_are_identical(), Done. identical =', identical) # Return. return identical @@ -445,10 +403,7 @@ def _err_if_video_exists(self, do_err: bool = False): if not do_err: return if ft.file_exists(self.dst_video_dir_name_ext): - lt.error_and_raise( - RuntimeError, - f"There is already an existing video file '{self.dst_video_dir_name_ext}'", - ) + lt.error_and_raise(RuntimeError, f"There is already an existing video file '{self.dst_video_dir_name_ext}'") def _remove_existing_video(self, do_remove: bool = False): if not do_remove: @@ -513,9 +468,7 @@ def _files_list_to_video( str_vals = [] for src_name_ext in src_names_exts: _, _, src_ext = ft.path_components(src_name_ext) - src_name_ext_norm = "file " + ft.path_to_cmd_line( - os.path.join(src_dir, src_name_ext) - ) + src_name_ext_norm = "file " + ft.path_to_cmd_line(os.path.join(src_dir, src_name_ext)) src_name_ext_norm = src_name_ext_norm.replace("\\", "/") str_vals.append(src_name_ext_norm) if not (src_ext.strip(".").lower() in self._video_extensions): @@ -525,24 +478,16 @@ def _files_list_to_video( tmp_path_name_ext = self._str_list_to_tmp_file(str_vals, tmp_dir) # ffmpeg args - args, paths = self.video_control.get_ffmpeg_args( - widthheight_vidorimg_dir_name_ext - ) - paths.update( - {"INDIR": tmp_path_name_ext, "DSTFILE": dst_video_dir_name_ext} - ) - cmd = self._build_ffmpeg_cmd( - f"-f concat -safe 0 -i %INDIR% {args} %DSTFILE%", paths - ) + args, paths = self.video_control.get_ffmpeg_args(widthheight_vidorimg_dir_name_ext) + paths.update({"INDIR": tmp_path_name_ext, "DSTFILE": dst_video_dir_name_ext}) + cmd = self._build_ffmpeg_cmd(f"-f concat -safe 0 -i %INDIR% {args} %DSTFILE%", paths) # execute ffmpeg from the frames directory subt.run(cmd, cwd=src_dir) finally: ft.delete_file(tmp_path_name_ext) - def frames_to_video( - self, frame_names: list[str], tmp_dir: str = None, overwrite: bool = False - ): + def frames_to_video(self, frame_names: list[str], tmp_dir: str = None, overwrite: bool = False): """Converts specific frames in into a video (vs construct_video() which uses all available frames). This takes about ~2 minutes for 450 frames with two Xeon E5-2695 36-thread cpus. 
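The duplicate-frame logic being reflowed here is the same flow that parallel_video_tools.parallel_video_to_frames() uses earlier in this patch. A standalone sketch of that flow (the directory path is hypothetical; the argument order to VideoCreator mirrors the parallel_video_tools call, and the RenderControlVideoFrames module path is an assumption based on the rcvf alias used in VideoHandler):

    import os
    import opencsp.common.lib.render.VideoHandler as vh
    import opencsp.common.lib.render_control.RenderControlVideoFrames as rcvf  # assumed module path
    import opencsp.common.lib.tool.file_tools as ft

    frames_dir = "/path/to/extracted_frames"  # hypothetical frames directory
    frame_control = rcvf.RenderControlVideoFrames.default()

    # Point a VideoCreator at the frames directory, find identical neighboring frames, delete them.
    handler = vh.VideoHandler.VideoCreator(frames_dir, None, None, frame_control)
    non_duplicates, duplicates = handler.identify_duplicate_frames(0, 0)  # zero size/pixel tolerance
    for dup_frame in duplicates:
        ft.delete_file(os.path.join(frames_dir, dup_frame))

With a destination video configured on the handler, frames_to_video()/construct_video() in the adjacent hunks assemble the surviving frames into a single video.
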
@@ -564,10 +509,7 @@ def frames_to_video( try: # sanitize inputs if not ft.directory_exists(self.src_frames_dir): - lt.error_and_raise( - RuntimeError, - f"Could not find the frames directory '{self.src_frames_dir}'!", - ) + lt.error_and_raise(RuntimeError, f"Could not find the frames directory '{self.src_frames_dir}'!") if len(frame_names) == 0: return None frame_names_msg = f" from frames [\"{frame_names[0]}\", ...]" @@ -577,12 +519,7 @@ def frames_to_video( # build the video self._files_list_to_video( - self.src_frames_dir, - frame_names, - self.dst_video_dir_name_ext, - tmp_dir, - overwrite, - img0_dir_path_ext, + self.src_frames_dir, frame_names, self.dst_video_dir_name_ext, tmp_dir, overwrite, img0_dir_path_ext ) return self.dst_video_dir_name_ext @@ -605,19 +542,12 @@ def construct_video(self, tmp_dir: str = None, overwrite: bool = False): """ lt.debug("In construct_video") ext = self.frame_control.inframe_format - frame_names_dict = ft.files_in_directory_by_extension( - self.src_frames_dir, [ext], sort=True - ) + frame_names_dict = ft.files_in_directory_by_extension(self.src_frames_dir, [ext], sort=True) frame_names = frame_names_dict[ext] lt.debug(f"Found {len(frame_names)} frames to construct with ext '{ext}'") return self.frames_to_video(frame_names, tmp_dir, overwrite) - def merge_videos( - self, - src_video_names: list[str] = None, - tmp_dir: str = None, - overwrite: bool = False, - ): + def merge_videos(self, src_video_names: list[str] = None, tmp_dir: str = None, overwrite: bool = False): """Merges many videos into a single video. For H.265 videos, this is a very fast operation. @@ -634,21 +564,14 @@ def merge_videos( """ lt.debug("In merge_videos") # get some values - src_video_dir, _, src_video_ext = ft.path_components( - self.src_video_dir_name_ext - ) + src_video_dir, _, src_video_ext = ft.path_components(self.src_video_dir_name_ext) dst_video_dir, _, _ = ft.path_components(self.dst_video_dir_name_ext) # sanitize inputs if not ft.directory_exists(src_video_dir): - lt.error_and_raise( - RuntimeError, - f"Could not find the source videos directory '{src_video_dir}'!", - ) + lt.error_and_raise(RuntimeError, f"Could not find the source videos directory '{src_video_dir}'!") if src_video_names == None: - src_video_names_dict = ft.files_in_directory_by_extension( - src_video_dir, [src_video_ext] - ) + src_video_names_dict = ft.files_in_directory_by_extension(src_video_dir, [src_video_ext]) src_video_names = src_video_names_dict[src_video_ext] src_video_names = listt.natural_sort(src_video_names) if len(src_video_names) == 0: @@ -660,12 +583,7 @@ def merge_videos( # build the video src_video0_dir_name_ext = os.path.join(src_video_dir, src_video_names[0]) self._files_list_to_video( - src_video_dir, - src_video_names, - self.dst_video_dir_name_ext, - tmp_dir, - overwrite, - src_video0_dir_name_ext, + src_video_dir, src_video_names, self.dst_video_dir_name_ext, tmp_dir, overwrite, src_video0_dir_name_ext ) return self.dst_video_dir_name_ext @@ -680,9 +598,7 @@ def transform_video(self, overwrite: bool = False): """ # validate input if not ft.file_exists(self.src_video_dir_name_ext): - lt.error_and_raise( - RuntimeError, f"Video '{self.src_video_dir_name_ext}' doesn't exist!" 
- ) + lt.error_and_raise(RuntimeError, f"Video '{self.src_video_dir_name_ext}' doesn't exist!") # remove the existing video file self._err_if_video_exists(not overwrite) @@ -690,12 +606,7 @@ def transform_video(self, overwrite: bool = False): # build the ffmpeg command args, paths = self.video_control.get_ffmpeg_args(self.src_video_dir_name_ext) - paths.update( - { - "INFILE": self.src_video_dir_name_ext, - "OUTFILE": self.dst_video_dir_name_ext, - } - ) + paths.update({"INFILE": self.src_video_dir_name_ext, "OUTFILE": self.dst_video_dir_name_ext}) cmd = self._build_ffmpeg_cmd(f"-i %INFILE% {args} %OUTFILE%", paths) # execute ffmpeg @@ -705,9 +616,7 @@ def transform_video(self, overwrite: bool = False): return self.dst_video_dir_name_ext @classmethod - def transform_powerpoint( - cls, src_video_dir_name_ext: str, dst_dir: str = None, overwrite: bool = False - ): + def transform_powerpoint(cls, src_video_dir_name_ext: str, dst_dir: str = None, overwrite: bool = False): """Makes a copy of the given video as '[path_and_name]_ppt[ext]' with a reduced size and codec suitable for inclusion in power point. This takes ~3 minutes for 26,000 1080p frames with two Xeon E5-2695 18-core cpus. @@ -734,11 +643,7 @@ def transform_powerpoint( def get_width_height(self, input_or_output="input"): """Returns the width and height of the source video (or image), in pixels.""" # from https://superuser.com/questions/841235/how-do-i-use-ffmpeg-to-get-the-video-resolution - path = ( - self.src_video_dir_name_ext - if input_or_output == "input" - else self.dst_video_dir_name_ext - ) + path = self.src_video_dir_name_ext if input_or_output == "input" else self.dst_video_dir_name_ext path = ft.path_to_cmd_line(path) lines = subt.run( f"ffprobe -v error -select_streams v:0 -show_entries stream=width,height -of csv=s=x:p=0 {path}" @@ -751,11 +656,7 @@ def get_width_height(self, input_or_output="input"): def get_duration(self, input_or_output="input"): """Returns the duration of the source video in seconds.""" # from https://superuser.com/questions/650291/how-to-get-video-duration-in-seconds - path = ( - self.src_video_dir_name_ext - if input_or_output == "input" - else self.dst_video_dir_name_ext - ) + path = self.src_video_dir_name_ext if input_or_output == "input" else self.dst_video_dir_name_ext path = ft.path_to_cmd_line(path) lines = subt.run( f"ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 {path}" @@ -767,11 +668,7 @@ def get_duration(self, input_or_output="input"): def get_num_frames(self, input_or_output="input"): """Returns the number of frames in the source video.""" # from https://stackoverflow.com/questions/2017843/fetch-frame-count-with-ffmpeg - path = ( - self.src_video_dir_name_ext - if input_or_output == "input" - else self.dst_video_dir_name_ext - ) + path = self.src_video_dir_name_ext if input_or_output == "input" else self.dst_video_dir_name_ext path = ft.path_to_cmd_line(path) lines = subt.run( f"ffprobe -v error -select_streams v:0 -count_packets -show_entries stream=nb_read_packets -of csv=p=0 {path}" diff --git a/opencsp/common/lib/render/View3d.py b/opencsp/common/lib/render/View3d.py index c551a785..727c65de 100644 --- a/opencsp/common/lib/render/View3d.py +++ b/opencsp/common/lib/render/View3d.py @@ -115,9 +115,7 @@ def show( # Axes aspect ratio. 
if equal: if self.view_spec['type'] == '3d': - ax3d.set_3d_axes_equal( - self.axis - ) # , set_zmin_zero=True, box_aspect=None) + ax3d.set_3d_axes_equal(self.axis) # , set_zmin_zero=True, box_aspect=None) elif ( (self.view_spec['type'] == 'xy') or (self.view_spec['type'] == 'xz') @@ -197,9 +195,7 @@ def show( # WRITE - def show_and_save_multi_axis_limits( - self, output_dir, output_figure_body, limits_list, grid=True - ): + def show_and_save_multi_axis_limits(self, output_dir, output_figure_body, limits_list, grid=True): # Draw and save. if limits_list != None: for limits in limits_list: @@ -217,11 +213,7 @@ def show_and_save_multi_axis_limits( ) else: self.show_and_save( - output_dir, - output_figure_body, - x_limits=limits[0], - y_limits=limits[1], - grid=grid, + output_dir, output_figure_body, x_limits=limits[0], y_limits=limits[1], grid=grid ) def show_and_save( @@ -286,17 +278,11 @@ def save(self, output_dir, output_figure_body, format='png', dpi=300) -> str: def limit_suffix(self): limit_suffix_str = '' if self.x_limits: - limit_suffix_str += ( - '_' + str(self.x_limits[0]) + 'x' + str(self.x_limits[1]) - ) + limit_suffix_str += '_' + str(self.x_limits[0]) + 'x' + str(self.x_limits[1]) if self.y_limits: - limit_suffix_str += ( - '_' + str(self.y_limits[0]) + 'y' + str(self.y_limits[1]) - ) + limit_suffix_str += '_' + str(self.y_limits[0]) + 'y' + str(self.y_limits[1]) if self.z_limits: - limit_suffix_str += ( - '_' + str(self.z_limits[0]) + 'z' + str(self.z_limits[1]) - ) + limit_suffix_str += '_' + str(self.z_limits[0]) + 'z' + str(self.z_limits[1]) return limit_suffix_str # Image Plotting @@ -307,9 +293,7 @@ def imshow(self, *args, colorbar=False, **kwargs) -> None: top of other plots (example on top of 3D data) use draw_image instead.""" if self.view_spec['type'] == 'image': # load the image, as necessary - load_as_necessary = lambda img: ( - img if not isinstance(img, str) else Image.open(img) - ) + load_as_necessary = lambda img: (img if not isinstance(img, str) else Image.open(img)) if 'X' in kwargs: img = kwargs['X'] kwargs['X'] = load_as_necessary(img) @@ -346,9 +330,7 @@ def draw_image(self, path_or_array: str | np.ndarray): ymid = (ybnd[1] - ybnd[0]) / 2 + ybnd[0] ydraw = [ymid - height / 2, ymid + height / 2] - self.axis.imshow( - img, extent=[xdraw[0], xdraw[1], ydraw[0], ydraw[1]], zorder=-1 - ) + self.axis.imshow(img, extent=[xdraw[0], xdraw[1], ydraw[0], ydraw[1]], zorder=-1) def pcolormesh(self, *args, colorbar=False, **kwargs) -> None: """Allows plotting like imshow, with the additional option of sizing the boxes at will. @@ -418,9 +400,7 @@ def pq2xyz(self, pq): def draw_xyz_text(self, xyz, text, style=rctxt.default()): # An xyz is [x,y,z] if len(xyz) != 3: - lt.error( - 'ERROR: In draw_xyz_text(), len(xyz)=', len(xyz), ' is not equal to 3.' 
- ) + lt.error('ERROR: In draw_xyz_text(), len(xyz)=', len(xyz), ' is not equal to 3.') assert False if self.view_spec['type'] == '3d': self.axis.text( @@ -509,12 +489,7 @@ def draw_xyz_text(self, xyz, text, style=rctxt.default()): # An xyz is [x,y,z] ) assert False - def draw_xyz( - self, - xyz, # An xyz is [x,y,z] - style: rcps.RenderControlPointSeq = None, - label: str = None, - ): + def draw_xyz(self, xyz, style: rcps.RenderControlPointSeq = None, label: str = None): # An xyz is [x,y,z] """Plots a single point, I think (BGB).""" if style == None: style = rcps.default() @@ -605,12 +580,7 @@ def draw_xyz( ) assert False - def draw_single_Pxyz( - self, - p: Pxyz, - style: rcps.RenderControlPointSeq = None, - labels: list[str] = None, - ): + def draw_single_Pxyz(self, p: Pxyz, style: rcps.RenderControlPointSeq = None, labels: list[str] = None): if labels == None: labels = [None] * len(p) if style == None: @@ -618,9 +588,7 @@ def draw_single_Pxyz( for x, y, z, label in zip(p.x, p.y, p.z, labels): self.draw_xyz((x, y, z), style, label) - def draw_xyz_list( - self, input_xyz_list: list[list], close=False, style=None, label=None - ) -> None: + def draw_xyz_list(self, input_xyz_list: list[list], close=False, style=None, label=None) -> None: """Draw lines or closed polygons. Parameters @@ -770,44 +738,23 @@ def draw_xyz_surface( ) def draw_xyz_trisurface( - self, - x: ndarray, - y: ndarray, - z: ndarray, - surface_style: RenderControlSurface = None, - **kwargs + self, x: ndarray, y: ndarray, z: ndarray, surface_style: RenderControlSurface = None, **kwargs ): if surface_style == None: surface_style = RenderControlSurface() if self.view_spec['type'] == '3d': - self.axis.plot_trisurf( - x, y, z, color=surface_style.color, alpha=surface_style.alpha, **kwargs - ) + self.axis.plot_trisurf(x, y, z, color=surface_style.color, alpha=surface_style.alpha, **kwargs) # TODO tjlarki: currently unused # TODO tjlarki: might want to remove, this is a very slow function - def quiver( - self, - X: ndarray, - Y: ndarray, - Z: ndarray, - U: ndarray, - V: ndarray, - W: ndarray, - length: float = 0, - ) -> None: + def quiver(self, X: ndarray, Y: ndarray, Z: ndarray, U: ndarray, V: ndarray, W: ndarray, length: float = 0) -> None: self.axis.quiver(X, Y, Z, U, V, W, length=0) # PQ PLOTTING def draw_pq_text(self, pq, text, style=rctxt.default()): # A pq is [p,q] if (len(pq) != 2) and (len(pq) != 3): - lt.error_and_raise( - RuntimeError, - 'ERROR: In draw_pq_text(), len(pq)=', - len(pq), - ' is not equal to 2 or 3.', - ) + lt.error_and_raise(RuntimeError, 'ERROR: In draw_pq_text(), len(pq)=', len(pq), ' is not equal to 2 or 3.') if self.view_spec['type'] == '3d': lt.error_and_raise( RuntimeError, @@ -845,11 +792,7 @@ def draw_pq_text(self, pq, text, style=rctxt.default()): # A pq is [p,q] def draw_pq(self, pq, style=rcps.default(), label=None): # A pq is [p,q] if (len(pq) != 2) and (len(pq) != 3): - lt.error( - 'ERROR: In draw_pq_text(), len(pq)=', - len(pq), - ' is not equal to 2 or 3.', - ) + lt.error('ERROR: In draw_pq_text(), len(pq)=', len(pq), ' is not equal to 2 or 3.') assert False if self.view_spec['type'] == '3d': lt.error( @@ -942,9 +885,7 @@ def draw_pq_list( def draw_xyzdxyz_list( self, - input_xyzdxyz_list: list[ - list[list, list] - ], # An xyzdxyz is [[x,y,z], [dx,dy,dz]] + input_xyzdxyz_list: list[list[list, list]], # An xyzdxyz is [[x,y,z], [dx,dy,dz]] close: bool = False, # Draw as a closed polygon. Ignore if less than three points. 
style: rcps.RenderControlPointSeq = rcps.default(), label: str = None, @@ -956,9 +897,7 @@ def draw_xyzdxyz_list( xyz_list = [xyzdxyz[0] for xyzdxyz in xyzdxyz_list] self.draw_xyz_list(xyz_list, close=close, style=style, label=label) # Setup the vector drawing style. - vector_style = rcps.outline( - color=style.vector_color, linewidth=style.vector_linewidth - ) + vector_style = rcps.outline(color=style.vector_color, linewidth=style.vector_linewidth) # Draw the vectors. for xyzdxyz in xyzdxyz_list: xyz0 = xyzdxyz[0] @@ -992,9 +931,7 @@ def draw_pqdpq_list( pq_list = [pqdpq[0] for pqdpq in pqdpq_list] self.draw_pq_list(pq_list, close=close, style=style, label=label) # Setup the vector drawing style. - vector_style = rcps.outline( - color=style.vector_color, linewidth=style.vector_linewidth - ) + vector_style = rcps.outline(color=style.vector_color, linewidth=style.vector_linewidth) # Draw the vectors. for pqdpq in pqdpq_list: pq0 = pqdpq[0] diff --git a/opencsp/common/lib/render/axis_3d.py b/opencsp/common/lib/render/axis_3d.py index 6c670d07..47642f12 100644 --- a/opencsp/common/lib/render/axis_3d.py +++ b/opencsp/common/lib/render/axis_3d.py @@ -12,9 +12,7 @@ # Set Axes Equal in 3d -def set_3d_axes_equal( - ax: Axes3D, set_zmin_zero=False, box_aspect: None | tuple[int, int, int] = (1, 1, 1) -): +def set_3d_axes_equal(ax: Axes3D, set_zmin_zero=False, box_aspect: None | tuple[int, int, int] = (1, 1, 1)): ''' Make axes of 3D plot have equal scale so that spheres appear as spheres, cubes as cubes, etc.. This is one possible solution to Matplotlib's @@ -34,9 +32,7 @@ def set_3d_axes_equal( # Check input. if set_zmin_zero and (z_limits[0] < 0): - print( - 'WARNING: Encountered negative values when attempting to set axis z limits relative to zero.' - ) + print('WARNING: Encountered negative values when attempting to set axis z limits relative to zero.') # Set z interval. if set_zmin_zero: diff --git a/opencsp/common/lib/render/figure_management.py b/opencsp/common/lib/render/figure_management.py index 68a35e59..c18344da 100644 --- a/opencsp/common/lib/render/figure_management.py +++ b/opencsp/common/lib/render/figure_management.py @@ -16,9 +16,7 @@ import matplotlib.pyplot as plt import numpy as np -from opencsp.common.lib.render_control.RenderControlFigureRecord import ( - RenderControlFigureRecord, -) +from opencsp.common.lib.render_control.RenderControlFigureRecord import RenderControlFigureRecord from opencsp.common.lib.render_control.RenderControlFigure import RenderControlFigure import opencsp.common.lib.render_control.RenderControlAxis as rca import opencsp.common.lib.render_control.RenderControlFigureRecord as rcfr @@ -58,7 +56,7 @@ def reset_figure_management(): def _mpl_pyplot_figure(*vargs, **kwargs): - """ Initializes and returns a matplotlib.pyplot.figure() instance. + """Initializes and returns a matplotlib.pyplot.figure() instance. If creating the figure fails, try again (up to two more times). @@ -83,6 +81,7 @@ def _mpl_pyplot_figure(*vargs, **kwargs): # try a third time lt.warn("Failed to create a matplotlib.pyplot.figure instance. Trying again (3rd attempt).") import time + time.sleep(1) return plt.figure(*vargs, **kwargs) @@ -91,15 +90,9 @@ def tile_figure( name=None, # Handle and title of figure window. tile_array: tuple[int, int] = (3, 2), # (n_y, n_x) ~ (columns, rows) tile_square: bool = False, # Force figure to have equal x:y aspect ratio. - screen_size: tuple[float, float] = ( - 19.0, - 10.0, - ), # Screen (width, height) in "inches." Set by experimentation. 
+ screen_size: tuple[float, float] = (19.0, 10.0), # Screen (width, height) in "inches." Set by experimentation. header_height: float = 0.8, # Height of window title and display tool header, in "inches." - screen_pixels: tuple[float, float] = ( - 1920, - 1080, - ), # (n_x, n_y). Subtract task bar pixels from y. + screen_pixels: tuple[float, float] = (1920, 1080), # (n_x, n_y). Subtract task bar pixels from y. task_bar_pixels: float = 40, ): # Height of task bar in pixels. """ @@ -165,18 +158,14 @@ def display_image( figsize: tuple[float, float] = (6.4, 4.8), # inch. tile: bool = True, # True => Lay out figures in grid. False => Place at upper_left or default screen center. tile_array: tuple[int, int] = (3, 2), # (n_x, n_y) - upper_left_xy: tuple[ - float, float - ] = None, # pixel. (0,0) --> Upper left corner of screen. + upper_left_xy: tuple[float, float] = None, # pixel. (0,0) --> Upper left corner of screen. cmap=None, # Color scheme to use. block=False, ) -> plt.Figure: """If all you want to do is draw an image to the screen, then this is the method for you.""" # set up the figure axis_control = rca.image(grid=False) - figure_control = RenderControlFigure( - tile=tile, tile_array=tile_array, figsize=figsize, upper_left_xy=upper_left_xy - ) + figure_control = RenderControlFigure(tile=tile, tile_array=tile_array, figsize=figsize, upper_left_xy=upper_left_xy) view_spec_2d = vs.view_spec_im() fig_record = setup_figure( figure_control, @@ -204,9 +193,7 @@ def _setup_figure( name: str = None, # Figure handle and title of figure window. If none, use title. title: str = None, # Title of plot (before number is added, if applicable). caption: str = None, # Caption providing concise descrption plot. Optional details may be added via comments. - comments: list[ - str - ] = None, # List of strings including comments to associate with the figure. + comments: list[str] = None, # List of strings including comments to associate with the figure. # String of form "code_file.function_name()" showing where to look in code for call that generated this figure. code_tag: str = None, ) -> RenderControlFigureRecord: @@ -236,11 +223,7 @@ def _setup_figure( # Create figure. if figure_control.tile: - fig = tile_figure( - name, - tile_array=figure_control.tile_array, - tile_square=figure_control.tile_square, - ) + fig = tile_figure(name, tile_array=figure_control.tile_array, tile_square=figure_control.tile_square) else: fig = _mpl_pyplot_figure(name, figsize=figure_control.figsize) if figure_control.upper_left_xy: @@ -258,9 +241,7 @@ def _setup_figure( plt.grid() # Update figure collection variables. - fig_record = rcfr.RenderControlFigureRecord( - name, title, caption, figure_num, fig, axis_control - ) + fig_record = rcfr.RenderControlFigureRecord(name, title, caption, figure_num, fig, axis_control) global fig_record_list fig_record_list.append(fig_record) @@ -320,16 +301,7 @@ def setup_figure( # Setup the figure. fig_record = _setup_figure( - figure_control, - axis_control, - equal, - number_in_name, - input_prefix, - name, - title, - caption, - comments, - code_tag, + figure_control, axis_control, equal, number_in_name, input_prefix, name, title, caption, comments, code_tag ) axis_control = fig_record.axis_control @@ -362,9 +334,7 @@ def setup_figure( ) # Create the view object. - view = v3d.View3d( - fig_record.figure, ax, view_spec=view_spec, equal=equal, parent=fig_record - ) + view = v3d.View3d(fig_record.figure, ax, view_spec=view_spec, equal=equal, parent=fig_record) # Add view to log data. 
fig_record.axis = ax fig_record.view = view @@ -410,16 +380,7 @@ def setup_figure_for_3d_data( # Setup the figure. fig_record = _setup_figure( - figure_control, - axis_control, - equal, - number_in_name, - input_prefix, - name, - title, - caption, - comments, - code_tag, + figure_control, axis_control, equal, number_in_name, input_prefix, name, title, caption, comments, code_tag ) axis_control = fig_record.axis_control @@ -462,9 +423,7 @@ def setup_figure_for_3d_data( ) # Create the view object. - view = v3d.View3d( - fig_record.figure, ax, view_spec=view_spec, equal=equal, parent=fig_record - ) + view = v3d.View3d(fig_record.figure, ax, view_spec=view_spec, equal=equal, parent=fig_record) # Add view to log data. fig_record.axis = ax fig_record.view = view @@ -484,9 +443,7 @@ def display_plot( figsize: tuple[float, float] = (6.4, 4.8), # inch. tile: bool = True, # True => Lay out figures in grid. False => Place at upper_left or default screen center. tile_array: tuple[float, float] = (3, 2), # (n_x, n_y) - upper_left_xy: tuple[ - float, float - ] = None, # pixel. (0,0) --> Upper left corner of screen. + upper_left_xy: tuple[float, float] = None, # pixel. (0,0) --> Upper left corner of screen. legend: bool = True, # Whether to draw a legend. color='k', linewidth: float = 1, @@ -503,9 +460,7 @@ def display_plot( fig.canvas.manager.window.move(x, y) if title and len(title) != 0: plt.title(title) - (line,) = plt.plot( - x, y, color=color, linewidth=linewidth, marker=marker, markersize=markersize - ) + (line,) = plt.plot(x, y, color=color, linewidth=linewidth, marker=marker, markersize=markersize) if label: line.set_label(label) # # Rotate x-axis tick marks. @@ -552,9 +507,7 @@ def print_figure_summary() -> None: fig_record.print_comments() -def save_all_figures( - output_path: str, format: str = None, timeout: float = None, raise_on_timeout=False -): +def save_all_figures(output_path: str, format: str = None, timeout: float = None, raise_on_timeout=False): """Saves all figures opened with setup_figure (since reset_figure_management) to the given directory. The purpose for timeout is to let the program fail gracefully @@ -600,11 +553,7 @@ def save_all_figures( for fig_record in fig_record_list: # start the save results = [] - t = Thread( - target=lambda: results.append( - fig_record.save(output_path, format=format) - ) - ) + t = Thread(target=lambda: results.append(fig_record.save(output_path, format=format))) t.start() # wait for the save to finish diff --git a/opencsp/common/lib/render/image_plot.py b/opencsp/common/lib/render/image_plot.py index ec6cf086..311acbdb 100644 --- a/opencsp/common/lib/render/image_plot.py +++ b/opencsp/common/lib/render/image_plot.py @@ -47,17 +47,10 @@ def plot_image_figure( output_body_ext = output_body + '.png' if include_figure_idx_in_filename: global global_figure_idx - output_body_ext = ( - '{0:03d}'.format(global_figure_idx) + '_' + output_body_ext - ) + output_body_ext = '{0:03d}'.format(global_figure_idx) + '_' + output_body_ext global_figure_idx += 1 output_dir_body_ext = os.path.join(output_dir, output_body_ext) - print( - 'In plot_image_figure(), called from ' - + context_str - + ', writing ' - + output_dir_body_ext - ) + print('In plot_image_figure(), called from ' + context_str + ', writing ' + output_dir_body_ext) plt.savefig(output_dir_body_ext, dpi=dpi) # Close plot to free up resources. 
plt.close() diff --git a/opencsp/common/lib/render/lib/PowerpointImage.py b/opencsp/common/lib/render/lib/PowerpointImage.py index 7822f1c5..474f7efb 100644 --- a/opencsp/common/lib/render/lib/PowerpointImage.py +++ b/opencsp/common/lib/render/lib/PowerpointImage.py @@ -11,9 +11,7 @@ class PowerpointImage(pps.PowerpointShape): - _tmp_save_path = os.path.join( - orp.opencsp_temporary_dir(), "PowerpointImage/images/tmp" - ) + _tmp_save_path = os.path.join(orp.opencsp_temporary_dir(), "PowerpointImage/images/tmp") def __init__( self, @@ -62,15 +60,11 @@ def __init__( def has_val(self): return self._val is not None - def get_val( - self, - ) -> None | str | np.ndarray | Image.Image | rcfr.RenderControlFigureRecord: + def get_val(self) -> None | str | np.ndarray | Image.Image | rcfr.RenderControlFigureRecord: """Get the image assigned to this instance. What you probably actually want is get_saved_path().""" return self._val - def set_val( - self, image: str | np.ndarray | Image.Image | rcfr.RenderControlFigureRecord - ): + def set_val(self, image: str | np.ndarray | Image.Image | rcfr.RenderControlFigureRecord): self._val = image self.width = -1 self.height = -1 @@ -89,9 +83,7 @@ def set_val( def _test_saved_path(self): """Verification check that I (BGB) haven't goofed up how images are saved to temporary files.""" - if ft.path_to_cmd_line(self.get_saved_path()) == ft.path_to_cmd_line( - str(self._val) - ): + if ft.path_to_cmd_line(self.get_saved_path()) == ft.path_to_cmd_line(str(self._val)): if "tmp" in str(self._val): pass # lt.info(f"Image val and save path are the same:\nval: {self._val}\nsave path: {self.get_saved_path()}") else: @@ -149,10 +141,7 @@ def dims_pptx(self): @staticmethod def _image_dims_relative_to_cell( - cell_dims: tuple[float, float, float, float], - image_width: int, - image_height: int, - stretch=False, + cell_dims: tuple[float, float, float, float], image_width: int, image_height: int, stretch=False ): """Returns the x, y, width, and height of an image fitted to the dimensions of the given cell.""" cell_x, cell_y, cell_width, cell_height = cell_dims @@ -183,16 +172,12 @@ def _image_dims_relative_to_cell( def fit_to_cell_dimensions(self, cell_dims: tuple[float, float, float, float]): width, height = self.get_size() self.cell_dims = cell_dims - self.dims = self._image_dims_relative_to_cell( - self.cell_dims, width, height, self.stretch - ) + self.dims = self._image_dims_relative_to_cell(self.cell_dims, width, height, self.stretch) def stretch_to_cell_dimensions(self, cell_dims: tuple[float, float, float, float]): width, height = self.get_size() self.cell_dims = cell_dims - self.dims = self._image_dims_relative_to_cell( - self.cell_dims, width, height, stretch=True - ) + self.dims = self._image_dims_relative_to_cell(self.cell_dims, width, height, stretch=True) def reduce_size(self, reduced_image_size_scale: float = -1): """If the given image is significantly bigger than its rendered size, @@ -220,9 +205,7 @@ def reduce_size(self, reduced_image_size_scale: float = -1): # image is larger than is reasonable, shrink it pil_image: Image.Image = Image.open(self.get_saved_path()) - lt.debug( - f"Resizing from ({image_width,image_height}) to ({expected_width_pixels,expected_height_pixels})" - ) + lt.debug(f"Resizing from ({image_width,image_height}) to ({expected_width_pixels,expected_height_pixels})") pil_image = pil_image.resize((expected_width_pixels, expected_height_pixels)) pil_image.save(self.get_saved_path()) @@ -356,9 +339,7 @@ def from_txt_file(cls, path_name_ext: str): 
image_path_name_ext = None if not has_val else os.path.join(path, slines[3]) caption = None if caption_is_none else caption - return cls( - image_path_name_ext, dims, cell_dims, caption_is_above, caption, stretch - ) + return cls(image_path_name_ext, dims, cell_dims, caption_is_above, caption, stretch) @classmethod def _get_save_dir_name_ext_pattern(cls, slide_idx: int = None, for_glob=False): @@ -471,12 +452,8 @@ def append_tmp_path(self, append_dir: str): def clear_tmp_save_all(cls): """Remove all temporarily saved files from PowerpointImage.save()""" if ft.directory_exists(cls._tmp_save_path, error_if_exists_as_file=False): - ft.delete_files_in_directory( - cls._tmp_save_path, "*.png", error_on_dir_not_exists=False - ) - ft.delete_files_in_directory( - cls._tmp_save_path, "*.png.txt", error_on_dir_not_exists=False - ) + ft.delete_files_in_directory(cls._tmp_save_path, "*.png", error_on_dir_not_exists=False) + ft.delete_files_in_directory(cls._tmp_save_path, "*.png.txt", error_on_dir_not_exists=False) @classmethod def append_tmp_path_all(cls, append_dir: str): diff --git a/opencsp/common/lib/render/lib/PowerpointShape.py b/opencsp/common/lib/render/lib/PowerpointShape.py index 5f4ef83e..4c12c16f 100644 --- a/opencsp/common/lib/render/lib/PowerpointShape.py +++ b/opencsp/common/lib/render/lib/PowerpointShape.py @@ -4,11 +4,7 @@ class PowerpointShape: - def __init__( - self, - cell_dims: tuple[float, float, float, float] = None, - code_location: str = None, - ): + def __init__(self, cell_dims: tuple[float, float, float, float] = None, code_location: str = None): """This class supplements the shape class from python-pptx. It allows us to do our custom layouts a little bit easier. Args: diff --git a/opencsp/common/lib/render/lib/PowerpointText.py b/opencsp/common/lib/render/lib/PowerpointText.py index 8dbb2fa5..e54e4e64 100644 --- a/opencsp/common/lib/render/lib/PowerpointText.py +++ b/opencsp/common/lib/render/lib/PowerpointText.py @@ -7,9 +7,7 @@ class PowerpointText(pps.PowerpointShape): - _tmp_save_path = os.path.join( - orp.opencsp_temporary_dir(), "PowerpointTexts", "texts", "tmp" - ) + _tmp_save_path = os.path.join(orp.opencsp_temporary_dir(), "PowerpointTexts", "texts", "tmp") def __init__( self, @@ -84,12 +82,7 @@ def _to_text_file(self, path_name_ext: str): with open(path_name_ext, "w") as fout: fout.write("PowerpointText\n") fout.write("v1\n") - for v in [ - self._dims_to_str(self.dims), - self._dims_to_str(self.cell_dims), - self.is_title, - self.has_val(), - ]: + for v in [self._dims_to_str(self.dims), self._dims_to_str(self.cell_dims), self.is_title, self.has_val()]: fout.write(f"{v}\n") fout.write(f"{self.get_val()}") @@ -103,8 +96,7 @@ def from_txt_file(cls, path_name_ext: str): slines = [line.strip() for line in lines] if len(slines) < 2: lt.error_and_raise( - RuntimeError, - f"Error: in PowerpointSlide.from_txt_file(), not enough lines in file {path_name_ext}", + RuntimeError, f"Error: in PowerpointSlide.from_txt_file(), not enough lines in file {path_name_ext}" ) file_type = slines[0] diff --git a/opencsp/common/lib/render/test/test_ImageAttributeParser.py b/opencsp/common/lib/render/test/test_ImageAttributeParser.py index dbcf58af..6df205ca 100644 --- a/opencsp/common/lib/render/test/test_ImageAttributeParser.py +++ b/opencsp/common/lib/render/test/test_ImageAttributeParser.py @@ -13,15 +13,9 @@ def setUp(self) -> None: path, _, _ = ft.path_components(__file__) self.data_dir = os.path.join(path, "data", "input", "ImageAttributeParser") self.out_dir = os.path.join(path, 
"data", "output", "ImageAttributeParser") - self.img_file = os.path.join( - self.out_dir, f"nonexistant_image_{self._testMethodName}.png" - ) - attr_file_src = os.path.join( - self.data_dir, f"nonexistant_image_{self._testMethodName}.txt" - ) - self.attr_file = os.path.join( - self.out_dir, f"nonexistant_image_{self._testMethodName}.txt" - ) + self.img_file = os.path.join(self.out_dir, f"nonexistant_image_{self._testMethodName}.png") + attr_file_src = os.path.join(self.data_dir, f"nonexistant_image_{self._testMethodName}.txt") + self.attr_file = os.path.join(self.out_dir, f"nonexistant_image_{self._testMethodName}.txt") ft.create_directories_if_necessary(self.out_dir) @@ -64,17 +58,13 @@ def test_with_attrfile(self): self.assertEqual(self.img_file, parser.current_image_source) # The rest of these values should be set by the attributes file, since # they are not given in the ImageAttributeParser constructor. - self.assertEqual( - datetime.datetime.fromisoformat('2024-02-17'), parser.date_collected - ) + self.assertEqual(datetime.datetime.fromisoformat('2024-02-17'), parser.date_collected) self.assertEqual('c', parser.experiment_name) self.assertEqual('d', parser.notes) # Should raise an error when trying to replace the original_image_source. with self.assertRaises(ValueError): - iap.ImageAttributeParser( - current_image_source=self.img_file, original_image_source='z' - ) + iap.ImageAttributeParser(current_image_source=self.img_file, original_image_source='z') def test_partial_attrfile(self): """Constructor pulls in non-None values from existing attributes file""" diff --git a/opencsp/common/lib/render/test/test_VideoHandler.py b/opencsp/common/lib/render/test/test_VideoHandler.py index ae681e89..a70afd43 100644 --- a/opencsp/common/lib/render/test/test_VideoHandler.py +++ b/opencsp/common/lib/render/test/test_VideoHandler.py @@ -12,26 +12,8 @@ class test_VideoHandler(unittest.TestCase): - dir_in = os.path.join( - orp.opencsp_code_dir(), - 'common', - 'lib', - 'render', - 'test', - 'data', - 'input', - 'VideoHandler', - ) - dir_out = os.path.join( - orp.opencsp_code_dir(), - 'common', - 'lib', - 'render', - 'test', - 'data', - 'output', - 'VideoHandler', - ) + dir_in = os.path.join(orp.opencsp_code_dir(), 'common', 'lib', 'render', 'test', 'data', 'input', 'VideoHandler') + dir_out = os.path.join(orp.opencsp_code_dir(), 'common', 'lib', 'render', 'test', 'data', 'output', 'VideoHandler') @classmethod def setUpClass(cls) -> None: @@ -79,9 +61,7 @@ def test_extract_frames(self): test_dir = os.path.join(self.dir_out, "test_extract_frames") ft.create_directories_if_necessary(test_dir) - handler = vh.VideoHandler.VideoExtractor( - src_video_dir_name_ext, test_dir, None, self.frame_control - ) + handler = vh.VideoHandler.VideoExtractor(src_video_dir_name_ext, test_dir, None, self.frame_control) nextracted = handler.extract_frames() # verify that we extracted 25 frames from a 1 second clip at 25 fps @@ -91,10 +71,7 @@ def test_extract_frames(self): for frame_idx, color_idx in [(3, 0), (11, 1), (20, 2)]: img_name_ext = frame_name_format % frame_idx img_dir_name_ext = os.path.join(test_dir, img_name_ext) - self.assertTrue( - os.path.exists(img_dir_name_ext), - f"Could not find image file \"{img_dir_name_ext}\"", - ) + self.assertTrue(os.path.exists(img_dir_name_ext), f"Could not find image file \"{img_dir_name_ext}\"") img = cv2.imread(img_dir_name_ext) row_avg = np.average(img, axis=0) @@ -108,9 +85,7 @@ def test_extract_example_frames(self): example_dir = os.path.join(out_dir, "examples") 
ft.create_directories_if_necessary(example_dir) - handler = vh.VideoHandler.VideoExtractor( - src_video_dir_name_ext, out_dir, example_dir, self.frame_control - ) + handler = vh.VideoHandler.VideoExtractor(src_video_dir_name_ext, out_dir, example_dir, self.frame_control) handler.extract_frames() # verify that we extracted 1 example frame from a 1 second clip @@ -132,9 +107,7 @@ def test_construct_video(self): self.assertAlmostEqual(actual_duration, expected_duration, delta=0.01) def test_frames_to_video_duplicates(self): - dst_video_dir_name_ext = os.path.join( - self.dir_out, "test_frames_to_video_duplicates.mp4" - ) + dst_video_dir_name_ext = os.path.join(self.dir_out, "test_frames_to_video_duplicates.mp4") handler = vh.VideoHandler.VideoCreator( self.dir_in, dst_video_dir_name_ext, self.video_control, self.frame_control ) @@ -149,9 +122,7 @@ def test_frames_to_video_duplicates(self): self.assertAlmostEqual(actual_duration, expected_duration, delta=0.01) def test_frames_to_video_exclusions(self): - dst_video_dir_name_ext = os.path.join( - self.dir_out, "test_frames_to_video_exclusions.mp4" - ) + dst_video_dir_name_ext = os.path.join(self.dir_out, "test_frames_to_video_exclusions.mp4") handler = vh.VideoHandler.VideoCreator( self.dir_in, dst_video_dir_name_ext, self.video_control, self.frame_control ) @@ -165,9 +136,7 @@ def test_transform_powerpoint(self): src_video_dir_name_ext = os.path.join(self.dir_in, "1s.mp4") dst_dir = os.path.join(self.dir_out, "test_transform_powerpoint") ft.create_directories_if_necessary(dst_dir) - dst_video_dir_name_ext = vh.VideoHandler.transform_powerpoint( - src_video_dir_name_ext, dst_dir, overwrite=True - ) + dst_video_dir_name_ext = vh.VideoHandler.transform_powerpoint(src_video_dir_name_ext, dst_dir, overwrite=True) # verify the width and height handler = vh.VideoHandler.VideoInspector(dst_video_dir_name_ext) diff --git a/opencsp/common/lib/render/test/test_figure_management.py b/opencsp/common/lib/render/test/test_figure_management.py index d271b20e..b2a136af 100644 --- a/opencsp/common/lib/render/test/test_figure_management.py +++ b/opencsp/common/lib/render/test/test_figure_management.py @@ -19,12 +19,8 @@ class test_figure_management(unittest.TestCase): - dir_in = os.path.join( - 'common', 'lib', 'render', 'test', 'data', 'input', 'figure_management' - ) - dir_out = os.path.join( - 'common', 'lib', 'render', 'test', 'data', 'output', 'figure_management' - ) + dir_in = os.path.join('common', 'lib', 'render', 'test', 'data', 'input', 'figure_management') + dir_out = os.path.join('common', 'lib', 'render', 'test', 'data', 'output', 'figure_management') def __init__(self, *vargs, **kwargs): super().__init__(*vargs, **kwargs) @@ -58,11 +54,7 @@ def test_save_all_figures_line(self): fm.reset_figure_management() figure_control = rcfg.RenderControlFigure(tile_array=(1, 1), tile_square=True) - fig_record = fm.setup_figure( - figure_control, - name=name, - code_tag=f"{__file__}.test_save_all_figures_line()", - ) + fig_record = fm.setup_figure(figure_control, name=name, code_tag=f"{__file__}.test_save_all_figures_line()") view = fig_record.view line = list(range(100)) view.draw_p_list(line) @@ -77,13 +69,9 @@ def test_save_all_figures_two_lines(self): lines = [[100] * 100, [0] * 100] for i in range(2): - figure_control = rcfg.RenderControlFigure( - tile_array=(1, 1), tile_square=True - ) + figure_control = rcfg.RenderControlFigure(tile_array=(1, 1), tile_square=True) fig_record = fm.setup_figure( - figure_control, - name=names[i], - 
code_tag=f"{__file__}.test_save_all_figures_two_lines()", + figure_control, name=names[i], code_tag=f"{__file__}.test_save_all_figures_two_lines()" ) view = fig_record.view line = lines[i] @@ -99,9 +87,7 @@ def test_save_all_figures_timeout_0(self): figure_control = rcfg.RenderControlFigure(tile_array=(1, 1), tile_square=True) fig_record = fm.setup_figure( - figure_control, - name=name, - code_tag=f"{__file__}.test_save_all_figures_timeout_0()", + figure_control, name=name, code_tag=f"{__file__}.test_save_all_figures_timeout_0()" ) view = fig_record.view line = list(range(100)) @@ -112,9 +98,7 @@ def test_save_all_figures_timeout_0(self): r".*failed to save.*", msg="Failed to time out in 0 seconds while trying to save figure", ): - figs, txts, failed = fm.save_all_figures( - self.dir_out, timeout=0, raise_on_timeout=True - ) + figs, txts, failed = fm.save_all_figures(self.dir_out, timeout=0, raise_on_timeout=True) self.assertEqual(len(figs), 0) self.assertEqual(len(txts), 0) @@ -124,11 +108,7 @@ def _figure_manager_timeout_1(self): fm.reset_figure_management() figure_control = rcfg.RenderControlFigure(tile_array=(1, 1), tile_square=True) - fig_old = fm.setup_figure( - figure_control, - name=name, - code_tag=f"{__file__}._figure_manager_timeout_1()", - ) + fig_old = fm.setup_figure(figure_control, name=name, code_tag=f"{__file__}._figure_manager_timeout_1()") # replace the figure record with one that will never finish saving fig_record = rcfr_is.RenderControlFigureRecordInfSave( @@ -152,9 +132,7 @@ def _figure_manager_timeout_1(self): def save_all_figures_fail_and_raise_executor(self): """try to save (should fail and raise an error)""" fm = self._figure_manager_timeout_1() - figs, txts, failed = fm.save_all_figures( - self.dir_out, timeout=1, raise_on_timeout=True - ) + figs, txts, failed = fm.save_all_figures(self.dir_out, timeout=1, raise_on_timeout=True) def test_save_all_figures_fail_and_raise(self): """Verifies that the save_all_figures() method will eventually time out for a figure record whose save() method never finishes.""" @@ -180,17 +158,9 @@ def save_all_figures_fail_no_raise_executor(self): """try to save (should fail and return the failed figure record)""" fm = self._figure_manager_timeout_1() figs, txts, failed = fm.save_all_figures(self.dir_out, timeout=1) - self.assertEqual( - 1, - len(failed), - "save_all_figures() didn't return the correct number of figure records", - ) + self.assertEqual(1, len(failed), "save_all_figures() didn't return the correct number of figure records") fig_record = fm.fig_record_list[0] - self.assertIn( - fig_record, - failed, - "save_all_figures() didn't return the correct figure record", - ) + self.assertIn(fig_record, failed, "save_all_figures() didn't return the correct figure record") lt.error("Failed gracefully") sys.exit(0) # force this process to exit (waits forever on save_all_figures()) @@ -205,9 +175,7 @@ def test_save_all_figures_fail_no_raise(self): timeout=10.0, ) stdout = [line.val for line in stdout] - self.assertIn( - "Failed gracefully", stdout, f"Subprocess didn't exit correctly." 
-        )
+        self.assertIn("Failed gracefully", stdout, f"Subprocess didn't exit correctly.")
 
     def test_save_all_figures_notimeout_100(self):
         """Test that with a long 100 second timeout, the figure is saved."""
@@ -216,26 +184,20 @@ def test_save_all_figures_notimeout_100(self):
 
         figure_control = rcfg.RenderControlFigure(tile_array=(1, 1), tile_square=True)
         fig_record = fm.setup_figure(
-            figure_control,
-            name=name,
-            code_tag=f"{__file__}.test_save_all_figures_notimeout_100()",
+            figure_control, name=name, code_tag=f"{__file__}.test_save_all_figures_notimeout_100()"
         )
         view = fig_record.view
         line = list(range(100))
         view.draw_p_list(line)
 
-        figs_txts_fails = fm.save_all_figures(
-            self.dir_out, timeout=100, raise_on_timeout=True
-        )
+        figs_txts_fails = fm.save_all_figures(self.dir_out, timeout=100, raise_on_timeout=True)
         self.assert_exists(figs_txts_fails, 1)
 
 
 if __name__ == '__main__':
     import argparse
 
-    parser = argparse.ArgumentParser(
-        prog=__file__.rstrip(".py"), description='Testing figure management'
-    )
+    parser = argparse.ArgumentParser(prog=__file__.rstrip(".py"), description='Testing figure management')
     parser.add_argument('--funcname', help="Calls the given function")
     args = parser.parse_args()
     func_name = args.funcname
diff --git a/opencsp/common/lib/render/view_spec.py b/opencsp/common/lib/render/view_spec.py
index 646240a5..1762805d 100644
--- a/opencsp/common/lib/render/view_spec.py
+++ b/opencsp/common/lib/render/view_spec.py
@@ -126,9 +126,7 @@ def xyz2pqw(xyz, view_spec):
         return [xyz[1], xyz[2], xyz[0]]
     elif view_spec['type'] == 'vplane':
         # Fetch section coordinate system.
-        origin_xyz = np.array(
-            view_spec['origin_xyz']
-        )  # Make arrays so we can do simple vactor math.
+        origin_xyz = np.array(view_spec['origin_xyz'])  # Make arrays so we can do simple vector math.
         p_uxyz = np.array(view_spec['p_uxyz'])  #
         q_uxyz = np.array(view_spec['q_uxyz'])  #
         w_uxyz = np.array(view_spec['w_uxyz'])  #
@@ -153,11 +151,7 @@ def xyz2pqw(xyz, view_spec):
         # Return.
         return [p, q, w]
     else:
-        print(
-            "ERROR: In xyz2pqw(), unrecognized view_spec['type'] = '"
-            + str(view_spec['type'])
-            + "' encountered."
-        )
+        print("ERROR: In xyz2pqw(), unrecognized view_spec['type'] = '" + str(view_spec['type']) + "' encountered.")
         assert False
 
 
@@ -182,9 +176,7 @@ def pqw2xyz(pqw, view_spec):
         return [pqw[2], pqw[0], pqw[1]]
     elif view_spec['type'] == 'vplane':
         # Fetch section coordinate system.
-        origin_xyz = np.array(
-            view_spec['origin_xyz']
-        )  # Make arrays so we can do simple vactor math.
+        origin_xyz = np.array(view_spec['origin_xyz'])  # Make arrays so we can do simple vector math.
         p_uxyz = np.array(view_spec['p_uxyz'])  #
         q_uxyz = np.array(view_spec['q_uxyz'])  #
         w_uxyz = np.array(view_spec['w_uxyz'])  #
@@ -196,11 +188,7 @@ def pqw2xyz(pqw, view_spec):
         xyz = origin_xyz + (p * p_uxyz) + (q * q_uxyz) + (w * w_uxyz)
         return [xyz[0], xyz[1], xyz[2]]
     else:
-        print(
-            "ERROR: In pqw2xyz(), unrecognized view_spec['type'] = '"
-            + str(view_spec['type'])
-            + "' encountered."
-        )
+        print("ERROR: In pqw2xyz(), unrecognized view_spec['type'] = '" + str(view_spec['type']) + "' encountered.")
         assert False
 
 
diff --git a/opencsp/common/lib/render_control/RenderControlAxis.py b/opencsp/common/lib/render_control/RenderControlAxis.py
index f7692c3e..540421dc 100644
--- a/opencsp/common/lib/render_control/RenderControlAxis.py
+++ b/opencsp/common/lib/render_control/RenderControlAxis.py
@@ -9,16 +9,7 @@ class RenderControlAxis:
     Render control for plot axes.
""" - def __init__( - self, - x_label='x', - y_label='y', - z_label='z', - p_label='p', - q_label='q', - w_label='w', - grid=True, - ): + def __init__(self, x_label='x', y_label='y', z_label='z', p_label='p', q_label='q', w_label='w', grid=True): super(RenderControlAxis, self).__init__() # Axis control. @@ -36,13 +27,7 @@ def meters(grid=True): Labels indicating units of meters. """ return RenderControlAxis( - x_label='x (m)', - y_label='y (m)', - z_label='z (m)', - p_label='p (m)', - q_label='q (m)', - w_label='w (m)', - grid=grid, + x_label='x (m)', y_label='y (m)', z_label='z (m)', p_label='p (m)', q_label='q (m)', w_label='w (m)', grid=grid ) diff --git a/opencsp/common/lib/render_control/RenderControlDeflectometryInstrument.py b/opencsp/common/lib/render_control/RenderControlDeflectometryInstrument.py index 5ae0f259..d718d000 100644 --- a/opencsp/common/lib/render_control/RenderControlDeflectometryInstrument.py +++ b/opencsp/common/lib/render_control/RenderControlDeflectometryInstrument.py @@ -119,9 +119,7 @@ def centroid_name(color='k'): def centroid_name_outline( - color='k', - horizontalalignment='center', # center, right, left - verticalalignment='center', + color='k', horizontalalignment='center', verticalalignment='center' # center, right, left ): # center, top, bottom, baseline, center_baseline # Name and overall outline. return RenderControlDeflectometryInstrument( @@ -134,9 +132,7 @@ def centroid_name_outline( draw_facets=False, draw_name=True, name_style=rctxt.RenderControlText( - color=color, - horizontalalignment=horizontalalignment, - verticalalignment=verticalalignment, + color=color, horizontalalignment=horizontalalignment, verticalalignment=verticalalignment ), ) @@ -155,9 +151,7 @@ def outline(color='k'): def name_outline( - color='k', - horizontalalignment='center', # center, right, left - verticalalignment='center', + color='k', horizontalalignment='center', verticalalignment='center' # center, right, left ): # center, top, bottom, baseline, center_baseline # Name and overall outline. 
return RenderControlDeflectometryInstrument( @@ -169,9 +163,7 @@ def name_outline( draw_facets=False, draw_name=True, name_style=rctxt.RenderControlText( - color=color, - horizontalalignment=horizontalalignment, - verticalalignment=verticalalignment, + color=color, horizontalalignment=horizontalalignment, verticalalignment=verticalalignment ), ) diff --git a/opencsp/common/lib/render_control/RenderControlEvaluateHeliostats3d.py b/opencsp/common/lib/render_control/RenderControlEvaluateHeliostats3d.py index 09953e15..35ea10c3 100644 --- a/opencsp/common/lib/render_control/RenderControlEvaluateHeliostats3d.py +++ b/opencsp/common/lib/render_control/RenderControlEvaluateHeliostats3d.py @@ -30,25 +30,13 @@ def __init__( self.clear_previous = clear_previous self.draw_evaluate_heliostats_3d = draw_evaluate_heliostats_3d self.evaluate_heliostats_3d_points_marker = evaluate_heliostats_3d_points_marker - self.evaluate_heliostats_3d_points_markersize = ( - evaluate_heliostats_3d_points_markersize - ) + self.evaluate_heliostats_3d_points_markersize = evaluate_heliostats_3d_points_markersize self.evaluate_heliostats_3d_points_color = evaluate_heliostats_3d_points_color - self.evaluate_heliostats_3d_label_horizontalalignment = ( - evaluate_heliostats_3d_label_horizontalalignment - ) - self.evaluate_heliostats_3d_label_verticalalignment = ( - evaluate_heliostats_3d_label_verticalalignment - ) - self.evaluate_heliostats_3d_label_fontsize = ( - evaluate_heliostats_3d_label_fontsize - ) - self.evaluate_heliostats_3d_label_fontstyle = ( - evaluate_heliostats_3d_label_fontstyle - ) - self.evaluate_heliostats_3d_label_fontweight = ( - evaluate_heliostats_3d_label_fontweight - ) + self.evaluate_heliostats_3d_label_horizontalalignment = evaluate_heliostats_3d_label_horizontalalignment + self.evaluate_heliostats_3d_label_verticalalignment = evaluate_heliostats_3d_label_verticalalignment + self.evaluate_heliostats_3d_label_fontsize = evaluate_heliostats_3d_label_fontsize + self.evaluate_heliostats_3d_label_fontstyle = evaluate_heliostats_3d_label_fontstyle + self.evaluate_heliostats_3d_label_fontweight = evaluate_heliostats_3d_label_fontweight self.evaluate_heliostats_3d_label_color = evaluate_heliostats_3d_label_color self.evaluate_heliostats_3d_dpi = evaluate_heliostats_3d_dpi self.evaluate_heliostats_3d_crop = evaluate_heliostats_3d_crop @@ -59,8 +47,7 @@ def __init__( def default(color='m'): return RenderControlEvaluateHeliostats3d( - evaluate_heliostats_3d_points_color=color, - evaluate_heliostats_3d_label_color=color, + evaluate_heliostats_3d_points_color=color, evaluate_heliostats_3d_label_color=color ) diff --git a/opencsp/common/lib/render_control/RenderControlFigureRecord.py b/opencsp/common/lib/render_control/RenderControlFigureRecord.py index ae20fd5f..1addd4f6 100644 --- a/opencsp/common/lib/render_control/RenderControlFigureRecord.py +++ b/opencsp/common/lib/render_control/RenderControlFigureRecord.py @@ -50,12 +50,8 @@ def __init__( self.figure = figure self.axis_control = axis_control """ Axis control instance used in figure_management.setup_figure. Can be None|RenderControlAxis. """ - self.metadata: list[str] = ( - [] - ) # A list of standard string fields -- name, figure number, file path, etc. - self.comments: list[str] = ( - [] - ) # A list of caller-defined strings, to be filled in later. + self.metadata: list[str] = [] # A list of standard string fields -- name, figure number, file path, etc. + self.comments: list[str] = [] # A list of caller-defined strings, to be filled in later. 
self.axis: plt.Axes = None # Matplotlib plot axes object. Set later. self.view: View3d = None # View3d object. Set later. self.equal = None # Whether to make axes equal. Set later. @@ -73,14 +69,7 @@ def print_comments(self): for comment_line in self.comments: lt.info(comment_line) - def save( - self, - output_dir: str, - output_file_body: str = None, - format: str = None, - dpi=600, - close_after_save=True, - ): + def save(self, output_dir: str, output_file_body: str = None, format: str = None, dpi=600, close_after_save=True): """Saves this figure record to an image file. Args: @@ -116,9 +105,7 @@ def save( # If this is a 3-d plot, add the projection choice. if self.view != None: - output_figure_dir_body_ext = self.view.save( - output_dir, output_figure_body, format=format, dpi=dpi - ) + output_figure_dir_body_ext = self.view.save(output_dir, output_figure_body, format=format, dpi=dpi) else: # Make the figure current. plt.figure(self.name) @@ -140,20 +127,14 @@ def save( if orig_format.lower() == "gif": im = PilImage.open(output_figure_dir_body_ext) png_file = output_figure_dir_body_ext - output_figure_dir_body_ext = ( - png_file.rstrip("." + format) + "." + orig_format - ) + output_figure_dir_body_ext = png_file.rstrip("." + format) + "." + orig_format im.save(output_figure_dir_body_ext) ft.delete_file(png_file) # Save the figure explanation. - output_figure_dir, output_figure_body, output_figure_ext = ft.path_components( - output_figure_dir_body_ext - ) + output_figure_dir, output_figure_body, output_figure_ext = ft.path_components(output_figure_dir_body_ext) output_figure_text_body_ext = output_figure_body + '.txt' - output_figure_text_dir_body_ext = os.path.join( - output_figure_dir, output_figure_text_body_ext - ) + output_figure_text_dir_body_ext = os.path.join(output_figure_dir, output_figure_text_body_ext) lt.info('Saving figure text: ' + output_figure_text_dir_body_ext) with open(output_figure_text_dir_body_ext, 'w') as output_stream: # Save the figure metadata. diff --git a/opencsp/common/lib/render_control/RenderControlFramesNoDuplicates.py b/opencsp/common/lib/render_control/RenderControlFramesNoDuplicates.py index 8643417f..57ee0f92 100644 --- a/opencsp/common/lib/render_control/RenderControlFramesNoDuplicates.py +++ b/opencsp/common/lib/render_control/RenderControlFramesNoDuplicates.py @@ -24,10 +24,7 @@ def __init__( if "outframe_format" not in kwargs: kwargs["outframe_format"] = frame_format super().__init__( - clear_dir=clear_dir, - draw_example_frames=draw_example_frames, - example_dpi=example_dpi, - **kwargs + clear_dir=clear_dir, draw_example_frames=draw_example_frames, example_dpi=example_dpi, **kwargs ) diff --git a/opencsp/common/lib/render_control/RenderControlHeliostat.py b/opencsp/common/lib/render_control/RenderControlHeliostat.py index ff22d536..3a207c37 100644 --- a/opencsp/common/lib/render_control/RenderControlHeliostat.py +++ b/opencsp/common/lib/render_control/RenderControlHeliostat.py @@ -125,9 +125,7 @@ def centroid_name(color='k'): def centroid_name_outline( - color='k', - horizontalalignment='center', # center, right, left - verticalalignment='center', + color='k', horizontalalignment='center', verticalalignment='center' # center, right, left ): # center, top, bottom, baseline, center_baseline # Name and overall outline. 
return RenderControlHeliostat( @@ -140,9 +138,7 @@ def centroid_name_outline( draw_facets=False, draw_name=True, name_style=rctxt.RenderControlText( - color=color, - horizontalalignment=horizontalalignment, - verticalalignment=verticalalignment, + color=color, horizontalalignment=horizontalalignment, verticalalignment=verticalalignment ), ) @@ -161,9 +157,7 @@ def outline(color='k'): def name_outline( - color='k', - horizontalalignment='center', # center, right, left - verticalalignment='center', + color='k', horizontalalignment='center', verticalalignment='center' # center, right, left ): # center, top, bottom, baseline, center_baseline # Name and overall outline. return RenderControlHeliostat( @@ -175,9 +169,7 @@ def name_outline( draw_facets=False, draw_name=True, name_style=rctxt.RenderControlText( - color=color, - horizontalalignment=horizontalalignment, - verticalalignment=verticalalignment, + color=color, horizontalalignment=horizontalalignment, verticalalignment=verticalalignment ), ) diff --git a/opencsp/common/lib/render_control/RenderControlHeliostatTracks.py b/opencsp/common/lib/render_control/RenderControlHeliostatTracks.py index 86101151..9e36a09c 100644 --- a/opencsp/common/lib/render_control/RenderControlHeliostatTracks.py +++ b/opencsp/common/lib/render_control/RenderControlHeliostatTracks.py @@ -32,12 +32,8 @@ def __init__( self.heliostat_tracks_points_marker = heliostat_tracks_points_marker self.heliostat_tracks_points_markersize = heliostat_tracks_points_markersize self.heliostat_tracks_points_color = heliostat_tracks_points_color - self.heliostat_tracks_label_horizontalalignment = ( - heliostat_tracks_label_horizontalalignment - ) - self.heliostat_tracks_label_verticalalignment = ( - heliostat_tracks_label_verticalalignment - ) + self.heliostat_tracks_label_horizontalalignment = heliostat_tracks_label_horizontalalignment + self.heliostat_tracks_label_verticalalignment = heliostat_tracks_label_verticalalignment self.heliostat_tracks_label_fontsize = heliostat_tracks_label_fontsize self.heliostat_tracks_label_fontstyle = heliostat_tracks_label_fontstyle self.heliostat_tracks_label_fontweight = heliostat_tracks_label_fontweight @@ -50,9 +46,7 @@ def __init__( def default(color='m'): - return RenderControlHeliostatTracks( - heliostat_tracks_points_color=color, heliostat_tracks_label_color=color - ) + return RenderControlHeliostatTracks(heliostat_tracks_points_color=color, heliostat_tracks_label_color=color) def fast(): diff --git a/opencsp/common/lib/render_control/RenderControlHeliostats3d.py b/opencsp/common/lib/render_control/RenderControlHeliostats3d.py index c750ca7c..5433526e 100644 --- a/opencsp/common/lib/render_control/RenderControlHeliostats3d.py +++ b/opencsp/common/lib/render_control/RenderControlHeliostats3d.py @@ -32,12 +32,8 @@ def __init__( self.heliostats_3d_points_marker = heliostats_3d_points_marker self.heliostats_3d_points_markersize = heliostats_3d_points_markersize self.heliostats_3d_points_color = heliostats_3d_points_color - self.heliostats_3d_label_horizontalalignment = ( - heliostats_3d_label_horizontalalignment - ) - self.heliostats_3d_label_verticalalignment = ( - heliostats_3d_label_verticalalignment - ) + self.heliostats_3d_label_horizontalalignment = heliostats_3d_label_horizontalalignment + self.heliostats_3d_label_verticalalignment = heliostats_3d_label_verticalalignment self.heliostats_3d_label_fontsize = heliostats_3d_label_fontsize self.heliostats_3d_label_fontstyle = heliostats_3d_label_fontstyle self.heliostats_3d_label_fontweight = 
heliostats_3d_label_fontweight @@ -50,9 +46,7 @@ def __init__( def default(color='m'): - return RenderControlHeliostats3d( - heliostats_3d_points_color=color, heliostats_3d_label_color=color - ) + return RenderControlHeliostats3d(heliostats_3d_points_color=color, heliostats_3d_label_color=color) def fast(): diff --git a/opencsp/common/lib/render_control/RenderControlKeyCorners.py b/opencsp/common/lib/render_control/RenderControlKeyCorners.py index e2107bcd..13ce7382 100644 --- a/opencsp/common/lib/render_control/RenderControlKeyCorners.py +++ b/opencsp/common/lib/render_control/RenderControlKeyCorners.py @@ -32,9 +32,7 @@ def __init__( self.key_corners_points_marker = key_corners_points_marker self.key_corners_points_markersize = key_corners_points_markersize self.key_corners_points_color = key_corners_points_color - self.key_corners_label_horizontalalignment = ( - key_corners_label_horizontalalignment - ) + self.key_corners_label_horizontalalignment = key_corners_label_horizontalalignment self.key_corners_label_verticalalignment = key_corners_label_verticalalignment self.key_corners_label_fontsize = key_corners_label_fontsize self.key_corners_label_fontstyle = key_corners_label_fontstyle diff --git a/opencsp/common/lib/render_control/RenderControlMirror.py b/opencsp/common/lib/render_control/RenderControlMirror.py index 8cb560d2..03ab7b16 100644 --- a/opencsp/common/lib/render_control/RenderControlMirror.py +++ b/opencsp/common/lib/render_control/RenderControlMirror.py @@ -1,7 +1,5 @@ import opencsp.common.lib.render_control.RenderControlPointSeq as rcps -from opencsp.common.lib.render_control.RenderControlPointSeq import ( - RenderControlPointSeq, -) +from opencsp.common.lib.render_control.RenderControlPointSeq import RenderControlPointSeq from opencsp.common.lib.render_control.RenderControlSurface import RenderControlSurface diff --git a/opencsp/common/lib/render_control/RenderControlPointSeq.py b/opencsp/common/lib/render_control/RenderControlPointSeq.py index 9594bdcf..a5b0fff4 100644 --- a/opencsp/common/lib/render_control/RenderControlPointSeq.py +++ b/opencsp/common/lib/render_control/RenderControlPointSeq.py @@ -118,47 +118,31 @@ def default(marker='o', color='b', linewidth=1, markersize=8): """ What to draw if no particular preference is expressed. """ - return RenderControlPointSeq( - linestyle='-', linewidth=1, color=color, marker='.', markersize=markersize - ) + return RenderControlPointSeq(linestyle='-', linewidth=1, color=color, marker='.', markersize=markersize) def outline(color='k', linewidth=1): """ Outlines of physical objects. """ - return RenderControlPointSeq( - linestyle='-', linewidth=linewidth, color=color, marker='None' - ) + return RenderControlPointSeq(linestyle='-', linewidth=linewidth, color=color, marker='None') -def data_curve( - color='b', linewidth=1, marker='.', markersize=3 -) -> RenderControlPointSeq: +def data_curve(color='b', linewidth=1, marker='.', markersize=3) -> RenderControlPointSeq: """ A data curve with data points identified. """ - return RenderControlPointSeq( - linestyle='-', - linewidth=linewidth, - color=color, - marker=marker, - markersize=markersize, - ) + return RenderControlPointSeq(linestyle='-', linewidth=linewidth, color=color, marker=marker, markersize=markersize) def marker(marker='o', color='b', markersize=3) -> RenderControlPointSeq: """ A data curve with data points identified. 
""" - return RenderControlPointSeq( - linestyle='None', color=color, marker=marker, markersize=markersize - ) + return RenderControlPointSeq(linestyle='None', color=color, marker=marker, markersize=markersize) -def vector_field( - marker='.', color='b', markersize=3, vector_linewidth=1, vector_scale=1.0 -) -> RenderControlPointSeq: +def vector_field(marker='.', color='b', markersize=3, vector_linewidth=1, vector_scale=1.0) -> RenderControlPointSeq: """ A field of vector needles. """ diff --git a/opencsp/common/lib/render_control/RenderControlPowerpointPresentation.py b/opencsp/common/lib/render_control/RenderControlPowerpointPresentation.py index 3a524320..e0b2048e 100644 --- a/opencsp/common/lib/render_control/RenderControlPowerpointPresentation.py +++ b/opencsp/common/lib/render_control/RenderControlPowerpointPresentation.py @@ -9,11 +9,7 @@ class RenderControlPowerpointPresentation: - def __init__( - self, - new_slides: list[pps.PowerpointSlide] = None, - existing_presentation_path_name_ext: str = None, - ): + def __init__(self, new_slides: list[pps.PowerpointSlide] = None, existing_presentation_path_name_ext: str = None): """Create a new presentation instance to which to add slides. Example:: @@ -83,19 +79,13 @@ def save(self, dest_path_name_ext: str, overwrite=False): # setup tmp_dir = os.path.join( - orp.opencsp_temporary_dir(), - "powerpoint_presentations", - tdt.current_date_time_string_forfile(), + orp.opencsp_temporary_dir(), "powerpoint_presentations", tdt.current_date_time_string_forfile() ) # render all the slides for slide_list_idx, pps_slide in enumerate(self.new_slides): control = pps_slide.slide_control - layout = ( - self.get_title_layout() - if control.is_title_slide - else self.get_content_layout() - ) + layout = self.get_title_layout() if control.is_title_slide else self.get_content_layout() if control.slide_index < 0 or control.slide_index == slide_list_idx: pass @@ -106,10 +96,7 @@ def save(self, dest_path_name_ext: str, overwrite=False): tmp_render_path = os.path.join(tmp_dir, f"slide_{slide_list_idx}") if ft.directory_exists(tmp_render_path): - lt.error_and_raise( - FileExistsError, - f"Temporary rendering directory {tmp_render_path} already exists!", - ) + lt.error_and_raise(FileExistsError, f"Temporary rendering directory {tmp_render_path} already exists!") slide = pps_slide.render(self, layout, tmp_render_path) # check if the file already exists @@ -117,8 +104,7 @@ def save(self, dest_path_name_ext: str, overwrite=False): # check again, just to be safe if not overwrite: lt.error_and_raise( - RuntimeError, - f"Reached unreachable code!!! (file {dest_path_name_ext} already exists)", + RuntimeError, f"Reached unreachable code!!! (file {dest_path_name_ext} already exists)" ) # save to a temporary file diff --git a/opencsp/common/lib/render_control/RenderControlSolarField.py b/opencsp/common/lib/render_control/RenderControlSolarField.py index 35c2e330..54e0bf45 100644 --- a/opencsp/common/lib/render_control/RenderControlSolarField.py +++ b/opencsp/common/lib/render_control/RenderControlSolarField.py @@ -43,10 +43,7 @@ def default(): def outline(color='k'): # Overall field outline only. 
return RenderControlSolarField( - draw_outline=True, - outline_style=rcps.outline(color=color), - draw_heliostats=False, - draw_name=False, + draw_outline=True, outline_style=rcps.outline(color=color), draw_heliostats=False, draw_name=False ) @@ -115,18 +112,14 @@ def heliostat_outlines_names(color='k'): def heliostat_centroids_outlines_names( - color='k', - horizontalalignment='center', # center, right, left - verticalalignment='center', + color='k', horizontalalignment='center', verticalalignment='center' # center, right, left ): # center, top, bottom, baseline, center_baseline return RenderControlSolarField( draw_outline=False, draw_heliostats=True, heliostat_styles=rce.RenderControlEnsemble( rch.centroid_name_outline( - color=color, - horizontalalignment=horizontalalignment, - verticalalignment=verticalalignment, + color=color, horizontalalignment=horizontalalignment, verticalalignment=verticalalignment ) ), draw_name=False, @@ -138,9 +131,7 @@ def heliostat_vector_field(color='k', vector_length=9): draw_outline=False, outline_style=rcps.outline(color=color), draw_heliostats=True, - heliostat_styles=rce.RenderControlEnsemble( - rch.normal(color=color, surface_normal_length=vector_length) - ), + heliostat_styles=rce.RenderControlEnsemble(rch.normal(color=color, surface_normal_length=vector_length)), draw_name=False, ) diff --git a/opencsp/common/lib/render_control/RenderControlText.py b/opencsp/common/lib/render_control/RenderControlText.py index f1eec68e..ea2f872d 100644 --- a/opencsp/common/lib/render_control/RenderControlText.py +++ b/opencsp/common/lib/render_control/RenderControlText.py @@ -77,19 +77,11 @@ def default(fontsize='medium', color='b'): """ What to draw if no particular preference is expressed. """ - return RenderControlText( - fontsize=fontsize, - fontstyle='normal', - fontweight='normal', - zdir=None, - color=color, - ) + return RenderControlText(fontsize=fontsize, fontstyle='normal', fontweight='normal', zdir=None, color=color) def bold(fontsize='medium', color='b'): """ What to draw for emphasis. 
""" - return RenderControlText( - fontsize=fontsize, fontstyle='normal', fontweight='bold', zdir=None, color=color - ) + return RenderControlText(fontsize=fontsize, fontstyle='normal', fontweight='bold', zdir=None, color=color) diff --git a/opencsp/common/lib/render_control/RenderControlTrajectoryAnalysis.py b/opencsp/common/lib/render_control/RenderControlTrajectoryAnalysis.py index bba5f24d..0a893188 100644 --- a/opencsp/common/lib/render_control/RenderControlTrajectoryAnalysis.py +++ b/opencsp/common/lib/render_control/RenderControlTrajectoryAnalysis.py @@ -32,12 +32,8 @@ def __init__( self.heliostats_3d_points_marker = heliostats_3d_points_marker self.heliostats_3d_points_markersize = heliostats_3d_points_markersize self.heliostats_3d_points_color = heliostats_3d_points_color - self.heliostats_3d_label_horizontalalignment = ( - heliostats_3d_label_horizontalalignment - ) - self.heliostats_3d_label_verticalalignment = ( - heliostats_3d_label_verticalalignment - ) + self.heliostats_3d_label_horizontalalignment = heliostats_3d_label_horizontalalignment + self.heliostats_3d_label_verticalalignment = heliostats_3d_label_verticalalignment self.heliostats_3d_label_fontsize = heliostats_3d_label_fontsize self.heliostats_3d_label_fontstyle = heliostats_3d_label_fontstyle self.heliostats_3d_label_fontweight = heliostats_3d_label_fontweight @@ -50,9 +46,7 @@ def __init__( def default(color='m'): - return RenderControlTrajectoryAnalysis( - heliostats_3d_points_color=color, heliostats_3d_label_color=color - ) + return RenderControlTrajectoryAnalysis(heliostats_3d_points_color=color, heliostats_3d_label_color=color) def fast(): diff --git a/opencsp/common/lib/render_control/RenderControlVideo.py b/opencsp/common/lib/render_control/RenderControlVideo.py index 516a4abd..c1a71621 100644 --- a/opencsp/common/lib/render_control/RenderControlVideo.py +++ b/opencsp/common/lib/render_control/RenderControlVideo.py @@ -29,9 +29,7 @@ def __init__( if codec not in ['undefined', 'H.264', 'H.265', 'copy']: raise RuntimeError(f"Unrecognized codec option '{self.codec}'") if low_bitrate and codec in ["undefined", "copy"]: - raise RuntimeError( - "Codec must be specified in order to use low_bitrate=True" - ) + raise RuntimeError("Codec must be specified in order to use low_bitrate=True") def _get_original_video_width_height(self, video_or_image_path_name_ext: str): if self._original_video_width_height == None: @@ -41,9 +39,7 @@ def _get_original_video_width_height(self, video_or_image_path_name_ext: str): self._original_video_width_height = handler.get_width_height() return self._original_video_width_height - def get_ffmpeg_args( - self, video_or_image_path_name_ext: str = "" - ) -> tuple[str, dict[str, str]]: + def get_ffmpeg_args(self, video_or_image_path_name_ext: str = "") -> tuple[str, dict[str, str]]: """Get the arguments and directories to be passed to ffmpeg. 
Args: @@ -76,14 +72,10 @@ def get_ffmpeg_args( if video_or_image_path_name_ext != "": def owidth(): - return self._get_original_video_width_height( - video_or_image_path_name_ext - )[0] + return self._get_original_video_width_height(video_or_image_path_name_ext)[0] def oheight(): - return self._get_original_video_width_height( - video_or_image_path_name_ext - )[1] + return self._get_original_video_width_height(video_or_image_path_name_ext)[1] if self.min_scale: if width != None: @@ -120,15 +112,8 @@ def default(cls): return cls() @classmethod - def power_point( - cls, framerate=10, width=320, height=None, codec='H.264', low_bitrate=False - ): + def power_point(cls, framerate=10, width=320, height=None, codec='H.264', low_bitrate=False): """Returns a set of defaults suitable for embedding videos into powerpoint.""" return cls( - framerate=framerate, - width=width, - height=height, - min_scale=True, - codec=codec, - low_bitrate=low_bitrate, + framerate=framerate, width=width, height=height, min_scale=True, codec=codec, low_bitrate=low_bitrate ) diff --git a/opencsp/common/lib/render_control/RenderControlVideoFrames.py b/opencsp/common/lib/render_control/RenderControlVideoFrames.py index 85f34a32..33379cf8 100644 --- a/opencsp/common/lib/render_control/RenderControlVideoFrames.py +++ b/opencsp/common/lib/render_control/RenderControlVideoFrames.py @@ -58,9 +58,7 @@ def __init__( self.outframe_format = outframe_format self.outframe_dpi = outframe_dpi self.example_name = example_name if example_name != None else outframe_name - self.example_format = ( - example_format if example_format != None else outframe_format - ) + self.example_format = example_format if example_format != None else outframe_format self.example_dpi = example_dpi if example_dpi != None else outframe_dpi self.example_freq = example_freq self.draw_example_frames = draw_example_frames @@ -95,17 +93,13 @@ def clean_dir(self, dir: str, remove_only_images=False): if self.draw_example_frames and self.example_format != self.outframe_format: extensions.append(self.example_format) - files_name_ext_dict = ft.files_in_directory_by_extension( - dir, sort=False, extensions=extensions - ) + files_name_ext_dict = ft.files_in_directory_by_extension(dir, sort=False, extensions=extensions) for extension in files_name_ext_dict.keys(): files_name_ext = files_name_ext_dict[extension] for fn in files_name_ext: ft.delete_file(os.path.join(dir, fn), error_on_not_exists=False) - def get_outframe_name( - self, source_video_dir_body_ext: str = None, is_example_frames=False - ): + def get_outframe_name(self, source_video_dir_body_ext: str = None, is_example_frames=False): """Returns the format string for generating frame names (name+ext only)""" # get the name and extension name, ext = self.outframe_name, self.outframe_format @@ -123,19 +117,13 @@ def get_outframe_name( return name_ext def get_outframe_path_name_ext( - self, - destination_dir: str, - source_video_dir_body_ext: str = None, - is_example_frames=False, + self, destination_dir: str, source_video_dir_body_ext: str = None, is_example_frames=False ): name_ext = self.get_outframe_name(source_video_dir_body_ext, is_example_frames) return os.path.join(destination_dir, name_ext) def get_ffmpeg_args( - self, - destination_dir: str, - source_video_dir_body_ext: str = None, - is_example_frames=False, + self, destination_dir: str, source_video_dir_body_ext: str = None, is_example_frames=False ) -> tuple[str, dict[str, str]]: """Get the ffmpeg arguments for extracting either extracted or example frames. 
diff --git a/opencsp/common/lib/render_control/RenderControlVideoTracks.py b/opencsp/common/lib/render_control/RenderControlVideoTracks.py
index aee4a72a..37d20637 100644
--- a/opencsp/common/lib/render_control/RenderControlVideoTracks.py
+++ b/opencsp/common/lib/render_control/RenderControlVideoTracks.py
@@ -32,9 +32,7 @@ def __init__(
         self.video_tracks_points_marker = video_tracks_points_marker
         self.video_tracks_points_markersize = video_tracks_points_markersize
         self.video_tracks_points_color = video_tracks_points_color
-        self.video_tracks_label_horizontalalignment = (
-            video_tracks_label_horizontalalignment
-        )
+        self.video_tracks_label_horizontalalignment = video_tracks_label_horizontalalignment
         self.video_tracks_label_verticalalignment = video_tracks_label_verticalalignment
         self.video_tracks_label_fontsize = video_tracks_label_fontsize
         self.video_tracks_label_fontstyle = video_tracks_label_fontstyle
@@ -48,9 +46,7 @@ def __init__(
 
 
 def default(color='m'):
-    return RenderControlVideoTracks(
-        video_tracks_points_color=color, video_tracks_label_color=color
-    )
+    return RenderControlVideoTracks(video_tracks_points_color=color, video_tracks_label_color=color)
 
 
 def fast():
diff --git a/opencsp/common/lib/target/TargetAbstract.py b/opencsp/common/lib/target/TargetAbstract.py
index 76bdc176..da117595 100755
--- a/opencsp/common/lib/target/TargetAbstract.py
+++ b/opencsp/common/lib/target/TargetAbstract.py
@@ -18,10 +18,7 @@ class TargetAbstract(ABC):
     """
 
     def __init__(
-        self,
-        image_width: float,  # Meters
-        image_height: float,  # Meters
-        dpm: float,  # dots per meter
+        self, image_width: float, image_height: float, dpm: float  # meters, meters, dots per meter
     ) -> None:
         super().__init__()
         self.image_width = image_width
@@ -29,9 +26,7 @@ def __init__(
         self.dpm = dpm
         self.comments = ["Target Comments:"]
         # Construct image object.
-        self.image = ti.construct_target_image(
-            self.image_width, self.image_height, self.dpm
-        )
+        self.image = ti.construct_target_image(self.image_width, self.image_height, self.dpm)
         # Set initial pattern.
         self.pattern_description = 'blank'
         # ?? SCAFFOLDING RCB -- RENAME THIS VARIABLE TO "NAME"? SEE splice_targets_above_below() FOR MAYBE REASON WHY
@@ -49,9 +44,7 @@ def rows_cols_bands(self):
         return n_rows, n_cols, n_bands
 
     def image_size_str_meter(self) -> str:
-        return 'w{w:.3f}m_h{h:.3f}m_{dpm:.1f}dpm'.format(
-            w=self.image_width, h=self.image_height, dpm=round(self.dpm)
-        )
+        return 'w{w:.3f}m_h{h:.3f}m_{dpm:.1f}dpm'.format(w=self.image_width, h=self.image_height, dpm=round(self.dpm))
 
     def image_size_str_inch(self) -> str:
         return 'w{w:.3f}in_h{h:.3f}in_{dpi:d}dpi'.format(
diff --git a/opencsp/common/lib/target/TargetColor.py b/opencsp/common/lib/target/TargetColor.py
index 3dbba6d3..2a074b98 100755
--- a/opencsp/common/lib/target/TargetColor.py
+++ b/opencsp/common/lib/target/TargetColor.py
@@ -34,9 +34,7 @@ def __init__(
         dpm: float,  # dots per meter
         initial_color: Color.Color,  # Color to fill canvas before adding patterns.
     ) -> None:
-        super().__init__(
-            image_width, image_height, dpm
-        )  # initalizes the attributes universal to all mirrors
+        super().__init__(image_width, image_height, dpm)  # initializes the attributes universal to all mirrors
         # Set initial pattern.
         self.initial_color = initial_color
         self.pattern_description = initial_color.name
@@ -63,9 +61,7 @@ def rows_cols(
         n_cols = self.image.shape[1]
         n_bands = self.image.shape[2]
         if n_bands != 3:
-            print(
-                'ERROR: In TargetAbstract.row_cols(), number of input image bands is not 3.'
- ) + print('ERROR: In TargetAbstract.row_cols(), number of input image bands is not 3.') assert False # ?? SCAFFOLDING RCB -- CONVERT TO EXCEPTION return n_rows, n_cols @@ -76,11 +72,7 @@ def set_pattern_description(self, description: str) -> None: # Linear color bar, x direction def set_image_to_linear_color_bar_x( - self, - color_below_min: Color, - color_bar, - color_above_max: Color, - discrete_or_continuous: str, + self, color_below_min: Color, color_bar, color_above_max: Color, discrete_or_continuous: str ) -> None: n_rows, n_cols = self.rows_cols() for row in range(0, n_rows): @@ -90,13 +82,7 @@ def set_image_to_linear_color_bar_x( val_min = 0 val_max = n_cols color = tcc.color_given_value( - val, - val_min, - val_max, - color_below_min, - color_bar, - color_above_max, - discrete_or_continuous, + val, val_min, val_max, color_below_min, color_bar, color_above_max, discrete_or_continuous ) # Set pixel color # ?? SCAFFOLDING RCB -- FIXUP ALL THIS CONFUSION REGARDING WHETHER COLORS ARE OVER [0,1] OR [0,255]. @@ -126,21 +112,13 @@ def set_image_to_linear_color_bar_y( val_min = 0 val_max = n_rows # Last row in color bar is the final color; there is not a color beyond. color = tcc.color_given_value( - val, - val_min, - val_max, - color_below_min, - color_bar, - color_above_max, - discrete_or_continuous, + val, val_min, val_max, color_below_min, color_bar, color_above_max, discrete_or_continuous ) # ?? SCAFFOLDING -- USE "SPLIT" CONTROL PARAMETER. # Adjust saturation. lateral_fraction = col / n_cols # Color components. - this_red = color[ - 0 - ] # /255.0 # ?? SCAFFOLDING RCB -- CONVERT COLOR BAR TO INTERVAL [0,1] + this_red = color[0] # /255.0 # ?? SCAFFOLDING RCB -- CONVERT COLOR BAR TO INTERVAL [0,1] this_green = color[1] # /255.0 this_blue = color[2] # /255.0 @@ -149,9 +127,7 @@ def set_image_to_linear_color_bar_y( elif lateral_gradient_type == "saturated_to_white": # Transition from saturated at left to white. - saturation_factor = 1.0 - pow( - lateral_fraction, saturated_to_white_exponent - ) + saturation_factor = 1.0 - pow(lateral_fraction, saturated_to_white_exponent) if saturation_factor < 0.0: saturation_factor = 0.0 if saturation_factor > 1.0: @@ -169,9 +145,7 @@ def set_image_to_linear_color_bar_y( elif lateral_gradient_type == "light_to_saturated": # Transition from partially saturated at left to fully saturated at boundary. saturation_range = light_to_saturated_max - light_to_saturated_min - saturation_factor = light_to_saturated_min + ( - lateral_fraction * saturation_range - ) + saturation_factor = light_to_saturated_min + (lateral_fraction * saturation_range) if saturation_factor < 0.0: saturation_factor = 0.0 if saturation_factor > 1.0: @@ -251,13 +225,7 @@ def set_image_to_polar_color_bar( this_radius = math.sqrt((delta_x * delta_x) + (delta_y * delta_y)) # Lookup color given angle.# (Saturation not adjusted yet.) color = tcc.color_given_value( - this_angle, - -math.pi, - math.pi, - color_below_min, - color_bar, - color_above_max, - discrete_or_continuous, + this_angle, -math.pi, math.pi, color_below_min, color_bar, color_above_max, discrete_or_continuous ) # Compute saturation adjustment. # Determine the radius to use for scaling the saturation. 
@@ -272,8 +240,7 @@ def set_image_to_polar_color_bar( radius_for_this_angle = abs(half_height / math.sin(this_angle)) else: radius_for_this_angle = min( - abs(half_width / math.cos(this_angle)), - abs(half_height / math.sin(this_angle)), + abs(half_width / math.cos(this_angle)), abs(half_height / math.sin(this_angle)) ) else: print( @@ -289,17 +256,13 @@ def set_image_to_polar_color_bar( # Adjust saturation. # Color components. - this_red = color[ - 0 - ] # /255.0 # ?? SCAFFOLDING RCB -- CONVERT COLOR BAR TO INTERVAL [0,1] + this_red = color[0] # /255.0 # ?? SCAFFOLDING RCB -- CONVERT COLOR BAR TO INTERVAL [0,1] this_green = color[1] # /255.0 this_blue = color[2] # /255.0 if radial_gradient_type == "saturated_center_to_white": # Transition from saturated at center to white. - saturation_factor = 1.0 - pow( - radius_fraction, saturated_center_to_white_exponent - ) + saturation_factor = 1.0 - pow(radius_fraction, saturated_center_to_white_exponent) if saturation_factor < 0.0: saturation_factor = 0.0 if saturation_factor > 1.0: @@ -317,12 +280,9 @@ def set_image_to_polar_color_bar( elif radial_gradient_type == "light_center_to_saturated": # Transition from partially saturated at center to fully saturated at boundary. saturation_range = ( - light_center_to_saturated_saturation_max - - light_center_to_saturated_saturation_min - ) - saturation_factor = light_center_to_saturated_saturation_min + ( - radius_fraction * saturation_range + light_center_to_saturated_saturation_max - light_center_to_saturated_saturation_min ) + saturation_factor = light_center_to_saturated_saturation_min + (radius_fraction * saturation_range) if saturation_factor < 0.0: saturation_factor = 0.0 if saturation_factor > 1.0: @@ -363,12 +323,8 @@ def set_image_to_polar_color_bar( if draw_center_fiducial: self.set_center_fiducial(center_fiducial_width_pix, center_fiducial_color) if draw_edge_fiducials: - self.set_ticks_along_top_and_bottom_edges( - n_ticks_x, tick_length, tick_width_pix, tick_color - ) - self.set_ticks_along_left_and_right_edges( - n_ticks_y, tick_length, tick_width_pix, tick_color - ) + self.set_ticks_along_top_and_bottom_edges(n_ticks_x, tick_length, tick_width_pix, tick_color) + self.set_ticks_along_left_and_right_edges(n_ticks_y, tick_length, tick_width_pix, tick_color) # Fiducial tick marks. def set_center_fiducial(self, center_fiducial_width_pix, center_fiducial_color): @@ -382,28 +338,18 @@ def set_center_fiducial(self, center_fiducial_width_pix, center_fiducial_color): # Number of pixels in center_fiducial either side of center point. Use int() to intentionally truncate. 
center_fiducial_half_margin_pix = int(center_fiducial_width_pix / 2.0) if center_fiducial_half_margin_pix == 0: - if ((center_row >= 0) and (center_row < n_rows)) and ( - (center_col >= 0) and (center_col < n_cols) - ): + if ((center_row >= 0) and (center_row < n_rows)) and ((center_col >= 0) and (center_col < n_cols)): self.set_fiducial_pixel(center_row, center_col, center_fiducial_color) elif center_fiducial_half_margin_pix > 0: for this_row in range( - center_row - center_fiducial_half_margin_pix, - (center_row + center_fiducial_half_margin_pix) + 1, + center_row - center_fiducial_half_margin_pix, (center_row + center_fiducial_half_margin_pix) + 1 ): for this_col in range( - center_col - center_fiducial_half_margin_pix, - (center_col + center_fiducial_half_margin_pix) + 1, + center_col - center_fiducial_half_margin_pix, (center_col + center_fiducial_half_margin_pix) + 1 ): - if ((this_row >= 0) and (this_row < n_rows)) and ( - (this_col >= 0) and (this_col < n_cols) - ): - self.set_fiducial_pixel( - this_row, this_col, center_fiducial_color - ) - self.set_fiducial_pixel( - this_row, this_col, center_fiducial_color - ) + if ((this_row >= 0) and (this_row < n_rows)) and ((this_col >= 0) and (this_col < n_cols)): + self.set_fiducial_pixel(this_row, this_col, center_fiducial_color) + self.set_fiducial_pixel(this_row, this_col, center_fiducial_color) else: print( 'ERROR: In TargetColor.set_center_fiducial(), unexpected negative center_fiducial_half_margin_pix = ' @@ -411,9 +357,7 @@ def set_center_fiducial(self, center_fiducial_width_pix, center_fiducial_color): ) assert False # ?? SCAFFOLDING RCB -- CONVERT TO EXCEPTION. - def set_ticks_along_top_and_bottom_edges( - self, n_ticks_x, tick_length, tick_width_pix, tick_color - ): + def set_ticks_along_top_and_bottom_edges(self, n_ticks_x, tick_length, tick_width_pix, tick_color): n_rows, n_cols = self.rows_cols() width = n_cols dx_tick = width / (n_ticks_x - 1) @@ -429,27 +373,16 @@ def set_ticks_along_top_and_bottom_edges( if tick_half_margin_pix == 0: if col_tick < 0: col_tick = 0 - if ( - col_tick == n_cols - ): # Use "==" because if tick is past end of image, don't draw. + if col_tick == n_cols: # Use "==" because if tick is past end of image, don't draw. col_tick = n_cols - 1 if (col_tick >= 0) and (col_tick < n_cols): self.set_fiducial_pixel(this_row_from_top, col_tick, tick_color) - self.set_fiducial_pixel( - this_row_from_bottom, col_tick, tick_color - ) + self.set_fiducial_pixel(this_row_from_bottom, col_tick, tick_color) elif tick_half_margin_pix > 0: - for this_col in range( - col_tick - tick_half_margin_pix, - (col_tick + tick_half_margin_pix) + 1, - ): + for this_col in range(col_tick - tick_half_margin_pix, (col_tick + tick_half_margin_pix) + 1): if (this_col >= 0) and (this_col < n_cols): - self.set_fiducial_pixel( - this_row_from_top, this_col, tick_color - ) - self.set_fiducial_pixel( - this_row_from_bottom, this_col, tick_color - ) + self.set_fiducial_pixel(this_row_from_top, this_col, tick_color) + self.set_fiducial_pixel(this_row_from_bottom, this_col, tick_color) else: print( 'ERROR: In TargetColor.set_ticks_along_top_and_bottom_edges(), unexpected negative tick_half_margin_pix = ' @@ -457,9 +390,7 @@ def set_ticks_along_top_and_bottom_edges( ) assert False # ?? SCAFFOLDING RCB -- CONVERT TO EXCEPTION. 
- def set_ticks_along_left_and_right_edges( - self, n_ticks_y, tick_length, tick_width_pix, tick_color - ): + def set_ticks_along_left_and_right_edges(self, n_ticks_y, tick_length, tick_width_pix, tick_color): n_rows, n_cols = self.rows_cols() height = n_rows dy_tick = height / (n_ticks_y - 1) @@ -475,29 +406,16 @@ def set_ticks_along_left_and_right_edges( if tick_half_margin_pix == 0: if row_tick < 0: row_tick = 0 - if ( - row_tick == n_rows - ): # Use "==" because if tick is past end of image, don't draw. + if row_tick == n_rows: # Use "==" because if tick is past end of image, don't draw. row_tick = n_rows - 1 if (row_tick >= 0) and (row_tick < n_rows): - self.set_fiducial_pixel( - row_tick, this_col_from_left, tick_color - ) - self.set_fiducial_pixel( - row_tick, this_col_from_right, tick_color - ) + self.set_fiducial_pixel(row_tick, this_col_from_left, tick_color) + self.set_fiducial_pixel(row_tick, this_col_from_right, tick_color) elif tick_half_margin_pix > 0: - for this_row in range( - row_tick - tick_half_margin_pix, - (row_tick + tick_half_margin_pix) + 1, - ): + for this_row in range(row_tick - tick_half_margin_pix, (row_tick + tick_half_margin_pix) + 1): if (this_row >= 0) and (this_row < n_rows): - self.set_fiducial_pixel( - this_row, this_col_from_left, tick_color - ) - self.set_fiducial_pixel( - this_row, this_col_from_right, tick_color - ) + self.set_fiducial_pixel(this_row, this_col_from_left, tick_color) + self.set_fiducial_pixel(this_row, this_col_from_right, tick_color) else: print( 'ERROR: In TargetColor.set_ticks_along_left_and_right_edges(), unexpected negative tick_half_margin_pix = ' @@ -521,9 +439,7 @@ def set_image_to_blue_under_red_cross_green(self): # Square inscribed in the [R,G,B] space basis vector hexagon. def set_image_to_rgb_cube_inscribed_square(self, project_to_cube): n_rows, n_cols = self.rows_cols() - self.image = tc2r.construct_rgb_cube_inscribed_square_image( - n_cols, n_rows, project_to_cube - ) + self.image = tc2r.construct_rgb_cube_inscribed_square_image(n_cols, n_rows, project_to_cube) # Compute color saturation adjustment. # ?? SCAFFOLDING RCB -- ADD TYPE TIPS. @@ -535,12 +451,8 @@ def adjust_rgb_color_saturation(self, rgb, saturation_fraction, max_rgb): # Compute new color. # ?? SCAFFOLDING RCB -- DOCUMENT ABANDONED MID-IMPLEMENTATION new_red = original_red * saturation_fraction # ?? SCAFFOLDING RCB -- TEMPORARY - new_green = ( - original_green * saturation_fraction - ) # ?? SCAFFOLDING RCB -- TEMPORARY - new_blue = ( - original_blue * saturation_fraction - ) # ?? SCAFFOLDING RCB -- TEMPORARY + new_green = original_green * saturation_fraction # ?? SCAFFOLDING RCB -- TEMPORARY + new_blue = original_blue * saturation_fraction # ?? SCAFFOLDING RCB -- TEMPORARY # Return. return (new_red, new_green, new_blue) @@ -613,27 +525,17 @@ def construct_target_linear_color_bar( ) else: - print( - 'ERROR: In construct_target_linear_color_bar(), x_or_y has unexpected value "' - + str(x_or_y) - + '"' - ) + print('ERROR: In construct_target_linear_color_bar(), x_or_y has unexpected value "' + str(x_or_y) + '"') assert False # Set pattern description. include_above_below_in_pattern_name = True if include_above_below_in_pattern_name: color_pattern_name = ( - color_below_min.short_name - + '.' - + color_bar_name - + '_linear.' - + color_above_max.short_name + color_below_min.short_name + '.' + color_bar_name + '_linear.' + color_above_max.short_name ) # ?? SCAFFOLDING RCB -- REPLACE "color_bar_name" WITH CLASS FETCH else: color_pattern_name = color_bar_name # ?? 
SCAFFOLDING RCB -- REPLACE "color_bar_name" WITH CLASS FETCH - target.set_pattern_description( - color_pattern_name + '_' + x_or_y + '_' + discrete_or_continuous - ) + target.set_pattern_description(color_pattern_name + '_' + x_or_y + '_' + discrete_or_continuous) # Return. return target @@ -693,13 +595,7 @@ def construct_target_polar_color_bar( # Target dimensions. ) # Set pattern description. # Color bar. - pattern_description = ( - color_below_min.short_name - + '.' - + color_bar_name - + '.' - + color_above_max.short_name - ) + pattern_description = color_below_min.short_name + '.' + color_bar_name + '.' + color_above_max.short_name # Linear vs. polar. pattern_description += '_polar_' + radial_gradient_name # Color interpolation. @@ -715,9 +611,7 @@ def construct_target_polar_color_bar( # Target dimensions. assert False # ?? SCAFFOLDING RCB -- USE EXCEPTION # Radial gradient. if radial_gradient_type == "saturated_center_to_white": - pattern_description += '_exp' + '{0:.2f}'.format( - saturated_center_to_white_exponent - ) + pattern_description += '_exp' + '{0:.2f}'.format(saturated_center_to_white_exponent) elif radial_gradient_type == "light_center_to_saturated": pattern_description += ( '_sat' @@ -755,9 +649,7 @@ def construct_target_polar_color_bar( # Target dimensions. def construct_target_blue_under_red_cross_green( - image_width: float, # Meter - image_height: float, # Meter - dpm: float, # Dots per meter + image_width: float, image_height: float, dpm: float # Meter # Meter # Dots per meter ) -> TargetColor: # Blank target. target = tc.TargetColor( @@ -766,7 +658,9 @@ def construct_target_blue_under_red_cross_green( # Set colors. target.set_image_to_blue_under_red_cross_green() # Set pattern description. - color_pattern_name_root = 'blue_under_red_cross_green' # ?? SCAFFOLDING RCB -- REPLACE "color_pattern_name_root" WITH CLASS FETCH? + color_pattern_name_root = ( + 'blue_under_red_cross_green' # ?? SCAFFOLDING RCB -- REPLACE "color_pattern_name_root" WITH CLASS FETCH? + ) color_pattern_name = color_pattern_name_root target.set_pattern_description(color_pattern_name) # Return. @@ -774,10 +668,7 @@ def construct_target_blue_under_red_cross_green( def construct_target_rgb_cube_inscribed_square( - image_width: float, # Meter - image_height: float, # Meter - dpm: float, # Dots per meter - project_to_cube: bool, + image_width: float, image_height: float, dpm: float, project_to_cube: bool # Meter # Meter # Dots per meter ) -> TargetColor: # Blank target. target = tc.TargetColor( @@ -786,7 +677,9 @@ def construct_target_rgb_cube_inscribed_square( # Set colors. target.set_image_to_rgb_cube_inscribed_square(project_to_cube) # Set pattern description. - color_pattern_name_root = 'rgb_cube_inscribed_square' # ?? SCAFFOLDING RCB -- REPLACE "color_pattern_name_root" WITH CLASS FETCH? + color_pattern_name_root = ( + 'rgb_cube_inscribed_square' # ?? SCAFFOLDING RCB -- REPLACE "color_pattern_name_root" WITH CLASS FETCH? + ) if project_to_cube: color_pattern_name_root += '_projected' else: @@ -832,9 +725,7 @@ def extend_target_left( new_image_width = new_n_cols / dpm new_image_height = target.image_height new_image_dpm = dpm - new_target = TargetColor( - new_image_width, new_image_height, new_image_dpm, new_color - ) + new_target = TargetColor(new_image_width, new_image_height, new_image_dpm, new_color) # Check code consistency. check_n_rows, check_n_cols, check_n_bands = new_target.rows_cols_bands() @@ -875,11 +766,7 @@ def extend_target_left( # Set description. 
if new_target_name == None: new_target.set_pattern_description( - 'l' - + str(new_pixels) - + new_color.short_name - + 'px_' - + target.pattern_description + 'l' + str(new_pixels) + new_color.short_name + 'px_' + target.pattern_description ) else: new_target.set_pattern_description(new_target_name) @@ -912,9 +799,7 @@ def extend_target_right( new_image_width = new_n_cols / dpm new_image_height = target.image_height new_image_dpm = dpm - new_target = TargetColor( - new_image_width, new_image_height, new_image_dpm, new_color - ) + new_target = TargetColor(new_image_width, new_image_height, new_image_dpm, new_color) # Check code consistency. check_n_rows, check_n_cols, check_n_bands = new_target.rows_cols_bands() @@ -955,11 +840,7 @@ def extend_target_right( # Set description. if new_target_name == None: new_target.set_pattern_description( - target.pattern_description - + '_r' - + str(new_pixels) - + new_color.short_name - + 'px' + target.pattern_description + '_r' + str(new_pixels) + new_color.short_name + 'px' ) else: new_target.set_pattern_description(new_target_name) @@ -999,9 +880,7 @@ def extend_target_top( new_image_width = target.image_width new_image_height = new_n_rows / dpm new_image_dpm = dpm - new_target = TargetColor( - new_image_width, new_image_height, new_image_dpm, new_color - ) + new_target = TargetColor(new_image_width, new_image_height, new_image_dpm, new_color) # Check code consistency. check_n_rows, check_n_cols, check_n_bands = new_target.rows_cols_bands() @@ -1042,11 +921,7 @@ def extend_target_top( # Set description. if new_target_name == None: new_target.set_pattern_description( - 't' - + str(new_pixels) - + new_color.short_name - + 'px_' - + target.pattern_description + 't' + str(new_pixels) + new_color.short_name + 'px_' + target.pattern_description ) else: new_target.set_pattern_description(new_target_name) @@ -1086,9 +961,7 @@ def extend_target_bottom( new_image_width = target.image_width new_image_height = new_n_rows / dpm new_image_dpm = dpm - new_target = TargetColor( - new_image_width, new_image_height, new_image_dpm, new_color - ) + new_target = TargetColor(new_image_width, new_image_height, new_image_dpm, new_color) # Check code consistency. check_n_rows, check_n_cols, check_n_bands = new_target.rows_cols_bands() @@ -1128,11 +1001,7 @@ def extend_target_bottom( # Set description. if new_target_name == None: new_target.set_pattern_description( - target.pattern_description - + '_b' - + str(new_pixels) - + new_color.short_name - + 'px' + target.pattern_description + '_b' + str(new_pixels) + new_color.short_name + 'px' ) else: new_target.set_pattern_description(new_target_name) @@ -1159,26 +1028,16 @@ def extend_target_all( """ # Extend. extended_target_l = tc.extend_target_left(target, new_pixels, new_color) - extended_target_lr = tc.extend_target_right( - extended_target_l, new_pixels, new_color - ) - extended_target_lrt = tc.extend_target_top( - extended_target_lr, new_pixels, new_color - ) - extended_target_lrtb = tc.extend_target_bottom( - extended_target_lrt, new_pixels, new_color - ) + extended_target_lr = tc.extend_target_right(extended_target_l, new_pixels, new_color) + extended_target_lrt = tc.extend_target_top(extended_target_lr, new_pixels, new_color) + extended_target_lrtb = tc.extend_target_bottom(extended_target_lrt, new_pixels, new_color) new_target = extended_target_lrtb # Set description. if new_target_name == None: # Add to target pattern_description to discard per-side description changes. 
new_target.set_pattern_description( - 'bord' - + str(new_pixels) - + new_color.short_name - + 'px_' - + target.pattern_description + 'bord' + str(new_pixels) + new_color.short_name + 'px_' + target.pattern_description ) else: new_target.set_pattern_description(new_target_name) @@ -1198,14 +1057,10 @@ def extend_target_for_splice_left_right( target: TargetColor, n_extend: int, fill_color: Color.Color, auto_expand: str ) -> TargetColor: if auto_expand == 'fill_top': - new_target = extend_target_top( - target, n_extend, fill_color, new_target_name=target.pattern_description - ) + new_target = extend_target_top(target, n_extend, fill_color, new_target_name=target.pattern_description) return new_target elif auto_expand == 'fill_bottom': - new_target = extend_target_bottom( - target, n_extend, fill_color, new_target_name=target.pattern_description - ) + new_target = extend_target_bottom(target, n_extend, fill_color, new_target_name=target.pattern_description) return new_target elif auto_expand == 'fill_even': n_extend_top = int(n_extend / 2) @@ -1217,19 +1072,13 @@ def extend_target_for_splice_left_right( assert False # ?? SCAFFOLDING -- CONVERT TO EXCEPTION. if n_extend_top > 0: new_target_top = extend_target_top( - target, - n_extend_top, - fill_color, - new_target_name=target.pattern_description, + target, n_extend_top, fill_color, new_target_name=target.pattern_description ) else: new_target_top = target if n_extend_bottom > 0: new_target_top_bottom = extend_target_bottom( - new_target_top, - n_extend_bottom, - fill_color, - new_target_name=target.pattern_description, + new_target_top, n_extend_bottom, fill_color, new_target_name=target.pattern_description ) else: new_target_top_bottom = new_target_top @@ -1280,20 +1129,14 @@ def splice_targets_left_right( if (left_n_rows != right_n_rows) and (auto_expand != None): if left_n_rows < right_n_rows: n_extend = right_n_rows - left_n_rows - left_target = extend_target_for_splice_left_right( - left_target, n_extend, initial_color, auto_expand - ) + left_target = extend_target_for_splice_left_right(left_target, n_extend, initial_color, auto_expand) left_n_rows, left_n_cols, left_n_bands = left_target.rows_cols_bands() elif left_n_rows > right_n_rows: n_extend = left_n_rows - right_n_rows - right_target = extend_target_for_splice_left_right( - right_target, n_extend, initial_color, auto_expand - ) + right_target = extend_target_for_splice_left_right(right_target, n_extend, initial_color, auto_expand) right_n_rows, right_n_cols, right_n_bands = right_target.rows_cols_bands() else: - print( - 'ERROR: In splice_targets_left_right(), unexpected situation encountered.' - ) + print('ERROR: In splice_targets_left_right(), unexpected situation encountered.') assert False # ?? SCAFFOLDING -- CONVERT TO EXCEPTION. # Check input. @@ -1314,11 +1157,7 @@ def splice_targets_left_right( ) assert False # ?? SCAFFOLDING RCB -- CONVERT TO EXCEPTION if left_n_bands != 3: - print( - "ERROR: In splice_targets_left_right(), left_n_bands=" - + str(left_n_bands) - + " is not equal to 3." - ) + print("ERROR: In splice_targets_left_right(), left_n_bands=" + str(left_n_bands) + " is not equal to 3.") assert False # ?? 
SCAFFOLDING RCB -- CONVERT TO EXCEPTION if left_dpm != right_dpm: print( @@ -1338,9 +1177,7 @@ def splice_targets_left_right( new_image_width = new_n_cols / left_dpm new_image_height = left_target.image_height new_image_dpm = left_dpm - new_target = TargetColor( - new_image_width, new_image_height, new_image_dpm, initial_color - ) + new_target = TargetColor(new_image_width, new_image_height, new_image_dpm, initial_color) # Check code consistency. check_n_rows, check_n_cols, check_n_bands = new_target.rows_cols_bands() @@ -1389,9 +1226,7 @@ def splice_targets_left_right( # Set description. if new_target_name == None: new_target.set_pattern_description( - left_target.pattern_description - + "__left__" - + right_target.pattern_description + left_target.pattern_description + "__left__" + right_target.pattern_description ) else: new_target.set_pattern_description(new_target_name) @@ -1430,9 +1265,7 @@ def splice_targets_above_below( ) # ?? SCAFFOLDING RCB -- SHOULD THESE BE ACCESSOR FUNCTIONS? WHAT IS OPENCSP POLICY/PATTERN ON THIS? # Check input. - if ( - above_n_cols != below_n_cols - ): # ?? SCAFFOLDING RCB -- EXTEND ROUTINE TO ALLOW UNEQUAL COLUMNS? + if above_n_cols != below_n_cols: # ?? SCAFFOLDING RCB -- EXTEND ROUTINE TO ALLOW UNEQUAL COLUMNS? print( "ERROR: In splice_targets_above_below(), unequal above_n_cols=" + str(above_n_cols) @@ -1449,11 +1282,7 @@ def splice_targets_above_below( ) assert False # ?? SCAFFOLDING RCB -- CONVERT TO EXCEPTION if above_n_bands != 3: - print( - "ERROR: In splice_targets_above_below(), above_n_bands=" - + str(above_n_bands) - + " is not equal to 3." - ) + print("ERROR: In splice_targets_above_below(), above_n_bands=" + str(above_n_bands) + " is not equal to 3.") assert False # ?? SCAFFOLDING RCB -- CONVERT TO EXCEPTION if above_dpm != below_dpm: print( @@ -1473,9 +1302,7 @@ def splice_targets_above_below( new_image_width = above_target.image_width new_image_height = new_n_rows / above_dpm new_image_dpm = above_dpm - new_target = TargetColor( - new_image_width, new_image_height, new_image_dpm, initial_color - ) + new_target = TargetColor(new_image_width, new_image_height, new_image_dpm, initial_color) # Check code consistency. check_n_rows, check_n_cols, check_n_bands = new_target.rows_cols_bands() @@ -1524,9 +1351,7 @@ def splice_targets_above_below( # Set description. if new_target_name == None: new_target.set_pattern_description( - above_target.pattern_description - + "__above__" - + below_target.pattern_description + above_target.pattern_description + "__above__" + below_target.pattern_description ) else: new_target.set_pattern_description(new_target_name) @@ -1552,10 +1377,7 @@ def construct_stacked_linear_color_bar( ) -> tc.TargetColor: # Check input. if n_stack < 1: - print( - 'ERROR: In stack_linear_color_bar(), encountered non-positive n_stack = ' - + str(n_stack) - ) + print('ERROR: In stack_linear_color_bar(), encountered non-positive n_stack = ' + str(n_stack)) assert False # ?? 
SCAFFOLDING RCB -- USE EXCEPTION if n_stack != len(discrete_or_continuous_list): print( @@ -1661,11 +1483,7 @@ def construct_stacked_linear_color_bar( light_to_saturated_max=this_light_to_saturated_max, ) stacked_target = tc.splice_targets_above_below( - this_bar_target_discrete, - stacked_target, - gap, - initial_color=gap_color, - new_target_name=target_name, + this_bar_target_discrete, stacked_target, gap, initial_color=gap_color, new_target_name=target_name ) elif discrete_or_continuous_list[idx] == 'continuous': this_bar_target_continuous = tc.construct_target_linear_color_bar( @@ -1737,9 +1555,7 @@ def construct_linear_color_bar_cascade( # Dimensions. ) -> tc.TargetColor: # Check input. if len(stack_sequence) == 0: - print( - 'ERROR: In construct_linear_color_bar_cascade(), encountered len(stack_sequence) == 0.' - ) + print('ERROR: In construct_linear_color_bar_cascade(), encountered len(stack_sequence) == 0.') assert False # ?? SCAFFOLDING RCB -- USE EXCEPTION if len(list_of_discrete_or_continuous_lists) != len(stack_sequence): print( @@ -1767,16 +1583,10 @@ def construct_linear_color_bar_cascade( # Dimensions. 'discrete', ) cascade_target = ref_target - cascade_target.set_pattern_description = tc.stacked_color_bar_name( - 1, cascade_target_name - ) + cascade_target.set_pattern_description = tc.stacked_color_bar_name(1, cascade_target_name) # Adjacent main linear color bar. - print( - 'In construct_linear_color_bar_cascade(), generating "' - + color_bar_name - + '" linear bar...' - ) + print('In construct_linear_color_bar_cascade(), generating "' + color_bar_name + '" linear bar...') main_color_target = tc.construct_target_linear_color_bar( color_bar_width, color_total_height, @@ -1789,25 +1599,15 @@ def construct_linear_color_bar_cascade( # Dimensions. 'discrete', ) cascade_target = tc.splice_targets_left_right( - cascade_target, - main_color_target, - gap=ref_gap_pix, - initial_color=gap_color, - new_target_name=cascade_target_name, + cascade_target, main_color_target, gap=ref_gap_pix, initial_color=gap_color, new_target_name=cascade_target_name ) # Generate cascade. for n_bars_in_stack, discrete_or_continuous_list, saturation_spec_list in zip( - stack_sequence, - list_of_discrete_or_continuous_lists, - list_of_saturation_spec_lists, + stack_sequence, list_of_discrete_or_continuous_lists, list_of_saturation_spec_lists ): # Status update. - print( - 'In construct_linear_color_bar_cascade(), generating stacked bar ' - + str(n_bars_in_stack) - + '...' - ) + print('In construct_linear_color_bar_cascade(), generating stacked bar ' + str(n_bars_in_stack) + '...') # Generate color bar stack and its neighbors. stacked_color_target = tc.construct_stacked_linear_color_bar( @@ -1850,29 +1650,17 @@ def construct_linear_color_bar_cascade( # Dimensions. 'discrete', ) # Always discrete. stacked_target = tc.splice_targets_left_right( - grey_target, - stacked_color_target, - gap=0, - initial_color=gap_color, - new_target_name=cascade_target_name, + grey_target, stacked_color_target, gap=0, initial_color=gap_color, new_target_name=cascade_target_name ) stacked_target = tc.splice_targets_left_right( - stacked_target, - grey_target, - gap=0, - initial_color=gap_color, - new_target_name=cascade_target_name, + stacked_target, grey_target, gap=0, initial_color=gap_color, new_target_name=cascade_target_name ) else: stacked_target = stacked_color_target # Update cascade. 
cascade_target = tc.splice_targets_left_right( - cascade_target, - stacked_target, - gap_between_bars_pix, - gap_color, - new_target_name=cascade_target_name, + cascade_target, stacked_target, gap_between_bars_pix, gap_color, new_target_name=cascade_target_name ) # Return. diff --git a/opencsp/common/lib/target/target_color_2d_rgb.py b/opencsp/common/lib/target/target_color_2d_rgb.py index f8bb6c51..b509453d 100755 --- a/opencsp/common/lib/target/target_color_2d_rgb.py +++ b/opencsp/common/lib/target/target_color_2d_rgb.py @@ -17,9 +17,7 @@ def construct_blue_under_red_cross_green( Blue underlying red cross green. """ - print( - "In construct_blue_under_red_cross_green()..." - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print("In construct_blue_under_red_cross_green()...") # ?? SCAFFOLDING RCB -- TEMPORARY # Create an empty image. n_rows = ny @@ -40,9 +38,7 @@ def construct_blue_under_red_cross_green( y = n_rows - row x_frac = x / x_max y_frac = y / y_max - diagonal_frac = np.sqrt(x * x + y * y) / np.sqrt( - x_max * x_max + y_max * y_max - ) + diagonal_frac = np.sqrt(x * x + y * y) / np.sqrt(x_max * x_max + y_max * y_max) image[row, col, 0] = x_frac * max_intensity image[row, col, 1] = y_frac * max_intensity image[row, col, 2] = (1 - diagonal_frac) * max_intensity @@ -63,9 +59,7 @@ def construct_rgb_cube_inscribed_square_image( the Red, Green, and Blue basis vectors, and their pairwise combinations to form Cyan, Magenta, and Yellow secondary vectors. """ - print( - "In construct_rgb_cube_inscribed_square_image()..." - ) # ?? SCAFFOLDING RCB -- TEMPORARY + print("In construct_rgb_cube_inscribed_square_image()...") # ?? SCAFFOLDING RCB -- TEMPORARY # Create container for color vectors l = 0.4 @@ -81,12 +75,7 @@ def construct_rgb_cube_inscribed_square_image( # Create mask for non-valid points mask = ( - (vecs[:, 0] > 1) - + (vecs[:, 0] < 0) - + (vecs[:, 1] > 1) - + (vecs[:, 1] < 0) - + (vecs[:, 2] > 1) - + (vecs[:, 2] < 0) + (vecs[:, 0] > 1) + (vecs[:, 0] < 0) + (vecs[:, 1] > 1) + (vecs[:, 1] < 0) + (vecs[:, 2] > 1) + (vecs[:, 2] < 0) ) # Apply mask @@ -130,12 +119,7 @@ def construct_rgb_cube_inscribed_square_image( # Create mask for non-valid points mask = ( - (vecs[:, 0] > 1) - + (vecs[:, 0] < 0) - + (vecs[:, 1] > 1) - + (vecs[:, 1] < 0) - + (vecs[:, 2] > 1) - + (vecs[:, 2] < 0) + (vecs[:, 0] > 1) + (vecs[:, 0] < 0) + (vecs[:, 1] > 1) + (vecs[:, 1] < 0) + (vecs[:, 2] > 1) + (vecs[:, 2] < 0) ) # Apply mask diff --git a/opencsp/common/lib/target/target_color_convert.py b/opencsp/common/lib/target/target_color_convert.py index 7e0c1bd4..8d081091 100755 --- a/opencsp/common/lib/target/target_color_convert.py +++ b/opencsp/common/lib/target/target_color_convert.py @@ -215,15 +215,7 @@ def nikon_D3300_monitor_equal_step_color_bar(): # Color lookup -def color_given_value( - val, - val_min, - val_max, - color_below_min, - color_bar, - color_above_max, - discrete_or_continuous, -): +def color_given_value(val, val_min, val_max, color_below_min, color_bar, color_above_max, discrete_or_continuous): # Out-of-bounds cases. if val < val_min: return color_below_min @@ -236,9 +228,7 @@ def color_given_value( # From here on we know (val_min <= val <= val_max), and color_bar contains multiple colors. n_steps = n_colors - 1 # Last block on color bar is not a step. val_step = (val_max - val_min) / n_steps - step = ( - val - val_min - ) / val_step # Since (val_min <= val <= val_max), we know (0 <= step <= 1). + step = (val - val_min) / val_step # Since (val_min <= val <= val_max), we know (0 <= step <= 1). 
idx = int(step) if discrete_or_continuous == 'discrete': return color_bar[idx] @@ -258,11 +248,7 @@ def color_given_value( d_green = color_1[1] - color_0[1] d_blue = color_1[2] - color_0[2] # Return. - return [ - (color_0[0] + (frac * d_red)), - (color_0[1] + (frac * d_green)), - (color_0[2] + (frac * d_blue)), - ] + return [(color_0[0] + (frac * d_red)), (color_0[1] + (frac * d_green)), (color_0[2] + (frac * d_blue))] else: print( 'ERROR: In color_given_value(), encountered unexpected discrete_or_continuous value:', @@ -282,15 +268,9 @@ def angle_between_color_vectors(rgb_1, rgb_2): def color_bar_segment_spanned_angle(idx, color_bar): if idx < 0: - print( - 'ERROR: In angle_between_color_vectors(), idx = ', - str(idx), - ' is less than zero.', - ) + print('ERROR: In angle_between_color_vectors(), idx = ', str(idx), ' is less than zero.') assert False # ?? SCAFFOLDING RCB -- REPLACE WITH RAISING AN EXCEPTION? (THROUGHOUT) - if idx >= ( - len(color_bar) - 1 - ): # Below we fetch color_bar(idx+1), so compare against (len(color-bar)-1). + if idx >= (len(color_bar) - 1): # Below we fetch color_bar(idx+1), so compare against (len(color-bar)-1). print( 'ERROR: In angle_between_color_vectors(), idx = ', str(idx), @@ -306,9 +286,7 @@ def color_bar_segment_spanned_angle(idx, color_bar): def construct_color_bar_spanned_angle_list(color_bar): angle_list = [] - for idx in range( - len(color_bar) - 1 - ): # One less than n_colors, because we look at pairs of (idx, idx+1). + for idx in range(len(color_bar) - 1): # One less than n_colors, because we look at pairs of (idx, idx+1). angle_1_2 = color_bar_segment_spanned_angle(idx, color_bar) angle_list.append(angle_1_2) # Return. @@ -319,9 +297,7 @@ def survey_color_bar(color_bar): angle_sum = 0.0 first_non_zero_angle_idx = -1 last_non_zero_angle_idx = -1 - for idx in range( - len(color_bar) - 1 - ): # One less than n_colors, because we look at pairs of (idx, idx+1). + for idx in range(len(color_bar) - 1): # One less than n_colors, because we look at pairs of (idx, idx+1). angle_1_2 = color_bar_segment_spanned_angle(idx, color_bar) if (first_non_zero_angle_idx < 0) and (angle_1_2 > 0): first_non_zero_angle_idx = idx @@ -332,15 +308,11 @@ def survey_color_bar(color_bar): return angle_sum, first_non_zero_angle_idx, last_non_zero_angle_idx -def construct_rgb_cumulative_angle_pair_list( - color_bar, first_color_idx, last_color_idx -): +def construct_rgb_cumulative_angle_pair_list(color_bar, first_color_idx, last_color_idx): cumulative_angle = 0.0 rgb_cumulative_angle_list = [] # TODO RCB: This logic is confusing and opaque. Can it be simplified? - if last_color_idx < ( - len(color_bar) - 1 - ): # Routine color_bar_segment_spanned_angle() will access color_bar[idx+1]. + if last_color_idx < (len(color_bar) - 1): # Routine color_bar_segment_spanned_angle() will access color_bar[idx+1]. last_idx = last_color_idx else: last_idx = len(color_bar) - 1 @@ -415,22 +387,14 @@ def interpolate_color(desired_angle, rgb_before, angle_before, rgb_after, angle_ interpolated_b = b_before + (angle_frac * (b_after - b_before)) # Assemble. # We round values because [R,G,B] colors are defined by ints. Another approximation. 
- interpolated_rgb = ( - round(interpolated_r), - round(interpolated_g), - round(interpolated_b), - ) + interpolated_rgb = (round(interpolated_r), round(interpolated_g), round(interpolated_b)) return interpolated_rgb def interpolate_color_given_angle(rgb_angle_list, desired_angle): - rgb_before, angle_before = lookup_color_and_angle_before( - rgb_angle_list, desired_angle - ) + rgb_before, angle_before = lookup_color_and_angle_before(rgb_angle_list, desired_angle) rgb_after, angle_after = lookup_color_and_angle_after(rgb_angle_list, desired_angle) - interpolated_rgb = interpolate_color( - desired_angle, rgb_before, angle_before, rgb_after, angle_after - ) + interpolated_rgb = interpolate_color(desired_angle, rgb_before, angle_before, rgb_after, angle_after) # print("In interpolate_color_given_angle(), angle before/desired/after: {ab:.4f} / {da:.4f} / {aa:.4f} rgb before/interpolated/after: {rb} / {ir} / {ra}".format(ab=angle_before, da=desired_angle, aa=angle_after, rb=rgb_before, ir=interpolated_rgb, ra=rgb_after)) return interpolated_rgb @@ -442,9 +406,7 @@ def normalize_color_bar_to_equal_angles(color_bar): # Survey color bar, computing total color spanning angle and identifying preamble/postamble # boundary sections with zero color spanning angle. # Note: Input color bar must not have interior segments with zero color spanning angle. - angle_sum, first_non_zero_angle_idx, last_non_zero_angle_idx = survey_color_bar( - color_bar - ) + angle_sum, first_non_zero_angle_idx, last_non_zero_angle_idx = survey_color_bar(color_bar) # Interpolation parameters. first_color_idx = first_non_zero_angle_idx @@ -453,9 +415,7 @@ def normalize_color_bar_to_equal_angles(color_bar): angle_step = angle_sum / n_steps # Construct reference color/angle pair list. - rgb_cumulative_angle_list = construct_rgb_cumulative_angle_pair_list( - color_bar, first_color_idx, last_color_idx - ) + rgb_cumulative_angle_list = construct_rgb_cumulative_angle_pair_list(color_bar, first_color_idx, last_color_idx) # Generate equal-angle colors, along the same path in the [R,G,B] color space. rgb_0 = color_bar[first_color_idx] @@ -464,9 +424,7 @@ def normalize_color_bar_to_equal_angles(color_bar): equal_angle_rgb_angle_list.append([rgb_0, cumulative_angle]) while cumulative_angle < (angle_sum - 1e-6): # tolerance to prevent additional step cumulative_angle += angle_step - interpolated_rgb = interpolate_color_given_angle( - rgb_cumulative_angle_list, cumulative_angle - ) + interpolated_rgb = interpolate_color_given_angle(rgb_cumulative_angle_list, cumulative_angle) equal_angle_rgb_angle_list.append([interpolated_rgb, cumulative_angle]) # Construct a color bar, without angles. @@ -496,7 +454,5 @@ def normalize_color_bar_to_equal_angles(color_bar): print("\nequal_angle_color_bar = ", equal_angle_color_bar) # Print angle list. 
- normalized_angle_list = construct_color_bar_spanned_angle_list( - equal_angle_color_bar - ) + normalized_angle_list = construct_color_bar_spanned_angle_list(equal_angle_color_bar) print("\nNormalized spanned angle list=", normalized_angle_list) diff --git a/opencsp/common/lib/target/target_image.py b/opencsp/common/lib/target/target_image.py index 16189e31..bffb33b4 100755 --- a/opencsp/common/lib/target/target_image.py +++ b/opencsp/common/lib/target/target_image.py @@ -13,9 +13,7 @@ import opencsp.common.lib.tool.unit_conversion as uc -def construct_target_image( - image_width, image_height, dpm # Meters # Meters -): # Dots per meter +def construct_target_image(image_width, image_height, dpm): # Meters # Meters # Dots per meter image_cols = round(image_width * dpm) image_rows = round(image_height * dpm) img = np.uint8( diff --git a/opencsp/common/lib/test/TestOutput.py b/opencsp/common/lib/test/TestOutput.py index 32defa67..80e9ca35 100644 --- a/opencsp/common/lib/test/TestOutput.py +++ b/opencsp/common/lib/test/TestOutput.py @@ -17,9 +17,7 @@ from opencsp.common.lib.render_control.RenderControlAxis import RenderControlAxis import opencsp.common.lib.render_control.RenderControlFigure as rcfg from opencsp.common.lib.render_control.RenderControlFigure import RenderControlFigure -from opencsp.common.lib.render_control.RenderControlFigureRecord import ( - RenderControlFigureRecord, -) +from opencsp.common.lib.render_control.RenderControlFigureRecord import RenderControlFigureRecord import opencsp.common.lib.test.support_test as stest import opencsp.common.lib.tool.file_tools as ft import opencsp.common.lib.tool.log_tools as lt @@ -43,17 +41,11 @@ def setup_class( self.output_path = output_path # Set the location to save files. - self.expected_output_dir = os.path.join( - self.output_path, 'data', 'input', self.source_file_body - ) - self.actual_output_dir = os.path.join( - self.output_path, 'data', 'output', self.source_file_body - ) + self.expected_output_dir = os.path.join(self.output_path, 'data', 'input', self.source_file_body) + self.actual_output_dir = os.path.join(self.output_path, 'data', 'output', self.source_file_body) # Setup log reporting. - log_file_dir_body_ext = os.path.join( - self.actual_output_dir, self.source_file_body + '.log' - ) + log_file_dir_body_ext = os.path.join(self.actual_output_dir, self.source_file_body + '.log') print('log_file_dir_body_ext = ', log_file_dir_body_ext) lt.logger(log_file_dir_body_ext, delete_existing_log=True) @@ -66,12 +58,8 @@ def setup_class( # Set the figure and axis control for all figures. 
lt.info('Initializing render control structures...') - self.figure_control: RenderControlFigure = rcfg.RenderControlFigure( - tile_array=(2, 1), tile_square=True - ) - self.figure_control_large: RenderControlFigure = rcfg.RenderControlFigure( - tile_array=(1, 1), tile_square=False - ) + self.figure_control: RenderControlFigure = rcfg.RenderControlFigure(tile_array=(2, 1), tile_square=True) + self.figure_control_large: RenderControlFigure = rcfg.RenderControlFigure(tile_array=(1, 1), tile_square=False) self.axis_control_m: RenderControlAxis = rca.meters() # Note: It is tempting to put the "Reset rendering" code lines here, to avoid redundant @@ -122,20 +110,13 @@ def figure_prefix(self, figure_num: int) -> None: # return self.figure_prefix_root + '{0:03d}'.format(figure_num) - def show_save_and_check_figure( - self, fig_record: RenderControlFigureRecord, dpi=600 - ) -> None: + def show_save_and_check_figure(self, fig_record: RenderControlFigureRecord, dpi=600) -> None: """ Once a figure is drawn, this routine wraps up the test. """ # Show the figure, save it to disk, and verify that it matches expectations. stest.show_save_and_check_figure( - fig_record, - self.actual_output_dir, - self.expected_output_dir, - self.verify, - show_figs=True, - dpi=dpi, + fig_record, self.actual_output_dir, self.expected_output_dir, self.verify, show_figs=True, dpi=dpi ) # Clear. if not self.interactive: @@ -147,13 +128,7 @@ def save_and_check_image(self, image, dpm, output_file_body, output_ext) -> None """ # Save the image to disk, and verify that it matches expectations. stest.save_and_check_image( - image, - dpm, - self.actual_output_dir, - self.expected_output_dir, - output_file_body, - output_ext, - self.verify, + image, dpm, self.actual_output_dir, self.expected_output_dir, output_file_body, output_ext, self.verify ) # Clear. if not self.interactive: diff --git a/opencsp/common/lib/test/support_test.py b/opencsp/common/lib/test/support_test.py index edcb5fae..8b49c6e3 100644 --- a/opencsp/common/lib/test/support_test.py +++ b/opencsp/common/lib/test/support_test.py @@ -7,9 +7,7 @@ import opencsp.common.lib.tool.file_tools as ft import opencsp.common.lib.tool.log_tools as lt -from opencsp.common.lib.render_control.RenderControlFigureRecord import ( - RenderControlFigureRecord, -) +from opencsp.common.lib.render_control.RenderControlFigureRecord import RenderControlFigureRecord import opencsp.common.lib.target.target_image as ti import opencsp.common.lib.csp.SolarField as sf @@ -22,9 +20,7 @@ def lines_share_common_string(line_1: str, line_2: str, ignore: str) -> bool: return False -def lines_share_an_ignore_string( - line_1: str, line_2: str, ignore_string_list: list[str] -) -> bool: +def lines_share_an_ignore_string(line_1: str, line_2: str, ignore_string_list: list[str]) -> bool: for ignore_string in ignore_string_list: if lines_share_common_string(line_1, line_2, ignore_string): return True @@ -43,9 +39,7 @@ def svg_lines_are_equal(line_1: str, line_2: str) -> bool: elif len(line_1) != len(line_2): return False # svg string analysis. - if lines_share_an_ignore_string( - line_1, line_2, ['', 'clip-path', 'clipPath', 'path id', ''] - ): + if lines_share_an_ignore_string(line_1, line_2, ['', 'clip-path', 'clipPath', 'path id', '']): return True else: # Walk the lines comparing characters, ignoring hashed addresses. 
@@ -119,9 +113,7 @@ def compare_txt_files(expected_file: str, actual_file: str) -> bool: return True -def verify_output_file_matches_expected( - file_created: str, actual_output_dir: str, expected_output_dir: str -) -> None: +def verify_output_file_matches_expected(file_created: str, actual_output_dir: str, expected_output_dir: str) -> None: """ Verifies that the actual output file matches what's expected. """ @@ -145,9 +137,7 @@ def verify_output_file_matches_expected( ) # Verify both files are equal. if created_ext == '.svg': - svg_files_are_equal = compare_svg_files( - actual_dir_body_ext, expected_dir_body_ext - ) + svg_files_are_equal = compare_svg_files(actual_dir_body_ext, expected_dir_body_ext) if not svg_files_are_equal: lt.error_and_raise( ValueError, @@ -156,14 +146,9 @@ def verify_output_file_matches_expected( ' expected_dir_body_ext = ' + str(expected_dir_body_ext), ) elif created_ext == '.png': - png_files_are_equal = compare_actual_expected_images( - actual_dir_body_ext, expected_dir_body_ext - ) + png_files_are_equal = compare_actual_expected_images(actual_dir_body_ext, expected_dir_body_ext) if png_files_are_equal is not None: - lt.error_and_raise( - ValueError, - 'In verify_output_file_matches_expected(), ' + png_files_are_equal, - ) + lt.error_and_raise(ValueError, 'In verify_output_file_matches_expected(), ' + png_files_are_equal) else: files_are_equal = compare_txt_files(actual_dir_body_ext, expected_dir_body_ext) @@ -183,9 +168,7 @@ def verify_output_files_match_expected( Verifies that all of the output files match what's expected. """ for file_created in files_created: - verify_output_file_matches_expected( - file_created, actual_output_dir, expected_output_dir - ) + verify_output_file_matches_expected(file_created, actual_output_dir, expected_output_dir) def show_save_and_check_figure( @@ -208,14 +191,10 @@ def show_save_and_check_figure( z_limits=fig_record.z_limits, ) # Save. - files_created = fig_record.save( - actual_output_dir, format='png', dpi=dpi - ) # Filename inferred from figure title. + files_created = fig_record.save(actual_output_dir, format='png', dpi=dpi) # Filename inferred from figure title. # Check. if verify: - verify_output_files_match_expected( - files_created, actual_output_dir, expected_output_dir - ) + verify_output_files_match_expected(files_created, actual_output_dir, expected_output_dir) def save_and_check_image( @@ -237,26 +216,20 @@ def save_and_check_image( files_created = [file_created] # Check. 
if verify: - verify_output_files_match_expected( - files_created, actual_output_dir, expected_output_dir - ) + verify_output_files_match_expected(files_created, actual_output_dir, expected_output_dir) def compare_actual_expected_images(actual_location: str, expected_location: str): return mplt.compare_images(expected_location, actual_location, 0.2) -def load_solar_field_partition( - heliostat_names: list, partitioned_csv_file_name: str -) -> sf.SolarField: +def load_solar_field_partition(heliostat_names: list, partitioned_csv_file_name: str) -> sf.SolarField: import opencsp.common.lib.opencsp_path.data_path_for_test as dpft import opencsp.common.lib.geo.lon_lat_nsttf as lln import csv # Load the CSV into a dictionary - with open( - dpft.sandia_nsttf_test_heliostats_origin_file(), 'r', newline='' - ) as infile: + with open(dpft.sandia_nsttf_test_heliostats_origin_file(), 'r', newline='') as infile: reader = csv.reader(infile) dict = {rows[0]: rows[0:-1] for rows in reader} diff --git a/opencsp/common/lib/test/test_MirrorOutput.py b/opencsp/common/lib/test/test_MirrorOutput.py index 8036fc9c..0ee03cc4 100644 --- a/opencsp/common/lib/test/test_MirrorOutput.py +++ b/opencsp/common/lib/test/test_MirrorOutput.py @@ -25,9 +25,7 @@ import opencsp.common.lib.tool.string_tools as st from opencsp.common.lib.csp.ufacet.Facet import Facet from opencsp.common.lib.csp.ufacet.Heliostat import Heliostat -from opencsp.common.lib.csp.MirrorParametricRectangular import ( - MirrorParametricRectangular, -) +from opencsp.common.lib.csp.MirrorParametricRectangular import MirrorParametricRectangular from opencsp.common.lib.csp.MirrorParametric import MirrorParametric from opencsp.common.lib.csp.SolarField import SolarField @@ -58,16 +56,8 @@ def setup_class( self.m1_len_y = 3.0 # m self.m1_rectangle_xy = (self.m1_len_x, self.m1_len_y) self.m1 = MirrorParametricRectangular(self.m1_fxn, self.m1_rectangle_xy) - self.m1_shape_description = ( - 'rectangle ' + str(self.m1_len_x) + 'm x ' + str(self.m1_len_y) + 'm' - ) - self.m1_title = ( - 'Mirror (' - + self.m1_shape_description - + ', f=' - + str(self.m1_focal_length) - + 'm), Face Up' - ) + self.m1_shape_description = 'rectangle ' + str(self.m1_len_x) + 'm x ' + str(self.m1_len_y) + 'm' + self.m1_title = 'Mirror (' + self.m1_shape_description + ', f=' + str(self.m1_focal_length) + 'm), Face Up' self.m1_caption = ( 'A single mirror of shape (' + self.m1_shape_description @@ -107,26 +97,16 @@ def setup_class( # Set canting angles. 
cos5 = np.cos(np.deg2rad(8)) sin5 = np.sin(np.deg2rad(8)) - tilt_up = Rotation.from_matrix( - np.asarray([[1, 0, 0], [0, cos5, -sin5], [0, sin5, cos5]]) - ) - tilt_down = Rotation.from_matrix( - np.asarray([[1, 0, 0], [0, cos5, sin5], [0, -sin5, cos5]]) - ) - tilt_left = Rotation.from_matrix( - np.asarray([[cos5, 0, sin5], [0, 1, 0], [-sin5, 0, cos5]]) - ) - tilt_right = Rotation.from_matrix( - np.asarray([[cos5, 0, -sin5], [0, 1, 0], [sin5, 0, cos5]]) - ) + tilt_up = Rotation.from_matrix(np.asarray([[1, 0, 0], [0, cos5, -sin5], [0, sin5, cos5]])) + tilt_down = Rotation.from_matrix(np.asarray([[1, 0, 0], [0, cos5, sin5], [0, -sin5, cos5]])) + tilt_left = Rotation.from_matrix(np.asarray([[cos5, 0, sin5], [0, 1, 0], [-sin5, 0, cos5]])) + tilt_right = Rotation.from_matrix(np.asarray([[cos5, 0, -sin5], [0, 1, 0], [sin5, 0, cos5]])) self.h2x2_f1.canting = tilt_left * tilt_up self.h2x2_f2.canting = tilt_right * tilt_up self.h2x2_f3.canting = tilt_left * tilt_down self.h2x2_f4.canting = tilt_right * tilt_down self.h2x2_facets = [self.h2x2_f1, self.h2x2_f2, self.h2x2_f3, self.h2x2_f4] - self.h2x2 = Heliostat( - 'Simple 2x2 Heliostat', [0, 0, 0], 4, 2, 2, self.h2x2_facets, 0, 0 - ) + self.h2x2 = Heliostat('Simple 2x2 Heliostat', [0, 0, 0], 4, 2, 2, self.h2x2_facets, 0, 0) self.h2x2_title = 'Heliostat with Parametrically Defined Facets' self.h2x2_caption = ( 'Heliostat with four facets (' @@ -138,37 +118,15 @@ def setup_class( self.h2x2_comments = [] # Simple solar field, with two simple heliostats. - self.sf2x2_h1 = Heliostat( - 'Heliostat 1', - [0, 0, 0], - 4, - 2, - 2, - copy.deepcopy(self.h2x2_facets), - 4.02, - 0.1778, - ) - self.sf2x2_h2 = Heliostat( - 'Heliostat 2', - [0, 10, 0], - 4, - 2, - 2, - copy.deepcopy(self.h2x2_facets), - 4.02, - 0.1778, - ) + self.sf2x2_h1 = Heliostat('Heliostat 1', [0, 0, 0], 4, 2, 2, copy.deepcopy(self.h2x2_facets), 4.02, 0.1778) + self.sf2x2_h2 = Heliostat('Heliostat 2', [0, 10, 0], 4, 2, 2, copy.deepcopy(self.h2x2_facets), 4.02, 0.1778) self.sf2x2_heliostats = [self.sf2x2_h1, self.sf2x2_h2] - self.sf2x2 = SolarField( - 'Test Field', 'test', [-106.509606, 34.962276], self.sf2x2_heliostats - ) + self.sf2x2 = SolarField('Test Field', 'test', [-106.509606, 34.962276], self.sf2x2_heliostats) self.sf2x2_title = 'Two Heliostats' self.sf2x2_caption = 'Two 4-facet heliostats, tracking.' self.sf2x2_comments = [] - def lambda_symmetric_paraboloid( - self, focal_length: float - ) -> Callable[[float, float], float]: + def lambda_symmetric_paraboloid(self, focal_length: float) -> Callable[[float, float], float]: """Returns a callable for a symmetric paraboloid surface Parameters @@ -244,9 +202,7 @@ def test_facet(self) -> None: draw_surface_normal=False, draw_surface_normal_at_corners=True, ) - local_comments.append( - 'Render mirror surface with normals, facet outline with corner normals.' - ) + local_comments.append('Render mirror surface with normals, facet outline with corner normals.') # Draw. fig_record = fm.setup_figure_for_3d_data( @@ -276,25 +232,16 @@ def test_solar_field(self) -> None: # Set configurations. self.sf2x2_h1.set_configuration(hc.face_west()) - local_comments.append( - 'Heliostat 1 oriented initially face west.' - ) # Overriden by tracking below. + local_comments.append('Heliostat 1 oriented initially face west.') # Overriden by tracking below. self.sf2x2_h2.set_configuration(hc.face_south()) - local_comments.append( - 'Heliostat 2 oriented initially face south.' - ) # Overriden by tracking below. 
+ local_comments.append('Heliostat 2 oriented initially face south.') # Overriden by tracking below. # Define tracking time. aimpoint_xyz = [60.0, 8.8, 28.9] # year, month, day, hour, minute, second, zone] when_ymdhmsz = [2021, 5, 13, 13, 2, 0, -6] self.sf2x2.set_full_field_tracking(aimpoint_xyz, when_ymdhmsz) - local_comments.append( - 'Heliostats set to track to ' - + str(aimpoint_xyz) - + ' at ymdhmsz =' - + str(when_ymdhmsz) - ) + local_comments.append('Heliostats set to track to ' + str(aimpoint_xyz) + ' at ymdhmsz =' + str(when_ymdhmsz)) # Setup render control. mirror_control = rcm.RenderControlMirror(surface_normals=False) @@ -317,9 +264,7 @@ def test_solar_field(self) -> None: solar_field_control = rcsf.RenderControlSolarField( heliostat_styles=rce.RenderControlEnsemble(heliostat_control) ) - local_comments.append( - 'Render mirror surfaces, facet centroids, and heliostat outline and surface normal.' - ) + local_comments.append('Render mirror surfaces, facet centroids, and heliostat outline and surface normal.') # Draw. fig_record = fm.setup_figure_for_3d_data( @@ -358,11 +303,7 @@ def test_heliostat_05W01_and_14W01(self) -> None: focal_length_5W01 = 55 # meters name_5W01 = '5W01' title_5W01 = 'NSTTF Heliostat ' + name_5W01 - caption_5W01 = ( - '5W01 modeled as a symmetric paraboloid with focal length f=' - + str(focal_length_5W01) - + 'm.' - ) + caption_5W01 = '5W01 modeled as a symmetric paraboloid with focal length f=' + str(focal_length_5W01) + 'm.' # 14W01. x_14W01 = -4.88 # meters # TODO RCB: FETCH FROM DEFINITION FILE y_14W01 = 194.71 # meters # TODO RCB: FETCH FROM DEFINITION FILE @@ -370,11 +311,7 @@ def test_heliostat_05W01_and_14W01(self) -> None: focal_length_14W01 = 186.8 # meters name_14W01 = '14W01' title_14W01 = 'NSTTF Heliostat ' + name_14W01 - caption_14W01 = ( - '14W01 modeled as a symmetric paraboloid with focal length f=' - + str(focal_length_14W01) - + 'm.' - ) + caption_14W01 = '14W01 modeled as a symmetric paraboloid with focal length f=' + str(focal_length_14W01) + 'm.' # Solar field. short_name_sf = 'Mini NSTTF' name_sf = 'Mini NSTTF with ' + name_5W01 + ' and ' + name_14W01 @@ -431,23 +368,15 @@ def fn_14W01(x, y): sf = SolarField(name_sf, short_name_sf, [-106.509606, 34.962276], heliostats) - comments_long = ( - comments.copy() - ) # We'll add a different comment for the plots with long normals. - comments_very_long = ( - comments.copy() - ) # We'll add a different comment for the plots with very long normals. + comments_long = comments.copy() # We'll add a different comment for the plots with long normals. + comments_very_long = comments.copy() # We'll add a different comment for the plots with very long normals. comments_exaggerated_z = ( comments.copy() ) # We'll add a different comment for the plots with an exaggerated z axis. - comments.append( - 'Render mirror surfaces and normals, facet outlines, and heliostat centroid.' - ) + comments.append('Render mirror surfaces and normals, facet outlines, and heliostat centroid.') # Setup render control (long normals). 
- mirror_control_long = rcm.RenderControlMirror( - surface_normals=True, norm_len=12, norm_res=3, resolution=3 - ) + mirror_control_long = rcm.RenderControlMirror(surface_normals=True, norm_len=12, norm_res=3, resolution=3) facet_control_long = rcf.RenderControlFacet( draw_mirror_curvature=True, mirror_styles=mirror_control_long, @@ -466,9 +395,7 @@ def fn_14W01(x, y): draw_facets=True, ) - comments_long.append( - 'Render mirror surfaces and long normals, facet outlines, and heliostat centroid.' - ) + comments_long.append('Render mirror surfaces and long normals, facet outlines, and heliostat centroid.') # Draw and output 5W01 figure (long normals, xy view). fig_record = fm.setup_figure_for_3d_data( @@ -505,9 +432,7 @@ def fn_14W01(x, y): # Setup render control (very long normals). mirror_control_very_long = rcm.RenderControlMirror( surface_normals=True, - norm_len=( - 2 * focal_length_14W01 - ), # Twice the focal length is the center of curvature. + norm_len=(2 * focal_length_14W01), # Twice the focal length is the center of curvature. norm_res=2, resolution=3, ) @@ -555,31 +480,13 @@ def fn_14W01(x, y): z_exaggerated_margin = 0.35 # meters, plus or minus reference height. decimal_factor = 100.0 # Different z limits for each heliostat, because they are at different elevations on the sloped field. - z_min_5W01 = ( - np.floor( - decimal_factor * ((z_5W01 + nsttf_pivot_offset) - z_exaggerated_margin) - ) - / decimal_factor - ) - z_max_5W01 = ( - np.ceil( - decimal_factor * ((z_5W01 + nsttf_pivot_offset) + z_exaggerated_margin) - ) - / decimal_factor - ) + z_min_5W01 = np.floor(decimal_factor * ((z_5W01 + nsttf_pivot_offset) - z_exaggerated_margin)) / decimal_factor + z_max_5W01 = np.ceil(decimal_factor * ((z_5W01 + nsttf_pivot_offset) + z_exaggerated_margin)) / decimal_factor exaggerated_z_limits_5W01 = [z_min_5W01, z_max_5W01] z_min_14W01 = ( - np.floor( - decimal_factor * ((z_14W01 + nsttf_pivot_offset) - z_exaggerated_margin) - ) - / decimal_factor - ) - z_max_14W01 = ( - np.ceil( - decimal_factor * ((z_14W01 + nsttf_pivot_offset) + z_exaggerated_margin) - ) - / decimal_factor + np.floor(decimal_factor * ((z_14W01 + nsttf_pivot_offset) - z_exaggerated_margin)) / decimal_factor ) + z_max_14W01 = np.ceil(decimal_factor * ((z_14W01 + nsttf_pivot_offset) + z_exaggerated_margin)) / decimal_factor exaggerated_z_limits_14W01 = [z_min_14W01, z_max_14W01] mirror_control_exaggerated_z = rcm.RenderControlMirror(surface_normals=False) facet_control_exaggerated_z = rcf.RenderControlFacet( @@ -632,7 +539,9 @@ def fn_14W01(x, y): code_tag=self.code_tag, ) fig_record.equal = False # Asserting equal axis scales contradicts exaggerated z limits in 2-d plots. - fig_record.z_limits = exaggerated_z_limits_14W01 # Limits are on z values, even though the plot is 2-d. View3d.py handles this. + fig_record.z_limits = ( + exaggerated_z_limits_14W01 # Limits are on z values, even though the plot is 2-d. View3d.py handles this. + ) h_14W01.draw(fig_record.view, heliostat_control_exaggerated_z) self.show_save_and_check_figure(fig_record, dpi=150) @@ -689,9 +598,7 @@ def fn(x, y): when_ymdhmsz = [2021, 5, 13, 13, 2, 0, -6] # Setup render control. - mirror_control = rcm.RenderControlMirror( - surface_normals=True, norm_len=4, norm_res=2, resolution=3 - ) + mirror_control = rcm.RenderControlMirror(surface_normals=True, norm_len=4, norm_res=2, resolution=3) facet_control = rcf.RenderControlFacet( draw_mirror_curvature=True, mirror_styles=mirror_control, @@ -737,12 +644,7 @@ def fn(x, y): # Tracking heliostat. 
sf.set_full_field_tracking(aimpoint_xyz, when_ymdhmsz) - comments.append( - 'Heliostats set to track to ' - + str(aimpoint_xyz) - + ' at ymdhmsz =' - + str(when_ymdhmsz) - ) + comments.append('Heliostats set to track to ' + str(aimpoint_xyz) + ' at ymdhmsz =' + str(when_ymdhmsz)) fig_record = fm.setup_figure_for_3d_data( self.figure_control, self.axis_control_m, diff --git a/opencsp/common/lib/test/test_RayTraceOutput.py b/opencsp/common/lib/test/test_RayTraceOutput.py index c0906775..d3465a37 100644 --- a/opencsp/common/lib/test/test_RayTraceOutput.py +++ b/opencsp/common/lib/test/test_RayTraceOutput.py @@ -40,9 +40,7 @@ from opencsp.common.lib.csp.ufacet.Heliostat import Heliostat from opencsp.common.lib.csp.LightPath import LightPath from opencsp.common.lib.csp.LightSourceSun import LightSourceSun -from opencsp.common.lib.csp.MirrorParametricRectangular import ( - MirrorParametricRectangular, -) +from opencsp.common.lib.csp.MirrorParametricRectangular import MirrorParametricRectangular from opencsp.common.lib.csp.Scene import Scene from opencsp.common.lib.csp.SolarField import SolarField from opencsp.common.lib.geometry.Pxyz import Pxyz @@ -50,19 +48,11 @@ from opencsp.common.lib.geometry.Vxyz import Vxyz from opencsp.common.lib.render.View3d import View3d from opencsp.common.lib.render_control.RenderControlAxis import RenderControlAxis -from opencsp.common.lib.render_control.RenderControlEnsemble import ( - RenderControlEnsemble, -) +from opencsp.common.lib.render_control.RenderControlEnsemble import RenderControlEnsemble from opencsp.common.lib.render_control.RenderControlFigure import RenderControlFigure -from opencsp.common.lib.render_control.RenderControlFigureRecord import ( - RenderControlFigureRecord, -) -from opencsp.common.lib.render_control.RenderControlLightPath import ( - RenderControlLightPath, -) -from opencsp.common.lib.render_control.RenderControlRayTrace import ( - RenderControlRayTrace, -) +from opencsp.common.lib.render_control.RenderControlFigureRecord import RenderControlFigureRecord +from opencsp.common.lib.render_control.RenderControlLightPath import RenderControlLightPath +from opencsp.common.lib.render_control.RenderControlRayTrace import RenderControlRayTrace from opencsp.common.lib.render_control.RenderControlSurface import RenderControlSurface @@ -93,16 +83,8 @@ def setup_class( self.m1_len_y = 3.0 # m self.m1_rectangle_xy = (self.m1_len_x, self.m1_len_y) self.m1 = MirrorParametricRectangular(self.m1_fxn, self.m1_rectangle_xy) - self.m1_shape_description = ( - 'rectangle ' + str(self.m1_len_x) + 'm x ' + str(self.m1_len_y) + 'm' - ) - self.m1_title = ( - 'Mirror (' - + self.m1_shape_description - + ', f=' - + str(self.m1_focal_length) - + 'm), Face Up' - ) + self.m1_shape_description = 'rectangle ' + str(self.m1_len_x) + 'm x ' + str(self.m1_len_y) + 'm' + self.m1_title = 'Mirror (' + self.m1_shape_description + ', f=' + str(self.m1_focal_length) + 'm), Face Up' self.m1_caption = ( 'A single mirror of shape (' + self.m1_shape_description @@ -133,26 +115,16 @@ def setup_class( # Set canting angles. 
cos5 = np.cos(np.deg2rad(8)) sin5 = np.sin(np.deg2rad(8)) - tilt_up = Rotation.from_matrix( - np.asarray([[1, 0, 0], [0, cos5, -sin5], [0, sin5, cos5]]) - ) - tilt_down = Rotation.from_matrix( - np.asarray([[1, 0, 0], [0, cos5, sin5], [0, -sin5, cos5]]) - ) - tilt_left = Rotation.from_matrix( - np.asarray([[cos5, 0, sin5], [0, 1, 0], [-sin5, 0, cos5]]) - ) - tilt_right = Rotation.from_matrix( - np.asarray([[cos5, 0, -sin5], [0, 1, 0], [sin5, 0, cos5]]) - ) + tilt_up = Rotation.from_matrix(np.asarray([[1, 0, 0], [0, cos5, -sin5], [0, sin5, cos5]])) + tilt_down = Rotation.from_matrix(np.asarray([[1, 0, 0], [0, cos5, sin5], [0, -sin5, cos5]])) + tilt_left = Rotation.from_matrix(np.asarray([[cos5, 0, sin5], [0, 1, 0], [-sin5, 0, cos5]])) + tilt_right = Rotation.from_matrix(np.asarray([[cos5, 0, -sin5], [0, 1, 0], [sin5, 0, cos5]])) self.h2x2_f1.canting = tilt_left * tilt_up self.h2x2_f2.canting = tilt_right * tilt_up self.h2x2_f3.canting = tilt_left * tilt_down self.h2x2_f4.canting = tilt_right * tilt_down self.h2x2_facets = [self.h2x2_f1, self.h2x2_f2, self.h2x2_f3, self.h2x2_f4] - self.h2x2 = Heliostat( - 'Simple 2x2 Heliostat', [0, 0, 0], 4, 2, 2, self.h2x2_facets, 0, 0 - ) + self.h2x2 = Heliostat('Simple 2x2 Heliostat', [0, 0, 0], 4, 2, 2, self.h2x2_facets, 0, 0) self.h2x2_title = 'Heliostat with Parametrically Defined Facets' self.h2x2_caption = ( 'Heliostat with four facets (' @@ -164,37 +136,15 @@ def setup_class( self.h2x2_comments = [] # Simple solar field, with two simple heliostats. - self.sf2x2_h1 = Heliostat( - 'Heliostat 1', - [0, 0, 0], - 4, - 2, - 2, - copy.deepcopy(self.h2x2_facets), - 4.02, - 0.1778, - ) - self.sf2x2_h2 = Heliostat( - 'Heliostat 2', - [0, 10, 0], - 4, - 2, - 2, - copy.deepcopy(self.h2x2_facets), - 4.02, - 0.1778, - ) + self.sf2x2_h1 = Heliostat('Heliostat 1', [0, 0, 0], 4, 2, 2, copy.deepcopy(self.h2x2_facets), 4.02, 0.1778) + self.sf2x2_h2 = Heliostat('Heliostat 2', [0, 10, 0], 4, 2, 2, copy.deepcopy(self.h2x2_facets), 4.02, 0.1778) self.sf2x2_heliostats = [self.sf2x2_h1, self.sf2x2_h2] - self.sf2x2 = SolarField( - 'Test Field', 'test', [-106.509606, 34.962276], self.sf2x2_heliostats - ) + self.sf2x2 = SolarField('Test Field', 'test', [-106.509606, 34.962276], self.sf2x2_heliostats) self.sf2x2_title = 'Two Heliostats' self.sf2x2_caption = 'Two 4-facet heliostats, tracking.' self.sf2x2_comments = [] - def lambda_symmetric_paraboloid( - self, focal_length: float - ) -> Callable[[float, float], float]: + def lambda_symmetric_paraboloid(self, focal_length: float) -> Callable[[float, float], float]: """ Helper function that makes lambdas of paraboloids of a given focal length. """ @@ -228,9 +178,7 @@ def test_draw_simple_ray(self) -> None: incoming_vector = Vxyz([0, 1, -1]) ref_vec = rt.calc_reflected_ray(normal_vector, incoming_vector) ray = LightPath(points, incoming_vector, ref_vec) - light_path_control = RenderControlLightPath( - line_render_control=rcps.RenderControlPointSeq(color='y') - ) + light_path_control = RenderControlLightPath(line_render_control=rcps.RenderControlPointSeq(color='y')) ray.draw(view, light_path_control) # Output. 
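# A minimal numpy sketch of the specular-reflection rule that test_draw_simple_ray exercises
# through rt.calc_reflected_ray(normal_vector, incoming_vector): r = d - 2 (d . n) n for a unit
# normal n. The function and the face-up normal below are illustrative assumptions, not the
# OpenCSP implementation.
import numpy as np

def reflect(incoming, normal):
    n = np.asarray(normal, dtype=float)
    n = n / np.linalg.norm(n)  # ensure the normal is unit length
    d = np.asarray(incoming, dtype=float)
    return d - 2.0 * np.dot(d, n) * n

# With the incoming direction [0, 1, -1] used above and an assumed face-up normal [0, 0, 1],
# the reflected direction is [0, 1, 1]: the z component flips sign.
assert np.allclose(reflect([0, 1, -1], [0, 0, 1]), [0, 1, 1])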
@@ -257,9 +205,7 @@ def test_mirror_trace(self) -> None: # Face Up, Parallel Beams yz - ls.incident_rays = LightPath.many_rays_from_many_vectors( - None, Vxyz([0, 0, -1]) - ) # straight down + ls.incident_rays = LightPath.many_rays_from_many_vectors(None, Vxyz([0, 0, -1])) # straight down m1.set_position_in_space(tran, rot_id) @@ -283,24 +229,18 @@ def test_mirror_trace(self) -> None: ) view1_yz = fig_record.view - trace1.draw( - view1_yz, RenderControlRayTrace(light_path_control=light_path_control) - ) + trace1.draw(view1_yz, RenderControlRayTrace(light_path_control=light_path_control)) m1.draw(view1_yz, mirror_control) # Output. self.show_save_and_check_figure(fig_record, dpi=150) # set of inc vectors to test - test_vecs = Uxyz( - [[0, 0, 0, 0.1, -0.1], [0, 0.1, -0.1, 0, 0], [-1, -1, -1, -1, -1]] - ) + test_vecs = Uxyz([[0, 0, 0, 0.1, -0.1], [0, 0.1, -0.1, 0, 0], [-1, -1, -1, -1, -1]]) # 45 degree rotation, cone of beams 3d - ls.incident_rays = LightPath.many_rays_from_many_vectors( - None, test_vecs.rotate(rot_45_deg) - ) + ls.incident_rays = LightPath.many_rays_from_many_vectors(None, test_vecs.rotate(rot_45_deg)) m1.set_position_in_space(tran, rot_45_deg) @@ -355,9 +295,7 @@ def h_func(x, y): facet_height=1.2192, default_mirror_shape=h_func, ) - sf_curved = sf.SolarField( - "mini Nsttf with 5W1 and 14W1", "mini Field", loc, [h_curved] - ) + sf_curved = sf.SolarField("mini Nsttf with 5W1 and 14W1", "mini Field", loc, [h_curved]) h_canted = helio.h_from_facet_centroids( "NSTTF Heliostat 05W01", @@ -373,13 +311,9 @@ def h_func(x, y): default_mirror_shape=h_func, ) h_canted.set_canting_from_equation(h_func) - sf_canted = sf.SolarField( - "mini Nsttf with 5W1 and 14W1", "mini Field", loc, [h_canted] - ) + sf_canted = sf.SolarField("mini Nsttf with 5W1 and 14W1", "mini Field", loc, [h_canted]) - mirror_control = rcm.RenderControlMirror( - surface_normals=False, norm_len=8, norm_res=2, resolution=3 - ) + mirror_control = rcm.RenderControlMirror(surface_normals=False, norm_len=8, norm_res=2, resolution=3) facet_control = rcf.RenderControlFacet( draw_mirror_curvature=True, mirror_styles=mirror_control, @@ -407,9 +341,7 @@ def h_func(x, y): # RAY TRACING # set of inc vectors to test - test_vecs = Uxyz( - [[0, 0, 0, 0.1, -0.1], [0, 0.1, -0.1, 0, 0], [-1, -1, -1, -1, -1]] - ) + test_vecs = Uxyz([[0, 0, 0, 0.1, -0.1], [0, 0.1, -0.1, 0, 0], [-1, -1, -1, -1, -1]]) sun = LightSourceSun() # sun.set_incident_rays(loc, when_ymdhmsz, 3) @@ -469,9 +401,7 @@ def h_func(x, y): def test_changing_time_of_day(self) -> None: # create a figure that shows 5w1 reflecting the sun towards an aimpoint -- TODO tjlarki: sun rays coming from wrong direction - def _heliostat_at_moment( - name: str, aimpoint_xyz: tuple, when_ymdhmsz: tuple, i: int - ) -> None: + def _heliostat_at_moment(name: str, aimpoint_xyz: tuple, when_ymdhmsz: tuple, i: int) -> None: self.start_test() local_comments = [] @@ -496,13 +426,9 @@ def fn_5w1(x, y): heliostats = [h_05w01] - sf1 = sf.SolarField( - "mini Nsttf with 5W1", "mini Field", lln.NSTTF_ORIGIN, heliostats - ) + sf1 = sf.SolarField("mini Nsttf with 5W1", "mini Field", lln.NSTTF_ORIGIN, heliostats) - mirror_control = rcm.RenderControlMirror( - surface_normals=False, norm_len=8, norm_res=2, resolution=3 - ) + mirror_control = rcm.RenderControlMirror(surface_normals=False, norm_len=8, norm_res=2, resolution=3) facet_control = rcf.RenderControlFacet( draw_mirror_curvature=True, mirror_styles=mirror_control, @@ -536,9 +462,7 @@ def _draw_helper(view: View3d) -> None: scene.add_object(sf1) 
scene.add_light_source(sun) - path_control = RenderControlLightPath( - current_length=100, init_length=20 - ) + path_control = RenderControlLightPath(current_length=100, init_length=20) trace_control = RenderControlRayTrace(light_path_control=path_control) trace = rt.trace_scene(scene, obj_resolution=1) @@ -578,9 +502,7 @@ def _draw_helper(view: View3d) -> None: view_xz = fig_record.view _draw_helper(view_xz) - _heliostat_at_moment( - "5w1 at 15:02", [60.0, 8.8, 28.9], (2021, 5, 13, 15, 2, 0, -6), 24 - ) + _heliostat_at_moment("5w1 at 15:02", [60.0, 8.8, 28.9], (2021, 5, 13, 15, 2, 0, -6), 24) return # partial field test @@ -624,35 +546,25 @@ def test_partial_field_trace(self) -> None: '5W5', ] - solar_field_csv = os.path.join( - self.actual_output_dir, 'test_partial_field_trace.csv' - ) + solar_field_csv = os.path.join(self.actual_output_dir, 'test_partial_field_trace.csv') solar_field = stest.load_solar_field_partition(heliostat_list, solar_field_csv) # Tracking setup - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) # Style setup solar_field_style = rcsf.heliostat_outlines(color='b') # Comment fig_record.comments.append("Partial Solar Field Trace.") - fig_record.comments.append( - "Using 1 ray per surface normal and one surface normal per mirror." - ) + fig_record.comments.append("Using 1 ray per surface normal and one surface normal per mirror.") fig_record.comments.append( "Mirror curvature and canting is defined per heliostat. They are both parabolic and have focal lengths based on the distance of the heliostat to the tower." ) - fig_record.comments.append( - "Traces one in every 13 heliostats in the NSTTF field." 
- ) + fig_record.comments.append("Traces one in every 13 heliostats in the NSTTF field.") # Draw - mirror_control = rcm.RenderControlMirror( - surface_normals=False, norm_len=8, norm_res=2, resolution=2 - ) + mirror_control = rcm.RenderControlMirror(surface_normals=False, norm_len=8, norm_res=2, resolution=2) facet_control = rcf.RenderControlFacet( draw_mirror_curvature=False, mirror_styles=mirror_control, @@ -675,9 +587,7 @@ def test_partial_field_trace(self) -> None: ) solar_field.draw(view, solar_field_style) - view.draw_xyz( - aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz' - ) + view.draw_xyz(aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz') sun = LightSourceSun() sun.set_incident_rays(lln.NSTTF_ORIGIN, when_ymdhmsz, 1) @@ -689,9 +599,7 @@ def test_partial_field_trace(self) -> None: trace = rt.trace_scene(scene, 1, verbose=True) trace.draw(view, RenderControlRayTrace(RenderControlLightPath(15, 200))) - view.draw_xyz( - aimpoint_xyz, rcps.RenderControlPointSeq(color='orange', marker='.') - ) + view.draw_xyz(aimpoint_xyz, rcps.RenderControlPointSeq(color='orange', marker='.')) self.show_save_and_check_figure(fig_record, dpi=150) diff --git a/opencsp/common/lib/test/test_SolarFieldOutput.py b/opencsp/common/lib/test/test_SolarFieldOutput.py index 480966a6..10f60031 100755 --- a/opencsp/common/lib/test/test_SolarFieldOutput.py +++ b/opencsp/common/lib/test/test_SolarFieldOutput.py @@ -24,9 +24,7 @@ import opencsp.common.lib.render_control.RenderControlFacet as rcf import opencsp.common.lib.render_control.RenderControlFigure as rcfg from opencsp.common.lib.render_control.RenderControlFigure import RenderControlFigure -from opencsp.common.lib.render_control.RenderControlFigureRecord import ( - RenderControlFigureRecord, -) +from opencsp.common.lib.render_control.RenderControlFigureRecord import RenderControlFigureRecord import opencsp.common.lib.render_control.RenderControlHeliostat as rch import opencsp.common.lib.render_control.RenderControlPointSeq as rcps import opencsp.common.lib.render_control.RenderControlSolarField as rcsf @@ -107,12 +105,8 @@ def test_multi_heliostat(self) -> None: ['13E1', hc.face_up(), rch.facet_outlines_normals(color='c')], ['13E2', hc.NSTTF_stow(), rch.facet_outlines_corner_normals()], ] - solar_field_csv = os.path.join( - self.actual_output_dir, 'test_multi_heliostat.csv' - ) - solar_field = stest.load_solar_field_partition( - [name[0] for name in heliostat_spec_list], solar_field_csv - ) + solar_field_csv = os.path.join(self.actual_output_dir, 'test_multi_heliostat.csv') + solar_field = stest.load_solar_field_partition([name[0] for name in heliostat_spec_list], solar_field_csv) # View setup title = 'Example Poses and Styles' @@ -152,14 +146,10 @@ def test_multi_heliostat(self) -> None: comments.append("Green: Centroid and name.") comments.append("Blue: Facet outlines.") comments.append("Cyan: Overall outline and overall surface normal.") - comments.append( - "Magneta: Overall outline and overall surface normal, drawn at corners." - ) + comments.append("Magneta: Overall outline and overall surface normal, drawn at corners.") comments.append("Green: Facet outlines and overall surface normal.") comments.append("Cyan: Facet outlines and facet surface normals.") - comments.append( - "Black: Facet outlines and facet surface normals drawn at facet corners." - ) + comments.append("Black: Facet outlines and facet surface normals drawn at facet corners.") # Output. 
self.show_save_and_check_figure(fig_record, dpi=150) @@ -236,30 +226,11 @@ def test_solar_field_subset(self) -> None: '7E6', '7E7', ] - tracking_heliostats = [ - '8E1', - '8E2', - '8E4', - '8E6', - '8E7', - '9E1', - '9E2', - '9E3', - '9E4', - '9E5', - '9E6', - '9E7', - ] + tracking_heliostats = ['8E1', '8E2', '8E4', '8E6', '8E7', '9E1', '9E2', '9E3', '9E4', '9E5', '9E6', '9E7'] - solar_field_csv = os.path.join( - self.actual_output_dir, 'test_solar_field_subset.csv' - ) + solar_field_csv = os.path.join(self.actual_output_dir, 'test_solar_field_subset.csv') solar_field = stest.load_solar_field_partition( - mirrored_heliostats - + up_heliostats - + stowed_heliostats - + synched_heliostats - + tracking_heliostats, + mirrored_heliostats + up_heliostats + stowed_heliostats + synched_heliostats + tracking_heliostats, solar_field_csv, ) @@ -280,42 +251,24 @@ def test_solar_field_subset(self) -> None: up_el = np.deg2rad(90) # Configuration setup - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) solar_field.set_heliostats_configuration(stowed_heliostats, hc.NSTTF_stow()) synch_configuration = hc.HeliostatConfiguration(az=synch_az, el=synch_el) - solar_field.set_heliostats_configuration( - synched_heliostats, synch_configuration - ) - solar_field.set_heliostats_configuration( - mirrored_heliostats, synch_configuration - ) + solar_field.set_heliostats_configuration(synched_heliostats, synch_configuration) + solar_field.set_heliostats_configuration(mirrored_heliostats, synch_configuration) up_configuration = hc.HeliostatConfiguration(az=up_az, el=up_el) solar_field.set_heliostats_configuration(up_heliostats, up_configuration) # Style setup solar_field_style = rcsf.heliostat_blanks() - solar_field_style.heliostat_styles.add_special_names( - mirrored_heliostats, rch.mirror_surfaces() - ) - solar_field_style.heliostat_styles.add_special_names( - up_heliostats, rch.facet_outlines(color='c') - ) - solar_field_style.heliostat_styles.add_special_names( - stowed_heliostats, rch.normal_outline(color='r') - ) - solar_field_style.heliostat_styles.add_special_names( - synched_heliostats, rch.normal_outline(color='g') - ) - solar_field_style.heliostat_styles.add_special_names( - tracking_heliostats, rch.facet_outlines(color='b') - ) + solar_field_style.heliostat_styles.add_special_names(mirrored_heliostats, rch.mirror_surfaces()) + solar_field_style.heliostat_styles.add_special_names(up_heliostats, rch.facet_outlines(color='c')) + solar_field_style.heliostat_styles.add_special_names(stowed_heliostats, rch.normal_outline(color='r')) + solar_field_style.heliostat_styles.add_special_names(synched_heliostats, rch.normal_outline(color='g')) + solar_field_style.heliostat_styles.add_special_names(tracking_heliostats, rch.facet_outlines(color='b')) # Comment - comments.append( - "A subset of heliostats selected, so that plot is effectively zoomed in." 
- ) + comments.append("A subset of heliostats selected, so that plot is effectively zoomed in.") comments.append("Grey heliostat shows mirrored surfaces.") comments.append("Blue heliostats are tracking.") comments.append("Cyan heliostats are face up.") @@ -364,17 +317,13 @@ def test_heliostat_vector_field(self) -> None: aimpoint_xyz = [60.0, 8.8, 28.9] when_ymdhmsz = [2021, 5, 13, 13, 2, 0, -6] # NSTTF solar noon # [year, month, day, hour, minute, second, zone] - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) # Style setup solar_field_style = rcsf.heliostat_vector_field(color='b') # Comment - comments.append( - "Each heliostat's surface normal, which can be viewed as a vector field." - ) + comments.append("Each heliostat's surface normal, which can be viewed as a vector field.") # Draw and produce output for xz fig_record = fm.setup_figure_for_3d_data( @@ -390,9 +339,7 @@ def test_heliostat_vector_field(self) -> None: comments=comments, code_tag=self.code_tag, ) - fig_record.view.draw_xyz( - aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz' - ) + fig_record.view.draw_xyz(aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz') solar_field.draw(fig_record.view, solar_field_style) self.show_save_and_check_figure(fig_record, dpi=150) @@ -421,9 +368,7 @@ def test_dense_vector_field(self) -> None: aimpoint_xyz = [60.0, 8.8, 28.9] when_ymdhmsz = [2021, 5, 13, 13, 2, 0, -6] # NSTTF solar noon # [year, month, day, hour, minute, second, zone] - solar_field.set_full_field_tracking( - aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz - ) + solar_field.set_full_field_tracking(aimpoint_xyz=aimpoint_xyz, when_ymdhmsz=when_ymdhmsz) # Style setup solar_field_style = rcsf.heliostat_vector_field_outlines(color='grey') @@ -446,25 +391,16 @@ def test_dense_vector_field(self) -> None: code_tag=self.code_tag, ) solar_field.draw(fig_record.view, solar_field_style) - fig_record.view.draw_xyz( - aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz' - ) + fig_record.view.draw_xyz(aimpoint_xyz, style=rcps.marker(color='tab:orange'), label='aimpoint_xyz') # Draw dense vector field. grid_xy = solar_field.heliostat_field_regular_grid_xy(40, 20) # grid_xydxy = [[p, sunt.tracking_surface_normal_xy(p+[0], aimpoint_xyz, solar_field.origin_lon_lat, when_ymdhmsz)] for p in grid_xy] grid_xydxy = [ - [ - p, - sun_track.tracking_surface_normal_xy( - p + [0], aimpoint_xyz, solar_field.origin_lon_lat, when_ymdhmsz - ), - ] + [p, sun_track.tracking_surface_normal_xy(p + [0], aimpoint_xyz, solar_field.origin_lon_lat, when_ymdhmsz)] for p in grid_xy ] - fig_record.view.draw_pqdpq_list( - grid_xydxy, style=rcps.vector_field(color='b', vector_scale=5.0) - ) + fig_record.view.draw_pqdpq_list(grid_xydxy, style=rcps.vector_field(color='b', vector_scale=5.0)) # Output. self.show_save_and_check_figure(fig_record, dpi=150) diff --git a/opencsp/common/lib/test/test_TargetColor.py b/opencsp/common/lib/test/test_TargetColor.py index 404f0771..097e9979 100755 --- a/opencsp/common/lib/test/test_TargetColor.py +++ b/opencsp/common/lib/test/test_TargetColor.py @@ -73,12 +73,7 @@ def setup_class( # ?? 
SCAFFOLDING RCB -- ADD COLOR_BAR TYPE TIP BELOW def execute_test_linear_color_bar( - self, - color_below_min: Color, - color_bar, - color_bar_name: str, - color_above_max: Color, - generate_all: bool, + self, color_below_min: Color, color_bar, color_bar_name: str, color_above_max: Color, generate_all: bool ) -> None: if generate_all: # Linear color bar in x, continuous. @@ -93,9 +88,7 @@ def execute_test_linear_color_bar( 'x', 'continuous', ) - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') # Linear color bar in y, discrete. target = tc.construct_target_linear_color_bar( @@ -109,25 +102,15 @@ def execute_test_linear_color_bar( 'y', 'discrete', ) - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') # ?? SCAFFOLDING RCB -- ADD COLOR_BAR TYPE TIP BELOW def execute_test_polar_color_bar( - self, - color_below_min: Color, - color_bar, - color_bar_name: str, - color_above_max: Color, + self, color_below_min: Color, color_bar, color_bar_name: str, color_above_max: Color ) -> None: # Default. - target = tc.construct_target_polar_color_bar( - self.image_width, self.image_height, self.dpm - ) - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + target = tc.construct_target_polar_color_bar(self.image_width, self.image_height, self.dpm) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') # Selected for first 3m x 3m print. target = tc.construct_target_polar_color_bar( @@ -140,9 +123,7 @@ def execute_test_polar_color_bar( radial_gradient_name='l2s', light_center_to_saturated_saturation_min=0.2, ) - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') # # Saturated center to white, varying exponent. # for exponent in np.arange(0, 4.0, 0.5): @@ -216,51 +197,35 @@ def execute_test_polar_color_bar( def execute_test_blue_under_red_cross_green(self) -> None: # Construct target. - target = tc.construct_target_blue_under_red_cross_green( - self.image_width, self.image_height, self.dpm - ) - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + target = tc.construct_target_blue_under_red_cross_green(self.image_width, self.image_height, self.dpm) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') def execute_test_rgb_cube_inscribed_square(self, project_to_cube: bool) -> None: # Construct target. target = tc.construct_target_rgb_cube_inscribed_square( self.image_width, self.image_height, self.dpm, project_to_cube ) - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') # TARGET MODIFICATION TESTS def execute_test_adjust_color_saturation(self, saturation_fraction: float) -> None: # Construct target. - target = tc.construct_target_blue_under_red_cross_green( - self.image_width, self.image_height, self.dpm - ) + target = tc.construct_target_blue_under_red_cross_green(self.image_width, self.image_height, self.dpm) # Adjust color saturation. 
target.adjust_color_saturation(saturation_fraction) - print( - 'WARNING: In execute_test_adjust_color_saturation(), saturation adjustment not implemented yet.' - ) + print('WARNING: In execute_test_adjust_color_saturation(), saturation adjustment not implemented yet.') # Save and check. - self.save_and_check_image( - target.image, self.dpm, target.description_inch(), '.png' - ) + self.save_and_check_image(target.image, self.dpm, target.description_inch(), '.png') # TARGET EXTENSION TESTS def execute_test_extend_target(self) -> None: # Target. - target = tc.construct_target_blue_under_red_cross_green( - self.image_width, self.image_height, self.dpm - ) + target = tc.construct_target_blue_under_red_cross_green(self.image_width, self.image_height, self.dpm) # Border all around. border_pixels = 5 # Pixels - extended_target_left_right_top_bottom_border = tc.extend_target_all( - target, border_pixels, Color.magenta() - ) + extended_target_left_right_top_bottom_border = tc.extend_target_all(target, border_pixels, Color.magenta()) self.save_and_check_image( extended_target_left_right_top_bottom_border.image, self.dpm, @@ -273,9 +238,7 @@ def execute_test_extend_target(self) -> None: def execute_test_splice_targets_above_below(self) -> None: # ?? SCAFFOLDING RCB -- FIXUP PARAMETER PASSING, ETC. # Target #1. - target_1 = tc.construct_target_blue_under_red_cross_green( - self.image_width, self.image_height, self.dpm - ) + target_1 = tc.construct_target_blue_under_red_cross_green(self.image_width, self.image_height, self.dpm) # Target #2. # Selected for first 3m x 3m print. # project_to_cube = True @@ -293,12 +256,8 @@ def execute_test_splice_targets_above_below(self) -> None: # Combine. # ?? SCAFFOLDING RCB -- SHOULD THIS BE IN INCHES? gap = 0 # Pixels # ?? SCAFFOLDING RCB -- SHOULD THIS BE IN INCHES? - spliced_target = tc.splice_targets_above_below( - target_1, target_2, gap, Color.white() - ) - self.save_and_check_image( - spliced_target.image, self.dpm, spliced_target.description_inch(), '.png' - ) + spliced_target = tc.splice_targets_above_below(target_1, target_2, gap, Color.white()) + self.save_and_check_image(spliced_target.image, self.dpm, spliced_target.description_inch(), '.png') def execute_test_cascade_target_A(self) -> None: # For tall linear target elements. @@ -336,9 +295,7 @@ def execute_test_cascade_target_A(self) -> None: # Main: Color bar corrected for Nikon D3300 response. color_below_min = Color.black() # Black below bottom of color bar. color_bar = tcc.nikon_D3300_monitor_equal_step_color_bar() - color_bar_name = ( - 'D3300_monitor' # ?? SCAFFOLDING RCB -- THIS SHOULD BE A CLASS MEMBER - ) + color_bar_name = 'D3300_monitor' # ?? SCAFFOLDING RCB -- THIS SHOULD BE A CLASS MEMBER color_above_max = Color.white() # White background for "saturated data." # Closed color wheel linear color bar. ref_color_below_min = Color.black() # Black below bottom of color bar. @@ -387,10 +344,7 @@ def execute_test_cascade_target_A(self) -> None: list_of_saturation_spec_lists=[ [[None, None, None, None]], # 2-stack - [ - ['light_to_saturated', None, 0.4, 1.0], - ['saturated_to_white', 1.25, None, None], - ], + [['light_to_saturated', None, 0.4, 1.0], ['saturated_to_white', 1.25, None, None]], # 5-stack [ [None, None, None, None], @@ -420,9 +374,7 @@ def execute_test_cascade_target_A(self) -> None: gap_between_bars_pix=round( gap_between_bars * composite_dpm ), # Pixels # ?? SCAFFOLDING RCB -- SHOULD THIS BE IN INCHES? - ref_gap_pix=round( - ref_gap * composite_dpm - ), # Pixels # ?? 
SCAFFOLDING RCB -- SHOULD THIS BE IN INCHES? + ref_gap_pix=round(ref_gap * composite_dpm), # Pixels # ?? SCAFFOLDING RCB -- SHOULD THIS BE IN INCHES? gap_color=Color.white(), ) @@ -430,25 +382,14 @@ def execute_test_cascade_target_A(self) -> None: n_ticks_x = 13 # No units. Number of tick marks to draw along top/bottom horizontal target edges. n_ticks_y = 25 # No units. Number of tick marks to draw along left/right vertical target edges. tick_length = 0.010 # Meters. Length to draw edge tick marks. - tick_width_pix = ( - 3 # Pixels. Width to draw edge tick marks; should be odd number. - ) + tick_width_pix = 3 # Pixels. Width to draw edge tick marks; should be odd number. tick_color: Color = Color.black() # Color. Color of edge tick marks. - cascade_target.set_ticks_along_top_and_bottom_edges( - n_ticks_x, tick_length, tick_width_pix, tick_color - ) - cascade_target.set_ticks_along_left_and_right_edges( - n_ticks_y, tick_length, tick_width_pix, tick_color - ) + cascade_target.set_ticks_along_top_and_bottom_edges(n_ticks_x, tick_length, tick_width_pix, tick_color) + cascade_target.set_ticks_along_left_and_right_edges(n_ticks_y, tick_length, tick_width_pix, tick_color) # Save result. # self.save_and_check_image(cascade_target.image, composite_dpm, cascade_target.description_inch(), '.tiff') #'.png') - self.save_and_check_image( - cascade_target.image, - composite_dpm, - cascade_target.description_inch(), - '.png', - ) + self.save_and_check_image(cascade_target.image, composite_dpm, cascade_target.description_inch(), '.png') def test_matlab(self) -> None: # Initialize test. @@ -458,9 +399,7 @@ def test_matlab(self) -> None: color_bar = tcc.matlab_color_bar() color_bar_name = 'matlab' color_above_max = Color.white() # White background for "saturated data." - self.execute_test_linear_color_bar( - color_below_min, color_bar, color_bar_name, color_above_max, False - ) + self.execute_test_linear_color_bar(color_below_min, color_bar, color_bar_name, color_above_max, False) def test_matlab_equal_angle(self) -> None: # Initialize test. @@ -470,9 +409,7 @@ def test_matlab_equal_angle(self) -> None: color_bar = tcc.normalize_color_bar_to_equal_angles(tcc.matlab_color_bar()) color_bar_name = 'matlab_equal_angle' color_above_max = Color.white() # White background for "saturated data." - self.execute_test_linear_color_bar( - color_below_min, color_bar, color_bar_name, color_above_max, False - ) + self.execute_test_linear_color_bar(color_below_min, color_bar, color_bar_name, color_above_max, False) # # Closed corner tour color bar. # # ?? SCAFFOLDING RCB -- USE THIS TO CLARIFY AND THEN FIX PROBLEMS WITH COLOR INTERPOLATION. @@ -489,14 +426,10 @@ def test_corner_tour_closed_equal_angle(self) -> None: self.start_test() # Normalized closed corner tour color bar. color_below_min = Color.black() # Black below bottom of color bar. - color_bar = tcc.normalize_color_bar_to_equal_angles( - tcc.corner_tour_closed_color_bar() - ) + color_bar = tcc.normalize_color_bar_to_equal_angles(tcc.corner_tour_closed_color_bar()) color_bar_name = 'corner_tour_closed_equal_angle' color_above_max = Color.white() # White background for "saturated data." - self.execute_test_linear_color_bar( - color_below_min, color_bar, color_bar_name, color_above_max, True - ) + self.execute_test_linear_color_bar(color_below_min, color_bar, color_bar_name, color_above_max, True) # # Closed color wheel linear color bar. # color_below_min = Color.black() # Black below bottom of color bar. 
@@ -513,9 +446,7 @@ def test_polar_color_bar(self) -> None: color_bar = tcc.O_color_bar() color_bar_name = 'O' # ?? SCAFFOLDING RCB -- THIS SHOULD BE A CLASS MEMBER color_above_max = Color.white() # White background for "saturated data." - self.execute_test_polar_color_bar( - color_below_min, color_bar, color_bar_name, color_above_max - ) + self.execute_test_polar_color_bar(color_below_min, color_bar, color_bar_name, color_above_max) def test_blue_under_red_cross_green(self) -> None: # Initialize test. diff --git a/opencsp/common/lib/tool/dict_tools.py b/opencsp/common/lib/tool/dict_tools.py index 1bb94742..144866c6 100644 --- a/opencsp/common/lib/tool/dict_tools.py +++ b/opencsp/common/lib/tool/dict_tools.py @@ -101,9 +101,7 @@ def print_dict_of_dicts( if len(value_2_str) > len(trimmed_value_2_str): trimmed_value_2_str += '...' # Print key : value. - print( - indent_str_2 + format_str.format(str(key_2) + ':', trimmed_value_2_str) - ) + print(indent_str_2 + format_str.format(str(key_2) + ':', trimmed_value_2_str)) # Level 2 postamble. if max_keys_2 < len(key_list_2): print(indent_str_2 + '...') @@ -146,9 +144,7 @@ def print_dict_of_dict_of_dicts( print(indent_str_2 + str(key_2) + ':') # Fetch level 3 dictionary. dict_3 = dict_2[key_2] - key_list_3 = sorted_keys( - dict_3 - ) # Sort key list for consistent output order. + key_list_3 = sorted_keys(dict_3) # Sort key list for consistent output order. # Determine the length of the longest key in this level 3 dictionary. level_3_key_max_len = max([len(str(x)) for x in key_list_3]) format_str = '{0:<' + str(level_3_key_max_len + 2) + 's}{1:s}' @@ -161,10 +157,7 @@ def print_dict_of_dict_of_dicts( if len(value_3_str) > len(trimmed_value_3_str): trimmed_value_3_str += '...' # Print key : value. - print( - indent_str_3 - + format_str.format(str(key_3) + ':', trimmed_value_3_str) - ) + print(indent_str_3 + format_str.format(str(key_3) + ':', trimmed_value_3_str)) # Level 3 postamble. if max_keys_3 < len(key_list_3): print(indent_str_3 + '...') @@ -185,12 +178,7 @@ def print_dict_of_dict_of_dicts( def save_list_of_one_level_dicts( - list_of_one_level_dicts, - output_dir, - output_body, - explain, - error_if_dir_not_exist, - first_key=None, + list_of_one_level_dicts, output_dir, output_body, explain, error_if_dir_not_exist, first_key=None ): """ A "one_level_dict" is a dictionary with entries that can be converted to strings and written as single entries in a csv file. @@ -199,9 +187,7 @@ def save_list_of_one_level_dicts( """ if len(list_of_one_level_dicts) == 0: explain += ' (EMPTY)' - key_list, heading_line = one_level_dict_csv_heading_line( - list_of_one_level_dicts, first_key - ) + key_list, heading_line = one_level_dict_csv_heading_line(list_of_one_level_dicts, first_key) data_lines = [] for one_level_dict in list_of_one_level_dicts: data_lines.append(one_level_dict_csv_data_line(key_list, one_level_dict)) @@ -238,9 +224,7 @@ def one_level_dict_csv_heading_line_aux(first_one_level_dict, first_key): # Verify that selceted first key is present. 
if (first_key != None) and (not first_key_found): print( - 'ERROR: In one_level_dict_heading_line_aux(), expected first key=' - + str(first_key) - + ' not found in keys:', + 'ERROR: In one_level_dict_heading_line_aux(), expected first key=' + str(first_key) + ' not found in keys:', key_list, ) assert False @@ -291,11 +275,7 @@ def save_list_of_one_level_dict_pairs( ) data_lines = [] for one_level_dict_pair in list_of_one_level_dict_pairs: - data_lines.append( - one_level_dict_pair_csv_data_line( - key_list_1, key_list_2, one_level_dict_pair - ) - ) + data_lines.append(one_level_dict_pair_csv_data_line(key_list_1, key_list_2, one_level_dict_pair)) output_dir_body_ext = ft.write_csv_file( explain, # Explanatory string to include in notification output. None to skip. output_dir, # Directory to write file. See below if not exist. @@ -308,9 +288,7 @@ def save_list_of_one_level_dict_pairs( return output_dir_body_ext -def one_level_dict_pair_csv_heading_line( - list_of_one_level_dict_pairs, first_key_1, first_key_2 -): +def one_level_dict_pair_csv_heading_line(list_of_one_level_dict_pairs, first_key_1, first_key_2): """ This routine extracts the headings from each of the two dicts, adding suffixes "_1" or "_2" to the first and second dictionary keys, respectively. These suffixes are added regardless of whether the dictionaries have @@ -323,17 +301,13 @@ def one_level_dict_pair_csv_heading_line( first_pair = list_of_one_level_dict_pairs[0] # Determine headings from keys of first dictionary. one_level_dict_1 = first_pair[0] - key_list_1, heading_line_1 = one_level_dict_csv_heading_line_aux( - one_level_dict_1, first_key_1 - ) + key_list_1, heading_line_1 = one_level_dict_csv_heading_line_aux(one_level_dict_1, first_key_1) heading_line_tokens_1 = heading_line_1.split(',') heading_line_tokens_1b = [token + '_1' for token in heading_line_tokens_1] heading_line_1b = ','.join(heading_line_tokens_1b) # Determine headings from keys of second dictionary. one_level_dict_2 = first_pair[1] - key_list_2, heading_line_2 = one_level_dict_csv_heading_line_aux( - one_level_dict_2, first_key_2 - ) + key_list_2, heading_line_2 = one_level_dict_csv_heading_line_aux(one_level_dict_2, first_key_2) heading_line_tokens_2 = heading_line_2.split(',') heading_line_tokens_2b = [token + '_2' for token in heading_line_tokens_2] heading_line_2b = ','.join(heading_line_tokens_2b) diff --git a/opencsp/common/lib/tool/file_tools.py b/opencsp/common/lib/tool/file_tools.py index eaec576c..fdde7faf 100755 --- a/opencsp/common/lib/tool/file_tools.py +++ b/opencsp/common/lib/tool/file_tools.py @@ -92,9 +92,7 @@ def resolve_symlink(input_dir_body_ext: str): return input_dir_body_ext -def file_exists( - input_dir_body_ext: str, error_if_exists_as_dir=True, follow_symlinks=False -): +def file_exists(input_dir_body_ext: str, error_if_exists_as_dir=True, follow_symlinks=False): """ Determines whether the given file exists. 
If the specified input path exists but is a directory instead of a file, @@ -107,8 +105,7 @@ def file_exists( if error_if_exists_as_dir == True: lt.error_and_raise( RuntimeError, - 'ERROR: In file_exists(), requested input path exists and is a directory: ' - + str(input_dir_body_ext), + 'ERROR: In file_exists(), requested input path exists and is a directory: ' + str(input_dir_body_ext), ) else: return False @@ -119,9 +116,7 @@ def file_exists( return False -def directory_exists( - input_dir: str, error_if_exists_as_file=True, follow_symlinks=False -): +def directory_exists(input_dir: str, error_if_exists_as_file=True, follow_symlinks=False): """ Determines whether the given directory exists. If the specified input directory exists but is a file instead of a directory, @@ -134,8 +129,7 @@ def directory_exists( if error_if_exists_as_file == True: lt.error_and_raise( RuntimeError, - 'ERROR: In directory_exists(), requested input path exists and is a file: ' - + str(input_dir), + 'ERROR: In directory_exists(), requested input path exists and is a file: ' + str(input_dir), ) else: return False @@ -155,15 +149,11 @@ def directory_is_empty(input_dir): # Check input. if os.path.isfile(input_dir): lt.error_and_raise( - RuntimeError, - 'ERROR: In directory_is_empty(), requested input path exists and is a file: ' - + str(input_dir), + RuntimeError, 'ERROR: In directory_is_empty(), requested input path exists and is a file: ' + str(input_dir) ) if not os.path.isdir(input_dir): lt.error_and_raise( - RuntimeError, - 'ERROR: In directory_is_empty(), requested input directory does not exist: ' - + str(input_dir), + RuntimeError, 'ERROR: In directory_is_empty(), requested input directory does not exist: ' + str(input_dir) ) # Probe the directory contents to determine whether any contents are there. # The standard Unix files "." and ".." don't count as contents. @@ -180,9 +170,7 @@ def directory_is_empty(input_dir): def count_items_in_directory( - input_dir, - name_prefix=None, # Only entries with names thata start with name_prefix are counted. - name_suffix=None, + input_dir, name_prefix=None, name_suffix=None # Only entries with names thata start with name_prefix are counted. ): # Only entries with names thata end with name_suffix are counted. """ Counts the number of items in the given directory. @@ -195,20 +183,17 @@ def count_items_in_directory( if not os.path.exists(input_dir): lt.error_and_raise( RuntimeError, - 'ERROR: In count_items_in_directory(), requested input directory does not exist: ' - + str(input_dir), + 'ERROR: In count_items_in_directory(), requested input directory does not exist: ' + str(input_dir), ) if not os.path.isdir(input_dir): if os.path.isfile(input_dir): lt.error_and_raise( RuntimeError, - 'ERROR: In count_items_in_directory(), requested input path exists and is a file: ' - + str(input_dir), + 'ERROR: In count_items_in_directory(), requested input path exists and is a file: ' + str(input_dir), ) lt.error_and_raise( RuntimeError, - 'ERROR: In count_items_in_directory(), requested input directory is not a directory: ' - + str(input_dir), + 'ERROR: In count_items_in_directory(), requested input directory is not a directory: ' + str(input_dir), ) # Walk the directory contents to determine whether any contents are there. # The standard Unix files "." and ".." don't count as contents. 
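# A minimal sketch of the relative-path bookkeeping done by the files_in_directory(...) hunks that
# follow when recursive=True: walk the tree with os.walk and record each entry relative to the
# root, dropping the leading separator. The helper name is illustrative, not the OpenCSP
# file_tools API.
import os

def list_relative(root: str, files_only: bool = False) -> list[str]:
    found: list[str] = []
    for dirpath, dirnames, filenames in os.walk(root):
        rel = os.path.relpath(dirpath, root)
        rel = "" if rel == "." else rel  # entries at the top level get no prefix
        found += [os.path.join(rel, name) for name in filenames]
        if not files_only:
            found += [os.path.join(rel, name) for name in dirnames]
    return sorted(found)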
@@ -353,16 +338,11 @@ def files_in_directory(input_dir, sort=True, files_only=False, recursive=False): # don't include any leading / relative_path = relative_path.lstrip("\\/") - scanned_files += [ - os.path.join(relative_path, file_name_ext) - for file_name_ext in file_names_exts - ] + scanned_files += [os.path.join(relative_path, file_name_ext) for file_name_ext in file_names_exts] # Ignore directories if not files_only: - scanned_files += [ - os.path.join(relative_path, dirname) for dirname in dirnames - ] + scanned_files += [os.path.join(relative_path, dirname) for dirname in dirnames] # Ignore standard Unix files. for file_relpath_name_ext in scanned_files: @@ -380,9 +360,7 @@ def files_in_directory(input_dir, sort=True, files_only=False, recursive=False): return file_list -def files_in_directory_with_associated_sizes( - input_dir, sort=True, follow_symlinks=True -): +def files_in_directory_with_associated_sizes(input_dir, sort=True, follow_symlinks=True): """ Returns a list [ [file1 size1], [file2, size2], ...] of files name_ext and associated sizes. If sort==True, then the list is sorted in order of ascending file name. @@ -412,11 +390,7 @@ def files_in_directory_with_associated_sizes( def files_in_directory_by_extension( - input_dir: str, - extensions: list[str], - sort=True, - case_sensitive=False, - recursive=False, + input_dir: str, extensions: list[str], sort=True, case_sensitive=False, recursive=False ): """Generates a list of { ext: [file1, file2, ...], ... }. Only returns the files with one of the given extensions. @@ -454,9 +428,7 @@ def files_in_directory_by_extension( else: search_extensions["." + ext] = iext - for file in files_in_directory( - input_dir, sort, files_only=True, recursive=recursive - ): + for file in files_in_directory(input_dir, sort, files_only=True, recursive=recursive): _, file_ext = os.path.splitext(file) ifile_ext = file_ext if case_sensitive else file_ext.lower() ext = search_extensions.get(ifile_ext) @@ -482,8 +454,7 @@ def create_file(input_dir_body_ext: str, error_on_exists=True, delete_if_exists= delete_file(input_dir_body_ext) elif error_on_exists: lt.error_and_raise( - RuntimeError, - f"Error: in create_file(), requested file already exists: '{input_dir_body_ext}'", + RuntimeError, f"Error: in create_file(), requested file already exists: '{input_dir_body_ext}'" ) else: return @@ -493,16 +464,12 @@ def create_file(input_dir_body_ext: str, error_on_exists=True, delete_if_exists= with open(input_dir_body_ext, "w"): pass except: - lt.error( - f"Error: in create_file(), failed to create file '{input_dir_body_ext}'" - ) + lt.error(f"Error: in create_file(), failed to create file '{input_dir_body_ext}'") raise # check for success if not file_exists(input_dir_body_ext): - lt.error_and_raise( - FileNotFoundError, "Error: in create_file(), failed to create file" - ) + lt.error_and_raise(FileNotFoundError, "Error: in create_file(), failed to create file") def delete_file(input_dir_body_ext, error_on_not_exists=True): @@ -515,14 +482,11 @@ def delete_file(input_dir_body_ext, error_on_not_exists=True): if error_on_not_exists: lt.error_and_raise( RuntimeError, - 'ERROR: In delete_file(), requested input path is not an existing file: ' - + str(input_dir_body_ext), + 'ERROR: In delete_file(), requested input path is not an existing file: ' + str(input_dir_body_ext), ) return lt.error_and_raise( - RuntimeError, - 'ERROR: In delete_file(), requested input path is not a file: ' - + str(input_dir_body_ext), + RuntimeError, 'ERROR: In delete_file(), requested 
input path is not a file: ' + str(input_dir_body_ext) ) try: os.remove(input_dir_body_ext) @@ -536,9 +500,7 @@ def delete_file(input_dir_body_ext, error_on_not_exists=True): raise # if this should NOT raise an exception then that should be documented, as well as the reason why it shouldn't ~BGB230119 -def delete_files_in_directory( - input_dir: str, globexp: str, error_on_dir_not_exists=True -): +def delete_files_in_directory(input_dir: str, globexp: str, error_on_dir_not_exists=True): """ Deletes files in the given directory matching the globexp. @@ -568,15 +530,13 @@ def delete_files_in_directory( if os.path.isfile(input_dir): lt.error_and_raise( RuntimeError, - 'ERROR: In delete_items_in_directory(), requested input path exists and is a file: ' - + str(input_dir), + 'ERROR: In delete_items_in_directory(), requested input path exists and is a file: ' + str(input_dir), ) if not os.path.isdir(input_dir): if error_on_dir_not_exists: lt.error_and_raise( RuntimeError, - 'ERROR: In delete_items_in_directory(), requested input directory does not exist: ' - + str(input_dir), + 'ERROR: In delete_items_in_directory(), requested input directory does not exist: ' + str(input_dir), ) # Delete the files. localized_globexp = os.path.join(input_dir, globexp) @@ -608,8 +568,7 @@ def create_directories_if_necessary(input_dir): if os.path.isfile(input_dir): lt.error_and_raise( RuntimeError, - 'ERROR: In create_directories_if_necessary(), requested input path exists and is a file: ' - + str(input_dir), + 'ERROR: In create_directories_if_necessary(), requested input path exists and is a file: ' + str(input_dir), ) # Create the directory and its parents, if they do not exist. if not os.path.isdir(input_dir): @@ -638,26 +597,19 @@ def create_subdir(base_dir: str, dir_name: str, error_if_exists=True): # Check input. if not os.path.exists(base_dir): lt.error_and_raise( - RuntimeError, - 'ERROR: In create_subdir(): Containing directory does not exist:\n\t' - + base_dir, + RuntimeError, 'ERROR: In create_subdir(): Containing directory does not exist:\n\t' + base_dir ) result_dir = os.path.join(base_dir, dir_name) if os.path.exists(result_dir): if error_if_exists: - lt.error_and_raise( - 'ERROR: In create_subdir(), Result directory exists:\n\t' + result_dir - ) + lt.error_and_raise('ERROR: In create_subdir(), Result directory exists:\n\t' + result_dir) else: pass else: try: os.mkdir(result_dir) except: - lt.error_and_raise( - RuntimeError, - 'ERROR: In create_subdir(): Could not create directory: ' + result_dir, - ) + lt.error_and_raise(RuntimeError, 'ERROR: In create_subdir(): Could not create directory: ' + result_dir) return result_dir @@ -665,9 +617,7 @@ def directories_in_directory(directory, sort=True): """ Returns a list of all the directory names contained within the input directory. 
""" - dir_list = [ - f for f in os.listdir(directory) if os.path.isdir(os.path.join(directory, f)) - ] + dir_list = [f for f in os.listdir(directory) if os.path.isdir(os.path.join(directory, f))] if sort: dir_list.sort() return dir_list @@ -734,42 +684,22 @@ def default_output_path(file_path_name_ext: Optional[str] = None) -> str: # generate a new output path output_dir_name = 'output_' + datetime.now().strftime('%Y_%m_%d_%H%M') if file_path_name_ext != None and os.path.exists(file_path_name_ext): - file_dir = ( - file_path_name_ext - if os.path.isdir(file_path_name_ext) - else os.path.dirname(file_path_name_ext) - ) + file_dir = file_path_name_ext if os.path.isdir(file_path_name_ext) else os.path.dirname(file_path_name_ext) if file_dir != "": _output_paths[file_path_name_ext] = os.path.join(file_dir, output_dir_name) else: _output_paths[file_path_name_ext] = os.path.join( - 'common', - 'lib', - 'tool', - 'test', - 'data', - 'output', - 'file_tools', - output_dir_name, + 'common', 'lib', 'tool', 'test', 'data', 'output', 'file_tools', output_dir_name ) else: _output_paths[file_path_name_ext] = os.path.join( - 'common', - 'lib', - 'tool', - 'test', - 'data', - 'output', - 'file_tools', - output_dir_name, + 'common', 'lib', 'tool', 'test', 'data', 'output', 'file_tools', output_dir_name ) return _output_paths[file_path_name_ext] -def rename_file( - input_dir_body_ext: str, output_dir_body_ext: str, is_file_check_only=False -): +def rename_file(input_dir_body_ext: str, output_dir_body_ext: str, is_file_check_only=False): """Move a file from input to output. Verifies that input is a file, and that the output doesn't exist. We check @@ -793,27 +723,23 @@ def rename_file( if os.path.isdir(input_dir_body_ext): lt.error_and_raise( RuntimeError, - 'ERROR: In rename_file(), requested input path exists and is a directory: ' - + str(input_dir_body_ext), + 'ERROR: In rename_file(), requested input path exists and is a directory: ' + str(input_dir_body_ext), ) if not os.path.isfile(input_dir_body_ext): lt.error_and_raise( RuntimeError, - 'ERROR: In rename_file(), requested input file to rename does not exist: ' - + str(input_dir_body_ext), + 'ERROR: In rename_file(), requested input file to rename does not exist: ' + str(input_dir_body_ext), ) if not is_file_check_only: if os.path.isfile(output_dir_body_ext): lt.error_and_raise( RuntimeError, - 'ERROR: In rename_file(), requested output file exists and is a file: ' - + str(output_dir_body_ext), + 'ERROR: In rename_file(), requested output file exists and is a file: ' + str(output_dir_body_ext), ) if os.path.isdir(output_dir_body_ext): lt.error_and_raise( RuntimeError, - 'ERROR: In rename_file(), requested output file exists as a directory: ' - + str(output_dir_body_ext), + 'ERROR: In rename_file(), requested output file exists as a directory: ' + str(output_dir_body_ext), ) if os.path.exists(output_dir_body_ext): lt.error_and_raise( @@ -878,26 +804,20 @@ def copy_file(input_dir_body_ext: str, output_dir: str, output_body_ext: str = N if os.path.isdir(input_dir_body_ext): lt.error_and_raise( RuntimeError, - 'ERROR: In copy_file(), requested input path exists and is a directory: ' - + str(input_dir_body_ext), + 'ERROR: In copy_file(), requested input path exists and is a directory: ' + str(input_dir_body_ext), ) if not os.path.isfile(input_dir_body_ext): lt.error_and_raise( RuntimeError, - 'ERROR: In copy_file(), requested input file to copy does not exist: ' - + str(input_dir_body_ext), + 'ERROR: In copy_file(), requested input file to copy does not 
exist: ' + str(input_dir_body_ext), ) if os.path.isfile(output_dir): lt.error_and_raise( - RuntimeError, - 'ERROR: In copy_file(), requested output path exists and is a file: ' - + str(output_dir), + RuntimeError, 'ERROR: In copy_file(), requested output path exists and is a file: ' + str(output_dir) ) if not os.path.isdir(output_dir): lt.error_and_raise( - RuntimeError, - 'ERROR: In copy_file(), requested output directory does not exist: ' - + str(output_dir), + RuntimeError, 'ERROR: In copy_file(), requested output directory does not exist: ' + str(output_dir) ) # Assemble the output file path. if output_body_ext == None: @@ -908,9 +828,7 @@ def copy_file(input_dir_body_ext: str, output_dir: str, output_body_ext: str = N # Check output. if file_exists(output_dir_body_ext): lt.error_and_raise( - FileExistsError, - 'ERROR: In copy_file(), requested output file already exists: ' - + str(output_dir_body_ext), + FileExistsError, 'ERROR: In copy_file(), requested output file already exists: ' + str(output_dir_body_ext) ) # Copy the file. @@ -928,9 +846,7 @@ def copy_file(input_dir_body_ext: str, output_dir: str, output_body_ext: str = N return output_body_ext -def get_temporary_file( - suffix: str = None, dir: str = None, text: bool = True -) -> tuple[int, str]: +def get_temporary_file(suffix: str = None, dir: str = None, text: bool = True) -> tuple[int, str]: """ Creates a temporary file to write to. Example usage:: @@ -958,11 +874,7 @@ def get_temporary_file( import opencsp.common.lib.opencsp_path.opencsp_root_path as orp import opencsp.common.lib.tool.system_tools as st - default_possible_dirs = [ - orp.opencsp_temporary_dir(), - os.path.expanduser('~'), - tempfile.gettempdir(), - ] + default_possible_dirs = [orp.opencsp_temporary_dir(), os.path.expanduser('~'), tempfile.gettempdir()] possible_dirs = [dir] if dir != None else default_possible_dirs success = False @@ -976,15 +888,11 @@ def get_temporary_file( break if not success: - raise FileNotFoundError( - f"Could not create a tempory file in the directory '{dirname}'!" - ) + raise FileNotFoundError(f"Could not create a tempory file in the directory '{dirname}'!") return fd, fname -def merge_files( - in_files: list[str], out_file: str, overwrite=False, remove_in_files: bool = False -): +def merge_files(in_files: list[str], out_file: str, overwrite=False, remove_in_files: bool = False): """Merges the given files into a single file. The order of the input files is preserved in the output file. 
@@ -997,8 +905,7 @@ def merge_files( if not overwrite: if file_exists(out_file): lt.error_and_raise( - RuntimeError, - f"Error: in file_tools.merge_files: output file already exists \"{out_file}\"", + RuntimeError, f"Error: in file_tools.merge_files: output file already exists \"{out_file}\"" ) with open(out_file, 'wb') as wfd: for f in in_files: @@ -1028,9 +935,7 @@ def convert_shortcuts_to_symlinks(dirname: str): source_path_name_ext = shortcut_dir_path_ext while source_path_name_ext.endswith(".lnk"): try: - source_path_name_ext = shell.CreateShortCut( - shortcut_dir_path_ext - ).Targetpath + source_path_name_ext = shell.CreateShortCut(shortcut_dir_path_ext).Targetpath except Exception as e: lt.error(f"Failed to read the shortcut {shortcut_dir_path_ext}") raise @@ -1044,19 +949,11 @@ def convert_shortcuts_to_symlinks(dirname: str): link_name_ext = link_name_ext[: -len(".lnk")] link_path_name_ext = os.path.join(dirname, link_name_ext) if os.path.islink(link_path_name_ext): - lt.debug( - f"In file_tools.convert_shortcuts(): link {link_path_name_ext} already exists" - ) + lt.debug(f"In file_tools.convert_shortcuts(): link {link_path_name_ext} already exists") continue elif file_exists( - link_path_name_ext, - error_if_exists_as_dir=False, - follow_symlinks=True, - ) or directory_exists( - link_path_name_ext, - error_if_exists_as_file=False, - follow_symlinks=True, - ): + link_path_name_ext, error_if_exists_as_dir=False, follow_symlinks=True + ) or directory_exists(link_path_name_ext, error_if_exists_as_file=False, follow_symlinks=True): lt.error_and_raise( FileExistsError, "Error in file_tools.convert_shortcuts(): " @@ -1065,21 +962,13 @@ def convert_shortcuts_to_symlinks(dirname: str): # create the link directory_flag = "" - if directory_exists( - source_path_name_ext, - error_if_exists_as_file=False, - follow_symlinks=True, - ): + if directory_exists(source_path_name_ext, error_if_exists_as_file=False, follow_symlinks=True): # is a directory directory_flag = "/D" try: - subt.run( - f"mklink {directory_flag} {link_path_name_ext} {source_path_name_ext}" - ) + subt.run(f"mklink {directory_flag} {link_path_name_ext} {source_path_name_ext}") except Exception as e: - link_dir, link_name_ext, link_ext = path_components( - link_path_name_ext - ) + link_dir, link_name_ext, link_ext = path_components(link_path_name_ext) lt.error( f"Failed to create the symbolic link '{link_name_ext}{link_ext}' in '{link_dir}' to '{source_path_name_ext}'" ) @@ -1092,11 +981,7 @@ def convert_shortcuts_to_symlinks(dirname: str): def write_text_file( - description: str, - output_dir: str, - output_file_body: str, - output_string_list: list[any], - error_if_dir_not_exist=True, + description: str, output_dir: str, output_file_body: str, output_string_list: list[any], error_if_dir_not_exist=True ) -> str: """Writes a strings to a ".txt" file, with each string on a new line. 
@@ -1125,15 +1010,13 @@ def write_text_file( if os.path.isfile(output_dir): lt.error_and_raise( FileExistsError, - 'ERROR: In write_text_file(), requested output path exists and is a file: ' - + str(output_dir), + 'ERROR: In write_text_file(), requested output path exists and is a file: ' + str(output_dir), ) if error_if_dir_not_exist == True: if not directory_exists(output_dir): lt.error_and_raise( FileNotFoundError, - 'ERROR: In write_text_file(), requested output directory does not exist: ' - + str(output_dir), + 'ERROR: In write_text_file(), requested output directory does not exist: ' + str(output_dir), ) else: create_directories_if_necessary(output_dir) @@ -1161,11 +1044,7 @@ def read_text_file(input_dir_body_ext): """ # Check input. if not file_exists(input_dir_body_ext): - lt.error_and_raise( - IOError, - 'ERROR: In read_text_file(), file does not exist: ' - + str(input_dir_body_ext), - ) + lt.error_and_raise(IOError, 'ERROR: In read_text_file(), file does not exist: ' + str(input_dir_body_ext)) # Open and read the file. with open(input_dir_body_ext, newline='') as input_stream: lines = input_stream.readlines() @@ -1192,14 +1071,7 @@ def write_csv_file( # TODO remove "write_csv_file" in favor of "to_csv" to match naming convention from pandas if log_warning: lt.info("'write_csv_file' is deprecated in favor of 'to_csv'") - return to_csv( - description, - output_dir, - output_file_body, - heading_line, - data_lines, - error_if_dir_not_exist, - ) + return to_csv(description, output_dir, output_file_body, heading_line, data_lines, error_if_dir_not_exist) def to_csv( @@ -1226,16 +1098,13 @@ def to_csv( # Check status of output_dir. if os.path.isfile(output_dir): lt.error_and_raise( - RuntimeError, - 'ERROR: In write_csv_file(), requested output path exists and is a file: ' - + str(output_dir), + RuntimeError, 'ERROR: In write_csv_file(), requested output path exists and is a file: ' + str(output_dir) ) if error_if_dir_not_exist == True: if not directory_exists(output_dir): lt.error_and_raise( RuntimeError, - 'ERROR: In write_csv_file(), requested output directory does not exist: ' - + str(output_dir), + 'ERROR: In write_csv_file(), requested output directory does not exist: ' + str(output_dir), ) else: create_directories_if_necessary(output_dir) @@ -1338,16 +1207,13 @@ def write_dict_file( # Check status of output_dir. if os.path.isfile(output_dir): lt.error_and_raise( - RuntimeError, - 'ERROR: In write_dict_file(), requested output path exists and is a file: ' - + str(output_dir), + RuntimeError, 'ERROR: In write_dict_file(), requested output path exists and is a file: ' + str(output_dir) ) if error_if_dir_not_exist == True: if not directory_exists(output_dir): lt.error_and_raise( RuntimeError, - 'ERROR: In write_dict_file(), requested output directory does not exist: ' - + str(output_dir), + 'ERROR: In write_dict_file(), requested output directory does not exist: ' + str(output_dir), ) else: create_directories_if_necessary(output_dir) @@ -1383,9 +1249,7 @@ def add_row_to_output_dict(input_row: tuple[any, any], output_dict: dict): # Check input. if len(input_row) != 2: lt.error_and_raise( - RuntimeError, - 'ERROR: In add_row_to_output_dict(), input row is not of length 2: ', - input_row, + RuntimeError, 'ERROR: In add_row_to_output_dict(), input row is not of length 2: ', input_row ) # Fetch key and value strings read from file. key_str = input_row[0] @@ -1417,17 +1281,11 @@ def read_dict(input_dict_dir_body_ext): """ # Check input. 
if not file_exists(input_dict_dir_body_ext): - lt.error_and_raise( - RuntimeError, - 'ERROR: In read_dict(), file does not exist: ' - + str(input_dict_dir_body_ext), - ) + lt.error_and_raise(RuntimeError, 'ERROR: In read_dict(), file does not exist: ' + str(input_dict_dir_body_ext)) input_dir, input_body, input_ext = path_components(input_dict_dir_body_ext) if input_ext.lower() != '.csv': lt.error_and_raise( - RuntimeError, - 'ERROR: In read_dict(), input file is not a csv file: ' - + str(input_dict_dir_body_ext), + RuntimeError, 'ERROR: In read_dict(), input file is not a csv file: ' + str(input_dict_dir_body_ext) ) # Open and read the file. @@ -1491,7 +1349,4 @@ def read_dict(input_dict_dir_body_ext): if __name__ == '__main__': - print( - "directories_with_no_leading_underscore('.') = ", - directories_with_no_leading_underscore('.'), - ) + print("directories_with_no_leading_underscore('.') = ", directories_with_no_leading_underscore('.')) diff --git a/opencsp/common/lib/tool/hdf5_tools.py b/opencsp/common/lib/tool/hdf5_tools.py index 389144b8..1505a84d 100644 --- a/opencsp/common/lib/tool/hdf5_tools.py +++ b/opencsp/common/lib/tool/hdf5_tools.py @@ -118,9 +118,7 @@ def visitor(name: str, object: h5py.Group | h5py.Dataset): return group_names, file_names_and_shapes -def _create_dataset_path( - base_dir: str, h5_dataset_path_name: str, dataset_ext: str = ".txt" -): +def _create_dataset_path(base_dir: str, h5_dataset_path_name: str, dataset_ext: str = ".txt"): dataset_location, dataset_name, _ = ft.path_components(h5_dataset_path_name) dataset_path = ft.norm_path(os.path.join(base_dir, dataset_location)) ft.create_directories_if_necessary(dataset_path) @@ -158,10 +156,7 @@ def unzip(hdf5_path_name_ext: str, destination_dir: str, dataset_format='npy'): # Create the HDF5 output directory if ft.directory_exists(hdf5_dir): - lt.error_and_raise( - FileExistsError, - f"Error in hdf5_tools.unzip(): output directory {hdf5_dir} already exists!", - ) + lt.error_and_raise(FileExistsError, f"Error in hdf5_tools.unzip(): output directory {hdf5_dir} already exists!") ft.create_directories_if_necessary(hdf5_dir) # Get all of what may be strings or images from the h5 file @@ -177,16 +172,10 @@ def unzip(hdf5_path_name_ext: str, destination_dir: str, dataset_format='npy'): for i, possible_string_name in enumerate(possible_strings_names): dataset_name = possible_string_name.split("/")[-1] h5_val = load_hdf5_datasets([possible_string_name], norm_path)[dataset_name] - if ( - isinstance(h5_val, np.ndarray) - and h5_val.ndim <= 1 - and isinstance(h5_val.tolist()[0], str) - ): + if isinstance(h5_val, np.ndarray) and h5_val.ndim <= 1 and isinstance(h5_val.tolist()[0], str): h5_val = h5_val.tolist()[0] if isinstance(h5_val, str): - dataset_path_name_ext = _create_dataset_path( - hdf5_dir, possible_strings[i][0], ".txt" - ) + dataset_path_name_ext = _create_dataset_path(hdf5_dir, possible_strings[i][0], ".txt") with open(dataset_path_name_ext, "w") as fout: fout.write(h5_val) else: @@ -206,8 +195,7 @@ def unzip(hdf5_path_name_ext: str, destination_dir: str, dataset_format='npy'): # we assume shapes are at least 10x10 pixels and have an aspect ratio of at least 10:1 aspect_ratio = max(shape[0], shape[1]) / min(shape[0], shape[1]) if (shape[0] >= 10 and shape[1] >= 10) and (aspect_ratio < 10.001): - dataset_path_name_ext = _create_dataset_path( - hdf5_dir, possible_images[i][0], ".png") + dataset_path_name_ext = _create_dataset_path(hdf5_dir, possible_images[i][0], ".png") # assumed grayscale or RGB if (len(shape) == 
2) or (shape[2] in [1, 3]): img = it.numpy_to_image(np_image) diff --git a/opencsp/common/lib/tool/image_tools.py b/opencsp/common/lib/tool/image_tools.py index 18f80553..636dce12 100644 --- a/opencsp/common/lib/tool/image_tools.py +++ b/opencsp/common/lib/tool/image_tools.py @@ -98,16 +98,7 @@ def numpy_to_image(arr: np.ndarray, rescale_or_clip='rescale', rescale_max=-1): image: PIL.Image The image representation of the input array. """ - allowed_int_types = [ - np.int8, - np.uint8, - np.int16, - np.uint16, - np.int32, - np.uint32, - np.int64, - np.uint64, - ] + allowed_int_types = [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] # get the current integer size, and convert to integer type if not np.issubdtype(arr.dtype, np.integer): @@ -136,9 +127,7 @@ def numpy_to_image(arr: np.ndarray, rescale_or_clip='rescale', rescale_max=-1): return img -def images_are_identical( - image_1: np.ndarray, image_2: np.ndarray, tolerance_pixel: int -): +def images_are_identical(image_1: np.ndarray, image_2: np.ndarray, tolerance_pixel: int): """Checks if two images are identical. Args: @@ -225,11 +214,7 @@ def min_max_colors(image: np.ndarray) -> tuple[np.ndarray, np.ndarray]: max_per_row: np.ndarray = np.max(image, axis=1) min_colors: np.ndarray = np.min(min_per_row, axis=0) max_colors: np.ndarray = np.max(max_per_row, axis=0) - if ( - max_colors.ndim != 1 - or max_colors.shape[0] != nchannels - or max_colors.shape[0] <= 1 - ): + if max_colors.ndim != 1 or max_colors.shape[0] != nchannels or max_colors.shape[0] <= 1: lt.error_and_raise( RuntimeError, "Programmer error in image_tools.min_max(): " diff --git a/opencsp/common/lib/tool/list_tools.py b/opencsp/common/lib/tool/list_tools.py index a3df61d7..b0c69143 100644 --- a/opencsp/common/lib/tool/list_tools.py +++ b/opencsp/common/lib/tool/list_tools.py @@ -87,8 +87,7 @@ def binary_search( if len(sorted_list) == 0: if err_if_not_equal: lt.error_and_raise( - RuntimeError, - f"Error: in list_tools.binary_search: empty list, can't find search value {search_val}", + RuntimeError, f"Error: in list_tools.binary_search: empty list, can't find search value {search_val}" ) return -1, None @@ -112,9 +111,7 @@ def binary_search( # if split between two values, choose the closer one else: - if isinstance(sorted_list[left], (int, float, complex)) and not isinstance( - sorted_list[left], bool - ): + if isinstance(sorted_list[left], (int, float, complex)) and not isinstance(sorted_list[left], bool): # list instances are numbers, so we can evaluate which is closer lv = sorted_list[left] rv = sorted_list[right] @@ -143,10 +140,7 @@ def binary_search( def get_range( - data_keys: list[float], - data_values: list[_T], - key_subset_range: tuple[float, float], - exclude_outside_range=False, + data_keys: list[float], data_values: list[_T], key_subset_range: tuple[float, float], exclude_outside_range=False ) -> tuple[list[float], list[_T]]: """Select a subset of the data_keys[] and data_values[], limited to the given key_subset_range. Chooses the keys closest to the given range start and end points, inclusively. @@ -179,9 +173,7 @@ def get_range( # inclusive only? 
if exclude_outside_range: - while (start_idx < len(data_keys)) and ( - data_keys[start_idx] < key_subset_range[0] - ): + while (start_idx < len(data_keys)) and (data_keys[start_idx] < key_subset_range[0]): start_idx += 1 while (stop_idx > 0) and (data_keys[stop_idx - 1] > key_subset_range[1]): stop_idx -= 1 diff --git a/opencsp/common/lib/tool/log_tools.py b/opencsp/common/lib/tool/log_tools.py index 3a4a8578..5fd1a8ea 100644 --- a/opencsp/common/lib/tool/log_tools.py +++ b/opencsp/common/lib/tool/log_tools.py @@ -18,11 +18,7 @@ global_multiprocessing_logger: log.Logger = None -def logger( - log_dir_body_ext: str = None, - level: int = log.INFO, - delete_existing_log: bool = True, -) -> log.Logger: +def logger(log_dir_body_ext: str = None, level: int = log.INFO, delete_existing_log: bool = True) -> log.Logger: """Initialize logging for single-process programs. Creates a fresh log file, deleting the existing log file if it exists as indicated by delete_existing_log_file. @@ -129,9 +125,7 @@ def multiprocessing_logger(log_dir_body_ext=None, level=log.INFO) -> log.Logger: process_name = hn_match.groups()[0] + ":" + process_name # Set formatter. - formatter = log.Formatter( - f"[%(asctime)s| %(levelname)s| {process_name}] %(message)s" - ) + formatter = log.Formatter(f"[%(asctime)s| %(levelname)s| {process_name}] %(message)s") if log_dir_body_ext is not None: handler = log.FileHandler(log_dir_body_ext) handler.setFormatter(formatter) @@ -151,9 +145,7 @@ def multiprocessing_logger(log_dir_body_ext=None, level=log.INFO) -> log.Logger: return global_multiprocessing_logger -def _add_stream_handlers( - logger_: log.Logger, level: int, formatter: log.Formatter = None -) -> None: +def _add_stream_handlers(logger_: log.Logger, level: int, formatter: log.Formatter = None) -> None: """Adds streams to the given logger. Prints From https://stackoverflow.com/questions/16061641/python-logging-split-between-stdout-and-stderr diff --git a/opencsp/common/lib/tool/math_tools.py b/opencsp/common/lib/tool/math_tools.py index 4ae6bf4b..005d0d3a 100644 --- a/opencsp/common/lib/tool/math_tools.py +++ b/opencsp/common/lib/tool/math_tools.py @@ -27,11 +27,7 @@ def robust_arccos(x: float) -> float: """ if (x < (-1.0 - INVERSE_TRIG_TOLERANCE)) or (x > (1.0 + INVERSE_TRIG_TOLERANCE)): # TODO RCB: REPLACE THIS WITH LOG/EXCEPTION THROW. - print( - 'ERROR: In robust_arccos(), input value ' - + str(x) - + ' is out of valid range [-1,1].' - ) + print('ERROR: In robust_arccos(), input value ' + str(x) + ' is out of valid range [-1,1].') assert False elif x < -1.0: return np.pi @@ -47,11 +43,7 @@ def robust_arcsin(x: float) -> float: """ if (x < (-1.0 - INVERSE_TRIG_TOLERANCE)) or (x > (1.0 + INVERSE_TRIG_TOLERANCE)): # TODO RCB: REPLACE THIS WITH LOG/EXCEPTION THROW. - print( - 'ERROR: In robust_arcsin(), input value ' - + str(x) - + ' is out of valid range [-1,1].' - ) + print('ERROR: In robust_arcsin(), input value ' + str(x) + ' is out of valid range [-1,1].') assert False elif x < -1.0: return -(np.pi / 2.0) @@ -108,9 +100,7 @@ def overlapping_range( -------- - overlap (list[float,float]|list): The overlapping range. If there is no overlap, returns an empty list. 
""" - if (len(range1) == 0 or len(range2) == 0) or ( - range2[0] > range1[1] or range1[0] > range2[1] - ): + if (len(range1) == 0 or len(range2) == 0) or (range2[0] > range1[1] or range1[0] > range2[1]): if default is None: default = [] return default @@ -300,8 +290,7 @@ def rolling_average(data: list[float] | npt.NDArray[np.float_], window_size: int return data if window_size < 1: lt.error_and_raise( - ValueError, - f"Error: in math_tools.rolling_average(), window_size must be >= 1, but is {window_size}", + ValueError, f"Error: in math_tools.rolling_average(), window_size must be >= 1, but is {window_size}" ) window_size = min(window_size, len(data)) @@ -332,9 +321,7 @@ def rolling_average(data: list[float] | npt.NDArray[np.float_], window_size: int @strict_types -def lambda_symmetric_paraboloid( - focal_length: numbers.Number, -) -> Callable[[float, float], float]: +def lambda_symmetric_paraboloid(focal_length: numbers.Number) -> Callable[[float, float], float]: a = 1.0 / (4 * focal_length) return lambda x, y: a * (x**2 + y**2) # return FunctionXYContinuous(f"{a} * (x**2 + y**2)") diff --git a/opencsp/common/lib/tool/system_tools.py b/opencsp/common/lib/tool/system_tools.py index 87a7a9a6..5afee9e6 100644 --- a/opencsp/common/lib/tool/system_tools.py +++ b/opencsp/common/lib/tool/system_tools.py @@ -63,7 +63,5 @@ def mem_status(): return total / 10e8, used / 10e8, (avail) / 10e8 else: - total_memory, used_memory, free_memory = map( - int, os.popen('free -t -m').readlines()[-1].split()[1:] - ) + total_memory, used_memory, free_memory = map(int, os.popen('free -t -m').readlines()[-1].split()[1:]) return total_memory / 1000, used_memory / 1000, free_memory / 1000 diff --git a/opencsp/common/lib/tool/test/test_file_tools.py b/opencsp/common/lib/tool/test/test_file_tools.py index 458a7eb4..5b5cea97 100644 --- a/opencsp/common/lib/tool/test/test_file_tools.py +++ b/opencsp/common/lib/tool/test/test_file_tools.py @@ -35,9 +35,7 @@ def test_files_in_directory_recursive(self): self.assertListEqual(expected, files_name_ext) def test_files_in_directory_recursive_files_only(self): - files_name_ext = ft.files_in_directory( - self.data_dir, recursive=True, files_only=True - ) + files_name_ext = ft.files_in_directory(self.data_dir, recursive=True, files_only=True) files_name_ext = [f.replace("\\", "/") for f in files_name_ext] expected = [".dotfile", "a.a", "b.b", "d/c.c", "d/e/f.f"] self.assertListEqual(expected, files_name_ext) @@ -48,9 +46,7 @@ def test_files_in_directory_by_extension(self): self.assertDictEqual(expected, files_name_ext) def test_files_in_directory_by_extension_case_sensity(self): - files_name_ext = ft.files_in_directory_by_extension( - self.data_dir, [".a", ".B"], case_sensitive=True - ) + files_name_ext = ft.files_in_directory_by_extension(self.data_dir, [".a", ".B"], case_sensitive=True) expected = {".a": ["a.a"], ".B": []} self.assertDictEqual(expected, files_name_ext) @@ -101,24 +97,18 @@ def test_copy_and_delete(self): ft.delete_files_in_directory(test_dir, "*.tmp") ft.create_file(test_dir + "/copy_and_delete_a.tmp") - ft.copy_and_delete_file( - test_dir + "/copy_and_delete_a.tmp", test_dir + "/copy_and_delete_b.tmp" - ) + ft.copy_and_delete_file(test_dir + "/copy_and_delete_a.tmp", test_dir + "/copy_and_delete_b.tmp") self.assertFalse(ft.file_exists(test_dir + "/copy_and_delete_a.tmp")) self.assertTrue(ft.file_exists(test_dir + "/copy_and_delete_b.tmp")) ft.create_file(test_dir + "/copy_and_delete_c.tmp") - ft.copy_and_delete_file( - test_dir + "/copy_and_delete_c.tmp", 
test_dir + "/copy_and_delete_d.tmp" - ) + ft.copy_and_delete_file(test_dir + "/copy_and_delete_c.tmp", test_dir + "/copy_and_delete_d.tmp") self.assertFalse(ft.file_exists(test_dir + "/copy_and_delete_c.tmp")) self.assertTrue(ft.file_exists(test_dir + "/copy_and_delete_d.tmp")) # don't delete the source file if the source and destination are the same file ft.create_file(test_dir + "/copy_and_delete_e.tmp") - ft.copy_and_delete_file( - test_dir + "/copy_and_delete_e.tmp", test_dir + "/copy_and_delete_e.tmp" - ) + ft.copy_and_delete_file(test_dir + "/copy_and_delete_e.tmp", test_dir + "/copy_and_delete_e.tmp") self.assertTrue(ft.file_exists(test_dir + "/copy_and_delete_e.tmp")) diff --git a/opencsp/common/lib/tool/test/test_list_tools.py b/opencsp/common/lib/tool/test/test_list_tools.py index e1f74849..1ab70f89 100644 --- a/opencsp/common/lib/tool/test/test_list_tools.py +++ b/opencsp/common/lib/tool/test/test_list_tools.py @@ -147,27 +147,19 @@ def test_get_range_approximate(self): self.assertEqual(closest_vals_back[0], [4]) self.assertEqual(closest_vals_back[1], ['e']) - middle_vals_exclusive = listt.get_range( - lvals, tvals, [1.2, 2.8], exclude_outside_range=True - ) + middle_vals_exclusive = listt.get_range(lvals, tvals, [1.2, 2.8], exclude_outside_range=True) self.assertEqual(middle_vals_exclusive[0], [2]) self.assertEqual(middle_vals_exclusive[1], ['c']) - empty_vals_front = listt.get_range( - lvals, tvals, [-100, -1], exclude_outside_range=True - ) + empty_vals_front = listt.get_range(lvals, tvals, [-100, -1], exclude_outside_range=True) self.assertEqual(len(empty_vals_front[0]), 0) self.assertEqual(len(empty_vals_front[1]), 0) - empty_vals_middle = listt.get_range( - lvals, tvals, [0.1, 0.9], exclude_outside_range=True - ) + empty_vals_middle = listt.get_range(lvals, tvals, [0.1, 0.9], exclude_outside_range=True) self.assertEqual(len(empty_vals_middle[0]), 0) self.assertEqual(len(empty_vals_middle[1]), 0) - empty_vals_back = listt.get_range( - lvals, tvals, [6, 100], exclude_outside_range=True - ) + empty_vals_back = listt.get_range(lvals, tvals, [6, 100], exclude_outside_range=True) self.assertEqual(len(empty_vals_back[0]), 0) self.assertEqual(len(empty_vals_back[1]), 0) @@ -184,26 +176,8 @@ def test_natural_sort_just_numbers(self): self.assertEqual(actual, expected) def test_natural_sort_strs_nums(self): - lvals = [ - "c_4_c", - "c_3_c", - "c_3_a", - "b_05_b", - "b_4_b", - "b_0_c", - "1_a_2", - "1_a_1", - ] - expected = [ - "1_a_1", - "1_a_2", - "b_0_c", - "b_4_b", - "b_05_b", - "c_3_a", - "c_3_c", - "c_4_c", - ] + lvals = ["c_4_c", "c_3_c", "c_3_a", "b_05_b", "b_4_b", "b_0_c", "1_a_2", "1_a_1"] + expected = ["1_a_1", "1_a_2", "b_0_c", "b_4_b", "b_05_b", "c_3_a", "c_3_c", "c_4_c"] actual = listt.natural_sort(lvals) self.assertEqual(actual, expected) diff --git a/opencsp/common/lib/tool/test/test_log_tools.py b/opencsp/common/lib/tool/test/test_log_tools.py index 60e43937..a92dc60a 100644 --- a/opencsp/common/lib/tool/test/test_log_tools.py +++ b/opencsp/common/lib/tool/test/test_log_tools.py @@ -23,9 +23,7 @@ class TestLogTools(unittest.TestCase): @classmethod def setUpClass(cls) -> None: - cls.out_dir = os.path.join( - 'common', 'lib', 'test', 'data', 'output', 'tool', 'log_tools' - ) + cls.out_dir = os.path.join('common', 'lib', 'test', 'data', 'output', 'tool', 'log_tools') ft.create_directories_if_necessary(cls.out_dir) ft.delete_files_in_directory(cls.out_dir, "*") super().setUpClass() @@ -35,26 +33,10 @@ def setUp(self) -> None: self.log_dir_body_ext = 
os.path.join(self.out_dir, test_method + ".txt") def proc_exec(self, func_name1: str, func_name2: str = None): - with subprocess.Popen( - [ - sys.executable, - __file__, - "--funcname", - func_name1, - "--logname", - self.log_dir_body_ext, - ] - ): + with subprocess.Popen([sys.executable, __file__, "--funcname", func_name1, "--logname", self.log_dir_body_ext]): if func_name2 != None: with subprocess.Popen( - [ - sys.executable, - __file__, - "--funcname", - func_name2, - "--logname", - self.log_dir_body_ext, - ] + [sys.executable, __file__, "--funcname", func_name2, "--logname", self.log_dir_body_ext] ): pass @@ -71,10 +53,7 @@ def _log_single_process_logger(self, logname): def test_single_process_logger(self): self.proc_exec("_log_single_process_logger") log_contents = self.get_log_contents() - self.assertTrue( - "Hello, world!" in log_contents, - f"Can't find hello log in log contents:\n\t\"{log_contents}\"", - ) + self.assertTrue("Hello, world!" in log_contents, f"Can't find hello log in log contents:\n\t\"{log_contents}\"") def _log_single_process_dont_delete(self, logname): lt.logger(logname, delete_existing_log=False) @@ -86,13 +65,9 @@ def test_single_process_dont_delete(self): self.proc_exec("_log_single_process_dont_delete") log_contents = self.get_log_contents() + self.assertTrue("Hello, world!" in log_contents, f"Can't find hello log in log contents:\n\t\"{log_contents}\"") self.assertTrue( - "Hello, world!" in log_contents, - f"Can't find hello log in log contents:\n\t\"{log_contents}\"", - ) - self.assertTrue( - "Goodbye, world!" in log_contents, - f"Can't find goodbye log in log contents:\n\t\"{log_contents}\"", + "Goodbye, world!" in log_contents, f"Can't find goodbye log in log contents:\n\t\"{log_contents}\"" ) def _log_multiprocess_logger1(self, logname): @@ -110,17 +85,10 @@ def test_multiprocess_logger(self): self.proc_exec("_log_multiprocess_logger1", "_log_multiprocess_logger2") log_contents = self.get_log_contents() + self.assertTrue("Hello, world!" in log_contents, f"Can't find hello log in log contents:\n\t\"{log_contents}\"") + self.assertTrue("other process" in log_contents, f"Can't find other log in log contents:\n\t\"{log_contents}\"") self.assertTrue( - "Hello, world!" in log_contents, - f"Can't find hello log in log contents:\n\t\"{log_contents}\"", - ) - self.assertTrue( - "other process" in log_contents, - f"Can't find other log in log contents:\n\t\"{log_contents}\"", - ) - self.assertTrue( - "Goodbye, world!" in log_contents, - f"Can't find goodbye log in log contents:\n\t\"{log_contents}\"", + "Goodbye, world!" in log_contents, f"Can't find goodbye log in log contents:\n\t\"{log_contents}\"" ) def _log_log_level_screening(self, logname): @@ -132,13 +100,9 @@ def test_log_level_screening(self): self.proc_exec("_log_log_level_screening") log_contents = self.get_log_contents() - self.assertTrue( - "Hello, world!" in log_contents, - f"Can't find hello log in log contents:\n\t\"{log_contents}\"", - ) + self.assertTrue("Hello, world!" in log_contents, f"Can't find hello log in log contents:\n\t\"{log_contents}\"") self.assertFalse( - "Goodbye, world!" in log_contents, - "Found goodbye log in log contents when it shouldn't be there", + "Goodbye, world!" in log_contents, "Found goodbye log in log contents when it shouldn't be there" ) def _log_set_log_level(self, logname): @@ -150,13 +114,9 @@ def test_set_log_level(self): self.proc_exec("_log_set_log_level") log_contents = self.get_log_contents() + self.assertTrue("Hello, world!" 
in log_contents, f"Can't find hello log in log contents:\n\t\"{log_contents}\"") self.assertTrue( - "Hello, world!" in log_contents, - f"Can't find hello log in log contents:\n\t\"{log_contents}\"", - ) - self.assertTrue( - "Goodbye, world!" in log_contents, - f"Can't find goodbye log in log contents:\n\t\"{log_contents}\"", + "Goodbye, world!" in log_contents, f"Can't find goodbye log in log contents:\n\t\"{log_contents}\"" ) def _log_error_and_raise(self, logname): @@ -170,10 +130,7 @@ def test_error_and_raise(self): self.proc_exec("_log_error_and_raise") log_contents = self.get_log_contents() - self.assertTrue( - "Error, world!" in log_contents, - f"Can't find error log in log contents:\n\t\"{log_contents}\"", - ) + self.assertTrue("Error, world!" in log_contents, f"Can't find error log in log contents:\n\t\"{log_contents}\"") self.assertTrue( "RuntimeError encountered" in log_contents, f"Can't find evidence of RuntimeError in log contents:\n\t\"{log_contents}\"", @@ -183,9 +140,7 @@ def test_error_and_raise(self): if __name__ == '__main__': import argparse - parser = argparse.ArgumentParser( - prog=__file__.rstrip(".py"), description='Testing log tools' - ) + parser = argparse.ArgumentParser(prog=__file__.rstrip(".py"), description='Testing log tools') parser.add_argument('--logname', help="The name of the log file to log to.") parser.add_argument('--funcname', help="Calls the given function") args = parser.parse_args() diff --git a/opencsp/common/lib/tool/test/test_math_tools.py b/opencsp/common/lib/tool/test/test_math_tools.py index a29aafe0..5e84fc92 100644 --- a/opencsp/common/lib/tool/test/test_math_tools.py +++ b/opencsp/common/lib/tool/test/test_math_tools.py @@ -39,13 +39,9 @@ def test_rolling_average_simple(self): # average of all ones should be all ones in10 = np.ones((10)) out10 = mt.rolling_average(in10, 5) - self.assertAlmostEqual( - np.sum(in10 - out10), 0, msg=f"A: out10 is not all 1's: {out10}" - ) + self.assertAlmostEqual(np.sum(in10 - out10), 0, msg=f"A: out10 is not all 1's: {out10}") out10 = mt.rolling_average(in10, 6) - self.assertAlmostEqual( - np.sum(in10 - out10), 0, msg=f"B: out10 is not all 1's: {out10}" - ) + self.assertAlmostEqual(np.sum(in10 - out10), 0, msg=f"B: out10 is not all 1's: {out10}") def test_rolling_average_full(self): # average of all ones should be all ones diff --git a/opencsp/common/lib/tool/test/test_typing_tools.py b/opencsp/common/lib/tool/test/test_typing_tools.py index 68771983..1825ec51 100644 --- a/opencsp/common/lib/tool/test/test_typing_tools.py +++ b/opencsp/common/lib/tool/test/test_typing_tools.py @@ -46,13 +46,9 @@ def test_default(self): self.assertEqual("a", tt.default(self.raises_runtime_error, "a")) with self.assertRaises(RuntimeError): tt.default(self.raises_runtime_error, self.raises_value_error) - self.assertEqual( - None, tt.default(self.raises_runtime_error, self.raises_value_error, None) - ) + self.assertEqual(None, tt.default(self.raises_runtime_error, self.raises_value_error, None)) self.assertEqual("a", tt.default("a", self.raises_value_error)) - self.assertEqual( - "a", tt.default(None, lambda: lval[4], dval[3], lambda: dval[4], "a") - ) + self.assertEqual("a", tt.default(None, lambda: lval[4], dval[3], lambda: dval[4], "a")) with self.assertRaises(ValueError): self.assertEqual("a", tt.default("a")) diff --git a/opencsp/common/lib/tool/time_date_tools.py b/opencsp/common/lib/tool/time_date_tools.py index 5a64e6ce..c48ea909 100644 --- a/opencsp/common/lib/tool/time_date_tools.py +++ 
b/opencsp/common/lib/tool/time_date_tools.py @@ -79,19 +79,9 @@ def to_datetime(ymdhmsz: list[int, int, int, int, int, float, int] | datetime): if isinstance(ymdhmsz, datetime): return ymdhmsz if len(ymdhmsz) != 7: - raise RuntimeError( - f"Unexpected datetime representation for the input list {ymdhmsz}!" - ) + raise RuntimeError(f"Unexpected datetime representation for the input list {ymdhmsz}!") tzinfo = timezone(timedelta(hours=ymdhmsz[6])) - return datetime( - ymdhmsz[0], - ymdhmsz[1], - ymdhmsz[2], - ymdhmsz[3], - ymdhmsz[4], - ymdhmsz[5], - tzinfo=tzinfo, - ) + return datetime(ymdhmsz[0], ymdhmsz[1], ymdhmsz[2], ymdhmsz[3], ymdhmsz[4], ymdhmsz[5], tzinfo=tzinfo) def from_datetime(dt: datetime) -> list[int, int, int, int, int, float, int]: @@ -139,16 +129,11 @@ def tz(name_or_offset: str | float | timedelta): return tz # failed to match - lt.error_and_raise( - RuntimeError, - f"Error: in time_date_tools.tz(), failed to find a timezone with the name {name}", - ) + lt.error_and_raise(RuntimeError, f"Error: in time_date_tools.tz(), failed to find a timezone with the name {name}") def add_seconds_to_ymdhmsz( - ymdhmsz: list[int, int, int, int, int, float, int], - time_sec: float, - ignore_legacy=None, + ymdhmsz: list[int, int, int, int, int, float, int], time_sec: float, ignore_legacy=None ) -> list[int, int, int, int, int, float, int]: ignore_legacy = tdt_ignore_legacy if (ignore_legacy == None) else ignore_legacy if ignore_legacy == False: @@ -179,18 +164,14 @@ def add_seconds_to_ymdhmsz( second -= 60 second += time_sec else: - print( - 'ERROR: In add_seconds_to_ymdhms(), rolling over a day boundary not implemented yet.' - ) + print('ERROR: In add_seconds_to_ymdhms(), rolling over a day boundary not implemented yet.') assert False # Return. return [year, month, day, hour, minute, second, zone] def subtract_seconds_from_ymdhmsz( - ymdhmsz: list[int, int, int, int, int, float, int], - time_sec: float, - ignore_legacy=None, + ymdhmsz: list[int, int, int, int, int, float, int], time_sec: float, ignore_legacy=None ) -> list[int, int, int, int, int, float, int]: ignore_legacy = tdt_ignore_legacy if (ignore_legacy == None) else ignore_legacy if ignore_legacy == False: diff --git a/opencsp/common/lib/tool/typing_tools.py b/opencsp/common/lib/tool/typing_tools.py index 17b3e70b..7b6f7329 100644 --- a/opencsp/common/lib/tool/typing_tools.py +++ b/opencsp/common/lib/tool/typing_tools.py @@ -8,9 +8,7 @@ T = TypeVar("T") -def default( - primary: T | Callable[[], T] | None, *default: T | Callable[[], T] | None -) -> T | None: +def default(primary: T | Callable[[], T] | None, *default: T | Callable[[], T] | None) -> T | None: # TODO should this be in a different module? """Get the default value if the primary value is None or if the value is a callable that raises an error. 
@@ -78,8 +76,7 @@ def default( # default(dval[3]) lt.error_and_raise( ValueError, - "Error in typing_tools.default(): " - + "at least one alternative for a default value must be provided.", + "Error in typing_tools.default(): " + "at least one alternative for a default value must be provided.", ) for val in all_vals: @@ -178,8 +175,7 @@ def wrapper(*posargs, **kwargs): if argname in kwargtypes: if ( arg != None - and type(arg).__name__ - != kwargtypes[argname] # for cases of types represented as strings + and type(arg).__name__ != kwargtypes[argname] # for cases of types represented as strings if type(kwargtypes[argname]) == str else not isinstance(arg, kwargtypes[argname]) ): @@ -190,8 +186,7 @@ def wrapper(*posargs, **kwargs): if kw in kwargtypes: if ( kwargs[kw] != None - and type(kwargs[kw]).__name__ - != kwargtypes[kw] # for cases of types represented as strings + and type(kwargs[kw]).__name__ != kwargtypes[kw] # for cases of types represented as strings if type(kwargtypes[kw]) == str else not isinstance(kwargs[kw], kwargtypes[kw]) ): diff --git a/opencsp/common/lib/uas/Scan.py b/opencsp/common/lib/uas/Scan.py index 0ebcaca6..370c19fa 100644 --- a/opencsp/common/lib/uas/Scan.py +++ b/opencsp/common/lib/uas/Scan.py @@ -81,11 +81,7 @@ def construct_scan_given_segments_of_interest(list_of_xyz_segments, scan_paramet fly_backward = False else: fly_backward = True - passes.append( - sp.construct_scan_pass_given_segment_of_interest( - segment_xyz, fly_backward, scan_parameters - ) - ) + passes.append(sp.construct_scan_pass_given_segment_of_interest(segment_xyz, fly_backward, scan_parameters)) idx += 1 # Construct scan. scan = Scan(passes) @@ -114,11 +110,7 @@ def construct_scan_given_UFACET_scan_passes(ufacet_scan_pass_list, scan_paramete fly_backward = False else: fly_backward = True - passes.append( - sp.construct_scan_pass_given_UFACET_scan_pass( - ufacet_scan_pass, fly_backward, scan_parameters - ) - ) + passes.append(sp.construct_scan_pass_given_UFACET_scan_pass(ufacet_scan_pass, fly_backward, scan_parameters)) idx += 1 # Construct scan. scan = Scan(passes) diff --git a/opencsp/common/lib/uas/ScanPass.py b/opencsp/common/lib/uas/ScanPass.py index cadce43a..e2c00d9a 100644 --- a/opencsp/common/lib/uas/ScanPass.py +++ b/opencsp/common/lib/uas/ScanPass.py @@ -24,31 +24,23 @@ def __init__(self): # Defining members. self._segment_dict = None # Dictionary of defining parameters. Only set if this is a segment scan. - self._ufacet_scan_pass = ( - None # A UfacetScanPass object. Only set if this is a UFACET scan. - ) + self._ufacet_scan_pass = None # A UfacetScanPass object. Only set if this is a UFACET scan. self._lead_in = None # m. Do not access this member externally; use lead_in() function instead. self._run_past = None # m. Do not access this member externally; use run_past() function instead. - self._locale = ( - None # Do not access this member externally; use locale() function instead. - ) + self._locale = None # Do not access this member externally; use locale() function instead. # ACCESS def core_waypoints(self): # Core waypoints do not include the lead-in and run-past extra travel distance. if self._core_waypoint_list == None: - print( - 'ERROR: In ScanPass.core_waypoints(), attempt to fetch unset _core_waypoint_list.' 
- ) + print('ERROR: In ScanPass.core_waypoints(), attempt to fetch unset _core_waypoint_list.') assert False return self._core_waypoint_list def waypoints(self): if self._waypoint_list == None: - print( - 'ERROR: In ScanPass.waypoints(), attempt to fetch unset _waypoint_list.' - ) + print('ERROR: In ScanPass.waypoints(), attempt to fetch unset _waypoint_list.') assert False return self._waypoint_list @@ -60,9 +52,7 @@ def segment_dict(self): def ufacet_scan_pass(self): if self._ufacet_scan_pass == None: - print( - 'In ScanPass.ufacet_scan_pass(), attempt to fetch unset _ufacet_scan_pass.' - ) + print('In ScanPass.ufacet_scan_pass(), attempt to fetch unset _ufacet_scan_pass.') assert False return self._ufacet_scan_pass @@ -88,17 +78,13 @@ def locale(self): # MODIFICATION - def set_core_waypoints_given_segment_of_interest( - self, segment_xyz, fly_backward, raster_scan_parameters - ): + def set_core_waypoints_given_segment_of_interest(self, segment_xyz, fly_backward, raster_scan_parameters): # This routine does not consider ground clearance. # It assumes that the specified eta and relative_z will yield a collisiion-free result. # # Check input. if raster_scan_parameters['eta'] > 0: - print( - 'ERROR: In ScanPass.set_waypoints_given_segment_of_interest(), Postive gaze angle encountered.' - ) + print('ERROR: In ScanPass.set_waypoints_given_segment_of_interest(), Postive gaze angle encountered.') assert False # Set segment-specific data member. self._segment_dict = {} @@ -151,9 +137,7 @@ def set_core_waypoints_given_segment_of_interest( start_eta = eta end_eta = eta # Construct way points. - start_wpt = wp.WayPoint( - locale, start_xyz, theta, start_eta, stop=False, speed=speed - ) + start_wpt = wp.WayPoint(locale, start_xyz, theta, start_eta, stop=False, speed=speed) end_wpt = wp.WayPoint(locale, end_xyz, theta, end_eta, stop=False, speed=speed) # Set waypoint list data member. self._core_waypoint_list = [start_wpt, end_wpt] @@ -169,14 +153,10 @@ def set_core_waypoints_from_UFACET_scan_pass(self, ufacet_scan_pass): def set_waypoints_with_margin(self, scan_parameters): # Check input. if self._core_waypoint_list == None: - print( - 'In ScanPass.set_waypoints_with_margin(), attempt to fetch unset _core_waypoint_list.' - ) + print('In ScanPass.set_waypoints_with_margin(), attempt to fetch unset _core_waypoint_list.') assert False if len(self._core_waypoint_list) < 2: - print( - 'In ScanPass.set_waypoints_with_margin(), _core_waypoint_list had fewer than two elements.' - ) + print('In ScanPass.set_waypoints_with_margin(), _core_waypoint_list had fewer than two elements.') assert False # Fetch control parameters. lead_in = scan_parameters['lead_in'] @@ -190,9 +170,7 @@ def set_waypoints_with_margin(self, scan_parameters): x0 = xyz0[0] y0 = xyz0[1] z0 = xyz0[2] - wptN = self._core_waypoint_list[ - -1 - ] # There might be multiple waypoints in one pass. + wptN = self._core_waypoint_list[-1] # There might be multiple waypoints in one pass. xyzN = wptN.xyz xN = xyzN[0] yN = xyzN[1] @@ -261,12 +239,8 @@ def set_waypoints_with_margin(self, scan_parameters): ) assert False # Construct start and end waypoints. - start_wpt = wp.WayPoint( - locale0, start_xyz, theta0, start_eta, stop=False, speed=speed0 - ) - end_wpt = wp.WayPoint( - localeN, end_xyz, thetaN, end_eta, stop=False, speed=speedN - ) + start_wpt = wp.WayPoint(locale0, start_xyz, theta0, start_eta, stop=False, speed=speed0) + end_wpt = wp.WayPoint(localeN, end_xyz, thetaN, end_eta, stop=False, speed=speedN) # Produce updated waypoint list. 
if len(self._core_waypoint_list) == 2: waypoint_list = [ @@ -289,9 +263,7 @@ def draw(self, view, scan_pass_styles): # Fetch segment of interest. segment_xyz = self._segment_dict['segment_xyz'] # Draw segment. - view.draw_xyz_list( - segment_xyz, style=scan_pass_style.segment_of_interest_style - ) + view.draw_xyz_list(segment_xyz, style=scan_pass_style.segment_of_interest_style) # Core waypoint segment. if scan_pass_style.draw_core_segment: @@ -300,9 +272,7 @@ def draw(self, view, scan_pass_styles): core_wptN = self.core_waypoints()[-1] core_segment_xyz = [core_wpt0.xyz, core_wptN.xyz] # Draw segment. - view.draw_xyz_list( - core_segment_xyz, style=scan_pass_style.core_segment_style - ) + view.draw_xyz_list(core_segment_xyz, style=scan_pass_style.core_segment_style) # ------------------------------------------------------------------------------------------------------- @@ -310,13 +280,9 @@ def draw(self, view, scan_pass_styles): # -def construct_scan_pass_given_segment_of_interest( - segment_xyz, fly_backward, scan_parameters -): +def construct_scan_pass_given_segment_of_interest(segment_xyz, fly_backward, scan_parameters): scan_pass = ScanPass() - scan_pass.set_core_waypoints_given_segment_of_interest( - segment_xyz, fly_backward, scan_parameters - ) + scan_pass.set_core_waypoints_given_segment_of_interest(segment_xyz, fly_backward, scan_parameters) scan_pass.set_waypoints_with_margin(scan_parameters) return scan_pass diff --git a/opencsp/common/lib/uas/WayPoint.py b/opencsp/common/lib/uas/WayPoint.py index b1761954..bb6c71e1 100644 --- a/opencsp/common/lib/uas/WayPoint.py +++ b/opencsp/common/lib/uas/WayPoint.py @@ -42,9 +42,7 @@ def set_longitude_latitude(self): self.lon = lon self.lat = lat else: - print( - 'In WayPoint. set_longitude_latitude(), unexpected locale encountered.' - ) + print('In WayPoint. set_longitude_latitude(), unexpected locale encountered.') assert False def heading_deg(self): diff --git a/opencsp/test/test_DocStringsExist.py b/opencsp/test/test_DocStringsExist.py index 460ad49d..d95ce00b 100644 --- a/opencsp/test/test_DocStringsExist.py +++ b/opencsp/test/test_DocStringsExist.py @@ -16,9 +16,7 @@ def test_docstrings_exist_for_methods(): method_list = [ func for func in dir(class_module) - if callable(getattr(class_module, func)) - and not func.startswith("__") - and not func.startswith("_") + if callable(getattr(class_module, func)) and not func.startswith("__") and not func.startswith("_") ] for method in method_list: From 6a9b9c9c32df1b5c88d49cbccb1c623f517f71eb Mon Sep 17 00:00:00 2001 From: Evan Harvey Date: Thu, 21 Mar 2024 16:44:13 -0600 Subject: [PATCH 2/5] .github/workflows: Ensure pyproject.toml is used --- .github/workflows/format.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 20657050..cea75269 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -32,4 +32,5 @@ jobs: - name: Black formatting check run: | pip install black - black . -S -C --check --diff \ No newline at end of file + cd OpenCSP + black . 
-S -C --check --diff --config ./pyproject.toml \ No newline at end of file From 97c24d4f1b4769b830f0c539d66462a150109b0a Mon Sep 17 00:00:00 2001 From: Evan Harvey Date: Thu, 21 Mar 2024 16:44:33 -0600 Subject: [PATCH 3/5] Disable test_with_attrfile --- opencsp/common/lib/render/test/test_ImageAttributeParser.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/opencsp/common/lib/render/test/test_ImageAttributeParser.py b/opencsp/common/lib/render/test/test_ImageAttributeParser.py index 6df205ca..91bc498e 100644 --- a/opencsp/common/lib/render/test/test_ImageAttributeParser.py +++ b/opencsp/common/lib/render/test/test_ImageAttributeParser.py @@ -3,6 +3,7 @@ import os import unittest import unittest.mock +import pytest import opencsp.common.lib.render.ImageAttributeParser as iap import opencsp.common.lib.tool.file_tools as ft @@ -48,6 +49,7 @@ def test_has_contents(self): parser = iap.ImageAttributeParser(notes="") self.assertEqual(True, parser.has_contents()) + @pytest.mark.skip("See https://github.com/sandialabs/OpenCSP/issues/3") def test_with_attrfile(self): """Load all values from the associated attributes file. Use the new current_image_source value.""" parser = iap.ImageAttributeParser(current_image_source=self.img_file) From d7572feb83a260ec453f8ca71e5e198e8012f449 Mon Sep 17 00:00:00 2001 From: Evan Harvey Date: Thu, 21 Mar 2024 16:46:40 -0600 Subject: [PATCH 4/5] enable docs-check --- .github/workflows/docs_check.yml | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/.github/workflows/docs_check.yml b/.github/workflows/docs_check.yml index b4978b6e..eb03f602 100644 --- a/.github/workflows/docs_check.yml +++ b/.github/workflows/docs_check.yml @@ -1,13 +1,12 @@ name: github-DOCS on: - push - #pull_request: - # paths-ignore: - # - '**/*.rst' - # - '**/*.md' - # - 'doc/**' - # types: [ opened, reopened, synchronize ] + pull_request: + paths-ignore: + - '**/*.rst' + - '**/*.md' + - 'doc/**' + types: [ opened, reopened, synchronize ] permissions: contents: none From 17fad8cf0d86026c933078a59429a6450cc221ae Mon Sep 17 00:00:00 2001 From: Evan Harvey Date: Thu, 21 Mar 2024 16:54:00 -0600 Subject: [PATCH 5/5] doc: More fixes --- doc/source/contributing.rst | 1 + doc/source/example/sofast/config.rst | 21 ++++++--------------- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 88f59f30..1934c4a5 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -98,6 +98,7 @@ NOTE, the following pre-commit hook can be added to automatically apply black to commits: :: + $ cat .git/hooks/pre-commit for FILE in $(git diff --cached --name-only | egrep '.*\.py$') do diff --git a/doc/source/example/sofast/config.rst b/doc/source/example/sofast/config.rst index b3c8a2ed..10529b6f 100644 --- a/doc/source/example/sofast/config.rst +++ b/doc/source/example/sofast/config.rst @@ -1,18 +1,9 @@ -Multi Facet Data Process -======================== - -.. currentmodule:: example.sofast_fringe.example_multi_facet_data_process - -.. automodule:: example.sofast_fringe.example_multi_facet_data_process - :members: - :show-inheritance: - Single Facet Data Process ========================= -.. currentmodule:: example.sofast_fringe.example_single_facet_data_process +.. currentmodule:: example.sofast_fringe.example_process_single_facet -.. automodule:: example.sofast_fringe.example_single_facet_data_process +.. 
automodule:: example.sofast_fringe.example_process_single_facet :members: :show-inheritance: @@ -20,17 +11,17 @@ Single Facet Data Process Undefined Facet Data Process ============================ -.. currentmodule:: example.sofast_fringe.example_undefined_facet_data_process +.. currentmodule:: example.sofast_fringe.example_process_facet_ensemble -.. automodule:: example.sofast_fringe.example_undefined_facet_data_process +.. automodule:: example.sofast_fringe.example_process_facet_ensemble :members: :show-inheritance: View Camera Distortion ====================== -.. currentmodule:: example.sofast_fringe.example_view_camera_distortion +.. currentmodule:: example.sofast_fringe.example_calibration_camera_pose -.. automodule:: example.sofast_fringe.example_view_camera_distortion +.. automodule:: example.sofast_fringe.example_calibration_camera_pose :members: :show-inheritance:
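
NOTE, the CI formatting check updated in PATCH 2/5 can be reproduced locally before pushing. A minimal sketch, assuming Black is installed via pip and the checkout directory is named OpenCSP as it is in CI (adjust the path for your own clone)::

    # Run the same check as .github/workflows/format.yml:
    #   -S              skip string normalization
    #   -C              skip the magic trailing comma
    #   --check --diff  report formatting differences without modifying files
    #   --config        read the project's Black settings (line length 120) from pyproject.toml
    pip install black
    cd OpenCSP
    black . -S -C --check --diff --config ./pyproject.toml

Dropping the --check and --diff flags applies the formatting in place rather than only reporting it.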