From 804121dd8a2a62d1fa8d9f04bcee482867b17841 Mon Sep 17 00:00:00 2001
From: jurjen93
Date: Sun, 4 Apr 2021 20:20:25 +0200
Subject: [PATCH] updates

---
 .gitignore                  |   4 +-
 README.md                   |   4 +-
 make_movie.py               | 122 +++++++++++++++++++++++++++---------
 poster/scripts/imaging.py   |  29 +++++----
 video/scripts/moviemaker.py |  18 +++---
 5 files changed, 123 insertions(+), 54 deletions(-)

diff --git a/.gitignore b/.gitignore
index a867d8a..d25b7ce 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,4 +21,6 @@ __pycache__
 #git
 .git
 #csv files
-*.csv
\ No newline at end of file
+*.csv
+#files not for github
+make_movie_highres.py
\ No newline at end of file
diff --git a/README.md b/README.md
index 7a23121..543aa90 100644
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@ You need to clone this repo with:
 ```git clone https://github.com/jurjen93/advanced_astro_visualiziation.git```
 
 ### Catalogue csv file
-You need to have a catalogue with sources. We have an example given in the folder 'catalogue'.\
+You need to have a catalogue with sources for the poster (for the video it is optional). We have an example given in the folder 'catalogue'.\
 Use the following fields:
 * ```source_id``` -> id of the source
 * ```RA``` -> right ascension of the object
@@ -41,7 +41,7 @@ Run:\
 ```python make_video.py```\
 where you can use the following flags
 * ```-d``` -> Choose to download a specific fits file from the internet. Use ```1``` if you want to, leave empty otherwise.
-* ```-csv``` -> Give a specific csv file with sources to include as cutouts in the poster.
+* ```-csv``` -> Give a specific csv file with sources to include as cutouts in the poster. If you leave it empty, it goes through the whole field.
 * ```-N``` -> Number of sources to use
 * ```-fr``` -> Frame rate of the video. Advice is to use ```60``` to make the video smooth.
 * ```-fi``` -> Fits file to use. (If you don't download your fits file)
diff --git a/make_movie.py b/make_movie.py
index a511642..22d4505 100644
--- a/make_movie.py
+++ b/make_movie.py
@@ -17,21 +17,23 @@ args = parser.parse_args()
 
 def distance(obj_1, obj_2):
-    return sqrt((obj_1[0]-obj_2[0])**2+(obj_1[1]-obj_2[1])**2)
+    return sqrt((obj_1[0]-obj_2[0])**2+4*(obj_1[1]-obj_2[1])**2)
+
+def isNaN(a):
+    return a!=a
 
 if __name__ == '__main__':
     start = timer()
     if args.framerate: FRAMERATE = args.framerate
-    else: FRAMERATE = 64
+    else: FRAMERATE = 60
     if args.sourcenum: N = args.sourcenum
     else: N = 2
-    if args.csvfile: df = pd.read_csv(args.csvfile)[['RA', 'DEC']]
-    else: df = pd.read_csv('catalogue/'+os.listdir('catalogue')[0])[0:N]
-
     if args.downloading == 1:
+        download = input('Paste here your url where to find the fits file: ')
+        fits_download = True
         Movie = MovieMaker(fits_download=True,
                            imsize=0.4,#defaul imsize
                            framerate=FRAMERATE)
@@ -43,32 +45,90 @@ def distance(obj_1, obj_2):
         file = 'fits/lockman_hole.fits'
         Movie = MovieMaker(fits_file=get_pkg_data_filename(file),
                            imsize=0.4,#defaul imsize
-                           framerate=FRAMERATE)
+                           framerate=FRAMERATE, zoom_effect=False)
 
-    start_coord = Movie.wcs.pixel_to_world(Movie.image_data.shape[0]/2, Movie.image_data.shape[0]/2)
-    start_dec = start_coord.dec.degree
-    start_ra = start_coord.ra.degree
-    Movie.zoom_in(N_frames=1000, first_time=True)
-    for n in range(len(df)):#stack multiple sources
-        if n > 0:
-            dist = distance([last_RA, last_DEC], [df['RA'].values[n], df['DEC'].values[n]])
-            move_to_frames = int(300*dist)
-        else:
-            dist = distance([start_ra, start_dec], [df['RA'].values[n], df['DEC'].values[n]])
-            move_to_frames = int(300*dist)
-        print(f'Number of frames {move_to_frames}')
-        Movie.move_to(N_frames=move_to_frames, ra=df['RA'].values[n], dec=df['DEC'].values[n])
-        Movie.zoom_in(N_frames=300, imsize_out=df['imsize'].values[n])
-        if ndf['imsize'].values[n]:
-            im_out = max(df['imsize'].values[n+1]+0.1, 0.3)
-        else:
-            im_out = max(df['imsize'].values[n] + 0.1, 0.3)
-        Movie.zoom_out(N_frames=300, imsize_out=im_out)
-        last_RA, last_DEC = df['RA'].values[n], df['DEC'].values[n]
-    Movie.move_to(N_frames=int(600*distance([start_ra, start_dec], [df['RA'].values[N-1], df['DEC'].values[N-1]])), ra=start_ra, dec=start_dec)
-    Movie.zoom_out(N_frames=1000, imsize_out=2)
-    Movie.record()
+    if args.csvfile:#go through all objects in csv file
+        df = pd.read_csv(args.csvfile)[['RA', 'DEC']]
+
+        start_coord = Movie.wcs.pixel_to_world(Movie.image_data.shape[0]/2, Movie.image_data.shape[0]/2)
+        start_dec = start_coord.dec.degree
+        start_ra = start_coord.ra.degree
+
+        Movie.zoom_in(N_frames=800, first_time=True)
+        for n in range(len(df)):#stack multiple sources
+            if n > 0:
+                dist = distance([last_RA, last_DEC], [df['RA'].values[n], df['DEC'].values[n]])
+                move_to_frames = int(250*dist)
+            else:
+                dist = distance([start_ra, start_dec], [df['RA'].values[n], df['DEC'].values[n]])
+                move_to_frames = int(250*dist)
+            print(f'Number of frames {move_to_frames}')
+            Movie.move_to(N_frames=move_to_frames, ra=df['RA'].values[n], dec=df['DEC'].values[n])
+            Movie.zoom_in(N_frames=300, imsize_out=df['imsize'].values[n])
+            if ndf['imsize'].values[n]:
+                im_out = max(df['imsize'].values[n+1]+0.1, 0.3)
+            else:
+                im_out = max(df['imsize'].values[n] + 0.1, 0.3)
+            Movie.zoom_out(N_frames=300, imsize_out=im_out)
+            last_RA, last_DEC = df['RA'].values[n], df['DEC'].values[n]
+        print(f"Number of frames {int(400*distance([start_ra, start_dec], [df['RA'].values[N-1], df['DEC'].values[N-1]]))}")
+        Movie.move_to(N_frames=int(400*distance([start_ra, start_dec], [df['RA'].values[N-1], df['DEC'].values[N-1]])), ra=start_ra, dec=start_dec)
+        Movie.zoom_out(N_frames=800, imsize_out=2)
+        Movie.record()
+
+        end = timer()
+        print(f'MovieMaker took {int(end - start)} seconds')
+
+    else:#go through whole field
+        from numpy import sqrt, abs, isnan
+        fits_header = Movie.wcs.to_header()
+
+        number_of_steps = int(fits_header['CRPIX1']*2/3500)
+        if number_of_steps%2==0:
+            number_of_steps+=1 # make number of steps uneven to come back in the center
+
+        # clean_image = Movie.image_data[~isnan(Movie.image_data).all(axis=1),:]
+        # clean_image = clean_image[:,~isnan(clean_image).all(axis=0)]
+        pix_x_size = fits_header['CRPIX1']*2 #pixel x axis size
+        pix_y_size = fits_header['CRPIX2']*2 #pixel y axis size
+        # x_cut = pix_x_size-clean_image.shape[0] #how much cut x
+        # y_cut = pix_y_size-clean_image.shape[1] #how much cut y
+        step_size_x = int(pix_x_size/number_of_steps)
+        step_size_y = int(pix_y_size/number_of_steps)
+        start_pix_x = int((pix_x_size)/(number_of_steps*2))
+        start_pix_y = int(pix_y_size*((number_of_steps*2-1)/(number_of_steps*2)))
+
+        pos_x = start_pix_x
+        pos_y = start_pix_y
+        positions = [[start_pix_x, start_pix_y]]
+        for i in range(number_of_steps-1):
+            pos_x+=step_size_x
+            positions.append([pos_x, pos_y])
+
+        for i in range(1,number_of_steps)[::-1]:
+            for j in range(i):
+                pos_y = pos_y + ((-1)**(i+number_of_steps))*step_size_y
+                positions.append([pos_x, pos_y])
+            for j in range(i):
+                pos_x = pos_x + ((-1)**(i+number_of_steps))*step_size_x
+                positions.append([pos_x, pos_y])
 
-    end = timer()
-    print(f'MovieMaker took {int(end - start)} seconds')
+        positions = [(p.ra.degree, p.dec.degree) for p in [Movie.wcs.pixel_to_world(position[0], position[1])
+                                                            for position in positions
+                                                            if not isNaN(Movie.image_data[position[0], position[1]])]]
+        Movie.imsize = 2*abs(fits_header['CDELT1']*fits_header['CRPIX1']/7)
+        Movie.zoom_in(N_frames=600, first_time=True)
+        start_coord = Movie.wcs.pixel_to_world(Movie.image_data.shape[0] / 2, Movie.image_data.shape[0] / 2)
+        start_dec = start_coord.dec.degree
+        start_ra = start_coord.ra.degree
+        for n, position in enumerate(positions):
+            if n==0:
+                dist = distance([start_ra, start_dec], [position[0], position[1]])
+            else:
+                dist = distance([position[0], position[1]], [positions[n-1][0], positions[n-1][1]])
+            move_to_frames = int(200 * dist)
+            print(f'Number of frames {move_to_frames}')
+            Movie.move_to(N_frames=move_to_frames, ra=position[0], dec=position[1])
+        Movie.zoom_out(N_frames=600, imsize_out=3)
+        Movie.record()
\ No newline at end of file
diff --git a/poster/scripts/imaging.py b/poster/scripts/imaging.py
index 6137d15..3c64737 100644
--- a/poster/scripts/imaging.py
+++ b/poster/scripts/imaging.py
@@ -15,7 +15,7 @@ class ImagingLofar:
     def __init__(self, fits_file=None, fits_download: bool=False, vmin: float = None, vmax: float = None,
-                 image_directory: str='poster/images', verbose=True):
+                 image_directory: str='poster/images', verbose: bool = True, zoom_effect: bool = True):
         """
         Make LOFAR images (also applicable on other telescope surveys)
         ------------------------------------------------------------
@@ -28,6 +28,7 @@ def __init__(self, fits_file=None, fits_download: bool=False, vmin: float = None
         """
         self.fits_file = fits_file
         self.verbose = verbose
+        self.zoom_effect = zoom_effect
         if self.verbose:
             print(f"Started imaging {fits_file.split('/')[-1].replace('.fits','').replace('_',' ').replace('.',' ').title()}...")
         if fits_download:
@@ -62,8 +63,10 @@ def __init__(self, fits_file=None, fits_download: bool=False, vmin: float = None
         if 'cutout' in self.fits_file:
             self.image_data = gaussian_filter(self.tonemap(image_data=self.image_data, b=0.25, threshold=self.vmin), sigma=3)
         else:
-            # self.image_data = gaussian_filter(self.tonemap(image_data=self.image_data, b=0.5, threshold=0), sigma=2)
-            self.image_data = gaussian_filter(self.tonemap(image_data=self.image_data, b=0.25, threshold=self.vmin/100), sigma=1)
+            if self.zoom_effect:
+                self.image_data = gaussian_filter(self.tonemap(image_data=self.image_data, b=0.5, threshold=0), sigma=2)
+            else:
+                self.image_data = gaussian_filter(self.tonemap(image_data=self.image_data, b=0.25, threshold=self.vmin / 100), sigma=1)
 
     def tonemap(self, image_data=None, b: float = 0.25, threshold: float = None):
         """
@@ -118,17 +121,17 @@ def imaging(self, image_data=None, wcs=None, image_name: str = 'Nameless', dpi:
         if 'cutout' in self.fits_file:
             plt.imshow(image_data, origin='lower', cmap=cmap, vmin=self.vmin*2)
         else:
-            plt.imshow(np.clip(image_data, a_min=None, a_max=self.vmax),
-                       norm=LogNorm(vmin=self.vmin/1.4, vmax=self.vmax), origin='lower', cmap=cmap)
-            #HIGH RES VIDEO:
-            # vmax = self.vmax
-            # vmin = min((2/max(imsize,0.2))*(self.vmin/100), self.vmax/20)
-            # if imsize<1:
-            # vmax/=max(imsize,0.2)
-            # plt.imshow(image_data,
-            # norm=SymLogNorm(linthresh=vmin*20, vmin=self.vmin/1.4, vmax=vmax), origin='lower', cmap=cmap)
-            #OLD VIDEO:
-            # plt.imshow(image_data, norm=SymLogNorm(linthresh=self.vmin/20, vmin=self.vmin/50, vmax=self.vmax), origin='lower', cmap=cmap)
+            if self.zoom_effect:
+                vmax = self.vmax
+                vmin = min((2/max(imsize,0.2))*(self.vmin/100), self.vmax/20)
+                if imsize<1:
+                    vmax/=max(imsize,0.2)
+                plt.imshow(image_data,
+                           norm=SymLogNorm(linthresh=vmin*20, vmin=self.vmin/1.4, vmax=vmax), origin='lower', cmap=cmap)
+            else:
+                plt.imshow(np.clip(image_data, a_min=None, a_max=self.vmax),
+                           norm=LogNorm(vmin=self.vmin / 1.4, vmax=self.vmax), origin='lower', cmap=cmap)
         if text:
             plt.annotate(
                 s=text,
diff --git a/video/scripts/moviemaker.py b/video/scripts/moviemaker.py
index 4bd6b89..8dbd01c 100644
--- a/video/scripts/moviemaker.py
+++ b/video/scripts/moviemaker.py
@@ -14,7 +14,8 @@ class MovieMaker(ImagingLofar):
     MovieMaker makes it possible to make individual frames and turn them into a video.
""" def __init__(self, fits_file: str = None, imsize: float = None, framerate: float = None, process: str = None, - fits_download: bool=False, new: bool = True, text: str = None): + fits_download: bool=False, new: bool = True, text: str = None, vmin: float = None, vmax: float = None, zoom_effect: bool = False, + output_file: str = 'video/frames'): """ :param fits_file: fits file name :param imsize: initial image size @@ -22,15 +23,18 @@ def __init__(self, fits_file: str = None, imsize: float = None, framerate: float :param process: process [multiprocess, multithread, None] :param fits_download: download fits file """ - super().__init__(fits_file = fits_file, image_directory='video/frames', verbose=False, fits_download = fits_download) + self.output_file = output_file + super().__init__(fits_file = fits_file, image_directory=self.output_file, verbose=False, fits_download=fits_download, vmin=vmin, vmax=vmax, zoom_effect=zoom_effect) self.process = process self.imsize = imsize self.framerate = framerate self.ra = None self.dec = None self.text = text + self.zoom_effect = zoom_effect if new: - os.system('rm -rf video/frames; mkdir video/frames') + os.system(f'rm -rf {output_file}; mkdir {output_file}') + def __call__(self, imsize: float = None, process: str = None): """ @@ -86,10 +90,10 @@ def make_frames(self): Record individual frames and save in video/frames/ ------------------------------------------------------------ """ - N_max = len(os.listdir('video/frames'))+len(self.ragrid) #max number of videos - N_min = len(os.listdir('video/frames')) #min number of videos + N_max = len(os.listdir(self.output_file))+len(self.ragrid) #max number of videos + N_min = len(os.listdir(self.output_file)) #min number of videos inputs = zip(range(N_min, N_max), self.ragrid, self.decgrid, self.imsizes, - np.clip(200/np.array(self.imsizes), a_min=250, a_max=600).astype(int)) + np.clip(200/np.array(self.imsizes), a_min=450, a_max=700).astype(int)) if self.process == "multithread": print(f"Multithreading") print(f"Might get error or bad result because multithreading is difficult with imaging.") @@ -177,7 +181,7 @@ def record(self, audio: str = None): ------------------------------------------------------------ :param audio: add audio (True or False). """ - os.system(f'rm movie.mp4; ffmpeg -f image2 -r {self.framerate} -start_number 0 -i video/frames/image_%05d.png -vf "scale=960:540,setsar=1" movie.mp4') + os.system(f'rm movie.mp4; ffmpeg -f image2 -r {self.framerate} -start_number 0 -i {self.output_file}/image_%05d.png movie.mp4') if audio: try: audio_file = input(audio)