From 439d429eb624abc2da6f6c08f4ab55e88486b8c6 Mon Sep 17 00:00:00 2001 From: Murad Abu-Khalaf Date: Thu, 25 Mar 2021 16:02:36 -0400 Subject: [PATCH] refactor open-loop tests --- testOpenLoopSynthesizer.py | 214 ++++++++++++++++++++++++++++++++++ trainViewSynthesizerNNet.py | 222 +++--------------------------------- 2 files changed, 231 insertions(+), 205 deletions(-) create mode 100644 testOpenLoopSynthesizer.py diff --git a/testOpenLoopSynthesizer.py b/testOpenLoopSynthesizer.py new file mode 100644 index 0000000..96a1a30 --- /dev/null +++ b/testOpenLoopSynthesizer.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python + +# Author: Murad Abu-Khalaf, MIT CSAIL. + +""" + Open-loop testing of the Synthesizer's ability to generate reference views. + +""" + +import matplotlib.pyplot as plt +import torch +from tqdm import tqdm +import os +import cv2 +import numpy as np + + +import trainViewSynthesizerNNet + +training_data = trainViewSynthesizerNNet.getNumPyTrainingData() +net = trainViewSynthesizerNNet.net +device = trainViewSynthesizerNNet.device + +###### Define the Test Methods ####### + +def showTrainingData(): + """ + Plots observed Views to serve as input to the synthesizer + along with observed views at desired distances that will + serve as ground truth for the output of the Synthesizer + during training. + + This simply shows the training data. All views are Camera views, and + none are synthesizer views. 
+ """ + print("Training Dataset Size:" + str(len(training_data))) + for idx in range(len(training_data)): + observed = training_data[idx][0] + desired_spacing = training_data[idx][1][0,0,0] + desired_view = training_data[idx][2] + + plt.figure(1, figsize=(9,5)) + plt.subplot(1,2,1) + plt.title("Observed View (Input to Synthesizer)") + plt.imshow(observed.transpose(1,2,0), cmap="viridis") + plt.show(block = False) + + plt.subplot(1,2,2) + plt.title("Ground Truth: Observed View for " + '{:4.2f}'.format(desired_spacing)) + plt.imshow(desired_view.transpose(1,2,0), cmap="viridis") + plt.show(block = False) + plt.pause(0.1) + + +def generateReferenceViewFromObservation(): + """ + Generates a reference view for a FIXED reference distance and FIXED camera view. + """ + idx = 747 # Picking an observation + observed = training_data[idx][0] + spacing = training_data[idx][1] + groundtruth = training_data[idx][2] + + plt.figure(2, figsize=(9,5)) + plt.subplot(1,3,1) + plt.axis("off") + plt.title("Observed View") + plt.imshow(observed.transpose(1,2,0)/255.0, cmap="viridis") + + generated = net(torch.Tensor([observed]).to(device), torch.Tensor(spacing).to(device)) + generated = generated.to('cpu').detach().numpy()[0] + plt.subplot(1,3,2) + plt.axis("off") + plt.title("Generated View for " + '{:4.2f}'.format(spacing.item())) + plt.imshow(generated.transpose(1,2,0)/255.0, cmap="viridis") + + plt.subplot(1,3,3) + plt.axis("off") + plt.title("Ground Truth View for " + '{:4.2f}'.format(spacing.item())) + plt.imshow(groundtruth.transpose(1,2,0)/255.0, cmap="viridis") + + plt.show(block = True) + + # Save the generated view for publication purposes + plt.figure(2,frameon=False) + plt.imshow(observed.transpose(1,2,0)/255.0, cmap="viridis") + plt.axis("off") + plt.savefig('observed.png',bbox_inches='tight', pad_inches=0) + + plt.figure(2,frameon=False) + plt.imshow(generated.transpose(1,2,0)/255.0, cmap="viridis") + plt.axis("off") + plt.savefig('generated.png',bbox_inches='tight', 
pad_inches=0) + + +def generateReferenceViewsFromObservation(): + """ + Generates reference views for a VARYING reference distance and FIXED camera view. + """ + idx = 150 # Picking an observation + observed = training_data[idx][0] + plt.figure(3, figsize=(9,5)) + plt.ion() + plt.subplot(1,2,1) + plt.title("Observed View") + plt.imshow(observed.transpose(1,2,0)/255.0, cmap="viridis") + + plt.subplot(1,2,2) + for s in tqdm(range(10,31,10)): + generated = net(torch.Tensor([observed]).to(device), torch.Tensor([[[[s]]]]).to(device)) + generated = generated.to('cpu').detach().numpy()[0] + plt.imshow(generated.transpose(1,2,0)/255.0, cmap="viridis") + plt.title("Generated View for " + '{:4.2f}'.format(s)) + plt.pause(1.00) + #plt.draw() + + input("Press [enter] to close.") + +def generateReferenceViewFromObservations(): + """ + Generates a reference view for a FIXED reference distance and VARYING camera views. + """ + s = 10 # Picking a desired spacing + + fig = plt.figure(4, figsize=(9,5)) + plt.ion() + sub1 = fig.add_subplot(1,2,1) + sub1.set_title("Observed View") + sub2 = fig.add_subplot(1,2,2) + sub2.set_title("Generated View for " + '{:4.2f}'.format(s)) + + for idx in tqdm(range(0,len(training_data),1)): + observed = training_data[idx][0] + generated = net(torch.Tensor([observed]).to(device), torch.Tensor([[[[s]]]]).to(device)) + generated = generated.to('cpu').detach().numpy()[0] + sub1.imshow(observed.transpose(1,2,0)/255.0, cmap="viridis") + sub2.imshow(generated.transpose(1,2,0)/255.0, cmap="viridis") + plt.pause(0.25) + #plt.draw() + + input("Press [enter] to close.") + + +def testGeneralizationDataSet(): + """ + Generates reference views for a VARYING reference distance and Fixed camera view. 
+ """ + + # Create NumPy tensors from images + IMG_HEIGHT = 128 + IMG_WIDTH = 128 + + TestingFolder = "CameraViewDistanceDataSet/TestingDataSet" + LABELS = [f for f in os.listdir(TestingFolder) if not f.startswith('.')] # Use this to avoid hidden files + LABELS.sort() + + test_data = [] + for label in tqdm(LABELS): + try: + path = os.path.join(TestingFolder, label) + img = cv2.imread(path, cv2.IMREAD_COLOR) # HxWxC + img = cv2.resize(img, (IMG_WIDTH, IMG_HEIGHT)) + img = img.transpose(2,0,1) # HxWxC ==> CxHxW + img = img[::-1,:,:] # BGR ==> RGB + test_data.append(np.array(img)) + except Exception as e: + print(e) + pass + + # Generate a reference view from an observation + idx = 333 # Picking an observation + observed = test_data[idx] + plt.figure(6, figsize=(9,5)) + plt.ion() + plt.subplot(1,2,1) + plt.title("Observed Camera Feed") + plt.imshow(observed.transpose(1,2,0)/255.0, cmap="viridis") + + plt.subplot(1,2,2) + for s in tqdm(range(10,31,10)): + generated = net(torch.Tensor([observed]).to(device), torch.Tensor([[[[s]]]]).to(device)) + generated = generated.to('cpu').detach().numpy()[0] + plt.imshow(generated.transpose(1,2,0)/255.0, cmap="viridis") + plt.title("Generated Scene View for Spacing " + '{:4.2f}'.format(s)) + plt.pause(1.00) + + input("Press [enter] to close.") + + # Save the generated view for publication purposes + plt.figure(6,frameon=False) + plt.imshow(observed.transpose(1,2,0)/255.0, cmap="viridis") + plt.axis("off") + plt.savefig('observed.png',bbox_inches='tight', pad_inches=0) + + plt.figure(6,frameon=False) + plt.imshow(generated.transpose(1,2,0)/255.0, cmap="viridis") + plt.axis("off") + plt.savefig('generated.png',bbox_inches='tight', pad_inches=0) + + +###### Choose the desired test by speciying a number ####### + +testID = 4 + +if testID == 0: + showTrainingData() +elif testID == 1: + generateReferenceViewFromObservation() +elif testID == 2: + generateReferenceViewsFromObservation() +elif testID == 3: + 
generateReferenceViewFromObservations() +elif testID == 4: + testGeneralizationDataSet() diff --git a/trainViewSynthesizerNNet.py b/trainViewSynthesizerNNet.py index c006090..937c844 100755 --- a/trainViewSynthesizerNNet.py +++ b/trainViewSynthesizerNNet.py @@ -8,7 +8,7 @@ It assumes a data set is available in the form of raw images and a text file for distances. Two flags: - REBUILD_DATA -- prepares a tensor from the training data set (default False) + REBUILD_DATA -- prepares a NumPy tensor from the training data set (default False) TRAIN_NN -- trains the neural network and replaces existing trained one (default False) """ @@ -99,13 +99,16 @@ def make_training_data(self): if REBUILD_DATA: acarImages.make_training_data() -training_data = [] -for townFolder in acarImages.TOWN_FOLDERS: - training_data_Town = np.load("training_data_tensor_" + townFolder + ".npy", allow_pickle=True) - if len(training_data) == 0: - training_data = training_data_Town - continue - training_data = np.concatenate((training_data, training_data_Town), axis=0) +def getNumPyTrainingData(): + """Returns the NumPy training data tensors""" + training_data = [] + for townFolder in acarImages.TOWN_FOLDERS: + training_data_Town = np.load("training_data_tensor_" + townFolder + ".npy", allow_pickle=True) + if len(training_data) == 0: + training_data = training_data_Town + continue + training_data = np.concatenate((training_data, training_data_Town), axis=0) + return training_data ###### Build Neural Network ####### @@ -283,9 +286,11 @@ def forward(self, xrgb, xphi): #quit() -###### Train Neural Network ####### +###### Prepare Torch Tensors from NumPy Tensors ####### import torch.optim as optim +training_data = getNumPyTrainingData() + img_feed = torch.Tensor([i[0] for i in training_data]) s = torch.Tensor([i[1] for i in training_data]).view(-1,1,1,1) img_syn = torch.Tensor([i[2] for i in training_data]) @@ -316,6 +321,9 @@ def forward(self, xrgb, xphi): loader = 
torch.utils.data.DataLoader(observed_dist_target, batch_size=BATCH_SIZE, shuffle=True, pin_memory = True, drop_last = True) def train(net): + """ + Train Neural Network + """ optimizer = optim.Adam(net.parameters(), lr=0.001) #loss_function = nn.MSELoss(reduction = 'mean') #loss_function = nn.SmoothL1Loss(reduction = 'mean') @@ -353,199 +361,3 @@ def train(net): else: net.load_state_dict(torch.load('viewSynthesizerNNet.pth', map_location=device)) net.eval() - - -###### Test Training Result ####### - -def test0(): - # Shows the training data only, no generated views or use of the NN - #idx = random.randint(0, len(training_data)) - print("Training Dataset Size:" + str(len(training_data))) - #print(train_s) - for idx in range(len(training_data)): - observed = training_data[idx][0] - desired_spacing = training_data[idx][1][0,0,0] - desired_view = training_data[idx][2] - - plt.figure(1, figsize=(9,5)) - plt.subplot(1,2,1) - plt.title("Observed View") - plt.imshow(observed.transpose(1,2,0), cmap="viridis") - plt.show(block = False) - - plt.subplot(1,2,2) - plt.title("Ground Truth View for " + '{:4.2f}'.format(desired_spacing)) - plt.imshow(desired_view.transpose(1,2,0), cmap="viridis") - plt.show(block = False) - plt.pause(0.1) - - -def test1(net): - # Generates a reference view: FIXED reference distance with FIXED camera view. 
- idx_ref = 80 # Picking a desired spacing - idx = 747 # Picking an observation - print(len(train_img0)) - print(len(training_data)) - observed = train_img0[idx].to('cpu').numpy() - plt.figure(2, figsize=(9,5)) - plt.subplot(1,3,1) - plt.axis("off") - plt.title("Observed View") - plt.imshow(observed.transpose(1,2,0)/255.0, cmap="viridis") - - generated = net(train_img0[idx:idx+1].to(device), train_s[idx_ref:idx_ref+1].to(device)) - generated = generated.to('cpu').detach().numpy()[0] - plt.subplot(1,3,2) - plt.axis("off") - plt.title("Generated View for " + '{:4.2f}'.format(train_s[idx_ref:idx_ref+1].item())) - plt.imshow(generated.transpose(1,2,0)/255.0, cmap="viridis") - - groundtruth = train_img[idx_ref].to('cpu').numpy() - plt.subplot(1,3,3) - plt.axis("off") - plt.title("Ground Truth View for " + '{:4.2f}'.format(train_s[idx_ref:idx_ref+1].item())) - plt.imshow(groundtruth.transpose(1,2,0)/255.0, cmap="viridis") - - plt.show(block = True) - - plt.figure(2,frameon=False) - plt.imshow(observed.transpose(1,2,0)/255.0, cmap="viridis") - plt.axis("off") - plt.savefig('observed.png',bbox_inches='tight', pad_inches=0) - - plt.figure(2,frameon=False) - plt.imshow(generated.transpose(1,2,0)/255.0, cmap="viridis") - plt.axis("off") - plt.savefig('generated.png',bbox_inches='tight', pad_inches=0) - - -def test2(net): - # Generates a reference view: FIXED camera view with VARYING reference distance - idx = 150 # Picking an observation - camera_feed_ = train_img0[idx].to('cpu').numpy() - plt.figure(3, figsize=(9,5)) - plt.ion() - plt.subplot(1,2,1) - plt.title("Observed View") - plt.imshow(camera_feed_.transpose(1,2,0)/255.0, cmap="viridis") - #plt.show(block = False) - - camera_feed = train_img0[idx:idx+1].to(device) - plt.subplot(1,2,2) - for idx_ref in tqdm(range(5,40,1)): - img_hat = net(camera_feed, torch.Tensor([[[[idx_ref]]]]).to(device)) - img_hat = img_hat.to('cpu').detach().numpy()[0] - plt.imshow(img_hat.transpose(1,2,0)/255.0, cmap="viridis") - 
plt.title("Generated View for " + '{:4.2f}'.format(idx_ref)) - plt.pause(0.25) - #plt.draw() - - input("Press [enter] to close.") - -def test3(net): - # Generates a reference view: FIXED reference distance with VARYING camera views - idx_ref = 10 # Picking a desired spacing - - fig = plt.figure(4, figsize=(9,5)) - plt.ion() - sub1 = fig.add_subplot(1,2,1) - sub1.set_title("Observed View") - sub2 = fig.add_subplot(1,2,2) - sub2.set_title("Generated View for " + '{:4.2f}'.format(idx_ref)) - - for idx in tqdm(range(0,len(train_img0),1)): - camera_feed_ = train_img0[idx].to('cpu').numpy() - camera_feed = train_img0[idx:idx+1].to(device) - img_hat = net(camera_feed, torch.Tensor([[[[idx_ref]]]]).to(device)) - img_hat = img_hat.to('cpu').detach().numpy()[0] - sub1.imshow(camera_feed_.transpose(1,2,0)/255.0, cmap="viridis") - sub2.imshow(img_hat.transpose(1,2,0)/255.0, cmap="viridis") - plt.pause(0.25) - #plt.draw() - - input("Press [enter] to close.") - - -def testGeneralizationDataSet(net): - # Generates a reference view: FIXED reference distance with FIXED camera view. 
- - IMG_HEIGHT = 128 - IMG_WIDTH = 128 - - CAMERA_IMAGES_FOLDER_Web = "CameraViewDistanceDataSet/GeneralizationDataSet" - LABELS_Web = [f for f in os.listdir(CAMERA_IMAGES_FOLDER_Web) if not f.startswith('.')] # Use this to avoid hidden files - LABELS_Web.sort() - - test_data = [] - for label in tqdm(LABELS_Web): - try: - path = os.path.join(CAMERA_IMAGES_FOLDER_Web, label) - img = cv2.imread(path, cv2.IMREAD_COLOR) # HxWxC - img = cv2.resize(img, (IMG_WIDTH, IMG_HEIGHT)) - img = img.transpose(2,0,1) # HxWxC ==> CxHxW - img = img[::-1,:,:] # BGR ==> RGB - test_data.append(np.array(img)) - except Exception as e: - print(e) - pass - - img_web = torch.Tensor([i for i in test_data]) - - idx = 333 - camera_feed_ = img_web[idx].to('cpu').numpy() - plt.figure(6, figsize=(9,5)) - plt.ion() - plt.subplot(1,2,1) - plt.title("Observed Camera Feed") - plt.imshow(camera_feed_.transpose(1,2,0)/255.0, cmap="viridis") - - camera_feed = img_web[idx:idx+1].to(device) - plt.subplot(1,2,2) - for idx_ref in tqdm(range(10,21,10)): - img_hat = net(camera_feed, torch.Tensor([[[[idx_ref]]]]).to(device)) - img_hat = img_hat.to('cpu').detach().numpy()[0] - plt.imshow(img_hat.transpose(1,2,0)/255.0, cmap="viridis") - plt.title("Generated Scene View for Spacing " + '{:4.2f}'.format(idx_ref)) - plt.pause(0.25) - - input("Press [enter] to close.") - - plt.figure(6,frameon=False) - plt.imshow(camera_feed_.transpose(1,2,0)/255.0, cmap="viridis") - plt.axis("off") - plt.savefig('observed.png',bbox_inches='tight', pad_inches=0) - - plt.figure(6,frameon=False) - plt.imshow(img_hat.transpose(1,2,0)/255.0, cmap="viridis") - plt.axis("off") - plt.savefig('generated.png',bbox_inches='tight', pad_inches=0) - - -def testDiff(net,yref,y): - i2 = yref - i1 = y - plt.figure(4) - plt.show(block = False) - img1 = net(s_sorted_A[i1:(i1+1)].to(device)) - img1 = img1.to('cpu') - img1 = img1.detach().numpy()[0] - img2 = net(s_sorted_A[i2:(i2+1)].to(device)) - img2 = img2.to('cpu') - img2 = img2.detach().numpy()[0] 
- imgdiff = img2 - img1 - print(imgdiff) - imgdiffabs = cv2.convertScaleAbs(imgdiff) - plt.imshow(imgdiffabs.transpose(1,2,0)/255.0, cmap="viridis") - plt.draw() - K = np.zeros((3,150,200)) - K[:,:,75:125] = 1 - u = np.sum(K*(imgdiff)/255.0) - return img1, img2, u - - - -#test0() -#test1(net) -#test2(net) -#test3(net) -#testGeneralizationDataSet(net) \ No newline at end of file