testing RBF and Linear model #2

Open · wants to merge 3 commits into base: main
Binary file added Outputs/svm_rbf_full_feature_vector.jpg
Binary file not shown.
10,970 changes: 2,479 additions & 8,491 deletions data_set_v1.csv

Large diffs are not rendered by default.

2,738 changes: 2,738 additions & 0 deletions data_set_v2.csv

Large diffs are not rendered by default.
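For context: data_set_v2.csv is the feature table that the reworked main.py below writes out, one row per image with the f_a_*/f_b_*/f_c_* shape features, the new ind_* independent features, and the metadata_label target. A minimal loading sketch, assuming only the column names visible in this diff; the X/y split is illustrative, not part of the PR:

import pandas as pd

# Load the feature table the way main.py's read_csv call suggests (index_col=0).
df = pd.read_csv('data_set_v2.csv', index_col=0)

# Target: 1 = malign ('mel'), 0 = benign, per the labeling in main.py.
y = df['metadata_label']

# Features: every f_* and ind_* column from the img_feature_list dict.
feature_cols = [c for c in df.columns if c.startswith(('f_', 'ind_'))]
X = df[feature_cols]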

Binary file added source/__pycache__/postprocessing.cpython-38.pyc
Binary file not shown.
Binary file added source/__pycache__/preprocessing.cpython-38.pyc
Binary file not shown.
Binary file added source/__pycache__/segmentation.cpython-38.pyc
Binary file not shown.
Binary file added source/__pycache__/svm.cpython-38.pyc
Binary file not shown.
Binary file added source/__pycache__/utilities.cpython-38.pyc
Binary file not shown.
88 changes: 53 additions & 35 deletions source/main.py
@@ -1,10 +1,11 @@
from utilities import *
from preprocessing import *
import segmentation
from utilities import Utilities
from preprocessing import Preprocessing
from segmentation import *
import svm
from postprocessing import Postprocessing
import time
import pandas as pd
import matplotlib.pyplot as plt


def compare_segmentation_methods():
@@ -55,34 +56,44 @@ def compare_segmentation_methods():
# ISIC_0027861

if __name__ == "__main__":

#col_names = ['f_a_0', 'f_a_1', 'f_a_2', 'f_a_3', 'f_b_0', 'f_c_0', 'f_c_1', 'f_c_2', 'f_c_3', 'f_c_4']

training_data = svm.Prediction.data_frames(pd.read_csv('data_set_v1.csv' , index_col=False))

print("training_data=", type(training_data))

model_prediction = svm.Prediction.grid_search(training_data)
training_data = svm.Prediction.data_frames(pd.read_csv('data_set_v1.csv' , index_col=0))

'''
data_set_path = "D:/Uni/WS 22-23/Digitale Bildverarbeitung/common_dataset/Dataset/" # Ghassan
#data_set_path = "C:/Users/ancik/Documents/GitHub/archive/HAM10000_images/"
# training_data = svm.Prediction.data_frames(pd.read_csv('data_set_v2.csv' , index_col=0))

# print("training_data=", type(training_data))


model_prediction = svm.Prediction.grid_search_RBF(training_data)

"""
# Define Data set Path

# data_set_path = "D:/Uni/WS 22-23/Digitale Bildverarbeitung/common_dataset/Dataset/" # Ghassan
data_set_path = "C:/Users/Yazan/Desktop/DBV literature/Data/Data_set" # Yazan
#data_set_path = "C:/Users/ancik/Documents/GitHub/archive/HAM10000_images/" # Anna

# features, independent_features = svm.Prediction.feature_extraction(data_set_path)
data_set = []

# start timer
start_time = time.process_time()

# Metadata loading
# load HAM10000 dataset labels
dataset_metadata_path = "D:/Uni/WS 22-23/Digitale Bildverarbeitung/dataset/HAM10000_metadata.csv"

dataset_metadata_path = "C:/Users/Yazan/Desktop/DBV literature/Data/HAM10000_metadata.csv"

# which labels from metadata we consider malign=positive=1 (others benign=0=negative)
list_of_malign_labels = ['mel', 'bcc'] # bcc rarely metastasizes
list_of_malign_labels = ['mel'] # bcc rarely metastasizes
meta_data = Utilities.extract_labels_HAM10000(dataset_metadata_path, list_of_malign_labels)

# TODO: rename gen_file_names to generate_file_paths

images_paths = Utilities.gen_file_names(data_set_path)

img_count = 0
img_failed = 0
for img_path in images_paths:
@@ -100,12 +111,14 @@ def compare_segmentation_methods():
gamma_image = Preprocessing.gamma_correction(img, gamma=0.85)
blured_img = Preprocessing.blur(gamma_image)

binary_image = segmentation.NormalizedOtsuWithAdaptiveThresholding.segment(blured_img)

binary_image = NormalizedOtsuWithAdaptiveThresholding.segment(blured_img)
# feature extraction
longest_contour = Postprocessing.find_contours(binary_image)
features = Postprocessing.feature_extractrion(img_number, longest_cntr=longest_contour, image_shape=binary_image.shape)

features , independent_features = Postprocessing.feature_extractrion(img_number, longest_cntr=longest_contour, image_shape=binary_image.shape)

print("Independent Features ", independent_features)

img_feature_list = { 'img_number': img_number,

'metadata_label': meta_data[img_number], # 1 for malign etc. positive, 0 for benign etc. negative
@@ -121,26 +134,31 @@ def compare_segmentation_methods():
'f_c_1':features[2][1],
'f_c_2':features[2][2],
'f_c_3':features[2][3],
'f_c_4':features[2][4]
'f_c_4':features[2][4],

'ind_0':independent_features[0],
'ind_1':independent_features[1],
'ind_2':independent_features[2],
'ind_3':independent_features[3],
'ind_4':independent_features[4]

}
if (None in img_feature_list.values()): img_failed += 1

data_set.append(img_feature_list)

Utilities.save_dataset(dataset=data_set, file_path="./data_set.csv", only_succesfull=True)
Utilities.save_dataset(dataset=data_set, file_path="./data_set_v2.csv", only_succesfull=True)

end_time = time.process_time()
end_time = time.process_time()

total_time = (end_time - start_time)*1000 # in millis
total_time = (end_time - start_time)*1000 # in millis

avg_time = total_time / img_count

print("total_time = %.0f min" % (total_time/1000/60))
print("avg_time = %.0f ms per image" % avg_time)
print("img_failed: %d ... %.1f%% of total images" %(img_failed, img_failed/img_count*100))
print("img_count", img_count)
'''
# print("complete_data_set=", data_set)
avg_time = total_time / img_count

print("total_time = %.0f min" % (total_time/1000/60))
print("avg_time = %.0f ms per image" % avg_time)
print("img_failed: %d ... %.1f%% of total images" %(img_failed, img_failed/img_count*100))
print("img_count", img_count)
"""




15 changes: 10 additions & 5 deletions source/postprocessing.py
@@ -36,9 +36,9 @@ def feature_extractrion(image_number, longest_cntr, image_shape, open_contour_th
#print("longest_cntr=", longest_cntr)

first_tuple = (None, None, None, None)
ghassan_tuple = (None)
ellipse_tuple = (None)
third_tuple = (None, None, None, None, None)

independent_features = (None, None, None, None, None)
if longest_cntr is not None:

largest_area = cv2.contourArea(longest_cntr)
@@ -75,6 +75,8 @@ def feature_extractrion(image_number, longest_cntr, image_shape, open_contour_th

'''since only closed contours are considered, the param @closed is always set to True'''
perimeter = cv2.arcLength(longest_cntr, True)



''' Paper: Melanoma Skin Cancer Detection Using Image Processing and Machine Learning Techniques '''
ir_A = perimeter / largest_area
@@ -93,9 +95,10 @@ def feature_extractrion(image_number, longest_cntr, image_shape, open_contour_th

cv2.ellipse(external_contours,ellipse,(255),2)

ghassan_tuple = (ellipse_irrigularity)
ellipse_tuple = (ellipse_irrigularity)

''' Paper: Computer aided Melanoma skin cancer detection using Image Processing ..'''
''' Paper: Computer aided Melanoma skin cancer detection using Image Processing ..'''

Circularity_indx = (4*largest_area*np.pi) / perimeter**2

ir_A = perimeter / largest_area
@@ -110,6 +113,8 @@ def feature_extractrion(image_number, longest_cntr, image_shape, open_contour_th

# cv2.ellipse(external_contours,ellipse,255,2)
cv2.circle(external_contours, (com_x, com_y), 5, 255, -1)

independent_features = (perimeter,largest_area,minor_diameter,major_diameter,ellipse_irrigularity)

else:
#cv2.putText(external_contours,"Unable to detect closed contours!",(50,250),
@@ -118,4 +123,4 @@ def feature_extractrion(image_number, longest_cntr, image_shape, open_contour_th
print("Unable to detect closed contours to this image:!" + str(image_number))


return [first_tuple, ghassan_tuple, third_tuple]
return [first_tuple, ellipse_tuple, third_tuple] , independent_features
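To summarize the postprocessing change: feature_extractrion now also returns independent_features = (perimeter, largest_area, minor_diameter, major_diameter, ellipse_irrigularity), the raw measurements behind the derived ratios. A self-contained sketch of those measurements on a binary mask, using the same OpenCV calls as the diff; the irregularity formula is an assumption, since its computation falls outside the shown hunks:

import cv2
import numpy as np

def shape_features(binary_image):
    # Keep the largest external contour of the segmented lesion, as
    # Postprocessing.find_contours does for the lesion mask.
    contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None
    longest_cntr = max(contours, key=cv2.contourArea)
    if len(longest_cntr) < 5:  # fitEllipse needs at least 5 points
        return None

    largest_area = cv2.contourArea(longest_cntr)
    perimeter = cv2.arcLength(longest_cntr, True)  # closed contour, as in the diff

    # Derived circularity index from the diff (an f_c_* feature): 1.0 for a
    # perfect circle, smaller for irregular borders.
    circularity_indx = (4 * largest_area * np.pi) / perimeter ** 2

    # fitEllipse returns (center, axis lengths, rotation angle).
    (cx, cy), (d1, d2), angle = cv2.fitEllipse(longest_cntr)
    minor_diameter, major_diameter = sorted((d1, d2))

    # Assumed definition: axis ratio as a simple ellipse irregularity measure;
    # the diff's ellipse_irrigularity formula is not visible in these hunks.
    ellipse_irregularity = major_diameter / minor_diameter

    return perimeter, largest_area, minor_diameter, major_diameter, ellipse_irregularity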