clc
clear all
close all
warning off all;
% Xavier Aguas.
% Artificial Intelligence and Computer Vision Research Lab
% Escuela Politécnica Nacional, Quito - Ecuador
% Jun 5, 2020
addpath('ReadDataset');
addpath('Preprocessing');
addpath('Segmentation');
addpath('DTW distance');
addpath('TrainingModel');
addpath('Feature extraction');
addpath('libs'); % Jonathan's library
gestures = {'noGesture', 'open', 'fist', 'waveIn', 'waveOut', 'pinch'};
%% ======================= Model Configuration ===========================
load options.mat
% Resetting the random seed makes the results reproducible
rng('default');
%%
userFolder = 'testing';
folderData = [userFolder 'JSON'];
filesInFolder = dir(folderData);
numFiles = length(filesInFolder);
userProcessed = 0;
% responses.userGroup = userFolder;
gestures = {'noGesture', 'open', 'fist', 'waveIn', 'waveOut', 'pinch'};
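% Each user is expected to have its own subfolder inside folderData
% ('testingJSON'), containing a JSON file with the same name as the
% subfolder; that file is read and decoded inside the loop below.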
for user_i = 1:numFiles
    if ~(strcmpi(filesInFolder(user_i).name, '.') || strcmpi(filesInFolder(user_i).name, '..') || strcmpi(filesInFolder(user_i).name, '.DS_Store'))
        %% Acquisition
        userProcessed = userProcessed + 1;
        file = [folderData '/' filesInFolder(user_i).name '/' filesInFolder(user_i).name '.json'];
        text = fileread(file);
        user = jsondecode(text);
        fprintf('Processing data from user: %d / %d\n', userProcessed, numFiles-2);
        close all;
        % Reading the training samples
        version = 'training';
        currentUserTrain = recognitionModel(user, version, gestures, options);
        [train_RawX_temp, train_Y_temp] = currentUserTrain.getTotalXnYByUser();
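        % train_RawX_temp / train_Y_temp presumably hold the raw EMG
        % recordings of the training repetitions and their gesture labels,
        % one group per class in `gestures` (an assumption based on how they
        % are merged by makeSingleSet below).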
        %% Preprocessing
        % Filter applied to the raw EMG
        train_FilteredX_temp = currentUserTrain.preProcessEMG(train_RawX_temp);
        % Making a single set with the training samples of all the classes
        [filteredDataX, dataY] = currentUserTrain.makeSingleSet(train_FilteredX_temp, train_Y_temp);
        % Finding the EMG that is the center of each class
        bestCenters = currentUserTrain.findCentersOfEachClass(filteredDataX, dataY);
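        % bestCenters is presumably the most representative filtered EMG of
        % each class, i.e. the sample closest (in DTW terms) to the rest of
        % its class (an assumption about findCentersOfEachClass).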
        %% Feature Extraction
        % Feature extraction by computing the DTW distance
        dataX = currentUserTrain.featureExtraction(filteredDataX, bestCenters);
        % Preprocessing the feature vectors
        nnModel = currentUserTrain.preProcessFeatureVectors(dataX);
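        % Illustration (an assumption, not taken from featureExtraction):
        % each sample x would be described by its DTW distance to every class
        % center, e.g. f(x) = [dtw(x,c_1), ..., dtw(x,c_6)], giving one
        % feature per gesture. See the dtwFeatureSketch local function at
        % the end of this file for a minimal sketch.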
        %% Training
        % Training the feed-forward NN
        nnModel.model = currentUserTrain.trainSoftmaxNN(nnModel.dataX, dataY);
        nnModel.numNeuronsLayers = currentUserTrain.numNeuronsLayers;
        nnModel.transferFunctions = currentUserTrain.transferFunctions;
        nnModel.centers = bestCenters;
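        % nnModel now bundles everything needed at test time: the trained
        % softmax network, the layer sizes and transfer functions used to
        % build it, and the DTW class centers of this user's training data.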
        %% Testing
        % Reading the testing samples
        version = 'testing';
        currentUserTest = recognitionModel(user, version, gestures, options); %%gestures 2 6
        test_RawX = currentUserTest.getTotalXnYByUser();
        % Classification
        [predictedSeq, timeClassif, vectorTime] = currentUserTest.classifyEMG_SegmentationNN(test_RawX, nnModel);
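        % classifyEMG_SegmentationNN presumably segments the raw test EMG
        % into windows, extracts the DTW features against nnModel.centers
        % and classifies each window with the trained network, returning the
        % predicted label sequence, the classification time and the time
        % stamp of each window (an assumption based on the output names).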
        % Post-processing labels
        [predictedLabels, timePos] = currentUserTest.posProcessLabels(predictedSeq);
        % Computing the processing time
        estimateTime = currentUserTest.computeTime(timeClassif, timePos);
        % Concatenating the predictions of all the users for computing the errors
        responses.(version).(user.userInfo.name) = currentUserTest.recognitionResults(predictedLabels, predictedSeq, timeClassif, vectorTime, 'testing');
    end
    clc
end
currentUserTest.generateResultsJSON(responses);
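
%% ------------------------------------------------------------------------
% Illustrative sketch only (not part of the original pipeline). It shows how
% a DTW-based feature vector, one distance per gesture class, could be built
% from the class centers found above. The function name and inputs
% (emgSample, classCenters) are hypothetical; the actual featureExtraction
% method of recognitionModel may work differently. It uses dtw from the
% Signal Processing Toolbox and needs R2016b or newer, since it is a local
% function inside a script.
function featVec = dtwFeatureSketch(emgSample, classCenters)
    % emgSample    : filtered EMG of one sample (channels along the rows)
    % classCenters : cell array with one center EMG per gesture class
    numClasses = numel(classCenters);
    featVec = zeros(1, numClasses);
    for k = 1:numClasses
        % DTW distance between the sample and the k-th class center
        featVec(k) = dtw(emgSample, classCenters{k});
    end
end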