-
Notifications
You must be signed in to change notification settings - Fork 0
/
objDetect.js
128 lines (99 loc) · 3.85 KB
/
objDetect.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
/**
* Created by chad hart on 11/30/17.
* Client side of Tensor Flow Object Detection Web API
* Written for webrtcHacks - https://webrtchacks.com
*/
//Parameters — read from data-* attributes on the <script id="objDetect"> tag,
//with defaults applied when an attribute is absent.
const s = document.getElementById('objDetect');
const sourceVideo = s.getAttribute("data-source"); //the source video to use
const uploadWidth = s.getAttribute("data-uploadWidth") || 640; //the width of the upload file
//NOTE(review): getAttribute returns a string, so data-mirror="false" is truthy —
//any present value enables mirroring; only an absent attribute yields false.
const mirror = s.getAttribute("data-mirror") || false; //mirror the boundary boxes
const scoreThreshold = s.getAttribute("data-scoreThreshold") || 0.5; //minimum score for the API to report a detection
const apiServer = s.getAttribute("data-apiServer") || window.location.origin + '/image'; //the full TensorFlow Object Detection API server url

//Video element selector
//FIX: `v` was previously assigned without a declaration, creating an implicit global.
const v = document.getElementById(sourceVideo);

//for starting events — detection starts only once BOTH flags are true
let isPlaying = false;
let gotMetadata = false;

//Canvas setup
//create an off-DOM canvas to grab an image for upload
const imageCanvas = document.createElement('canvas');
const imageCtx = imageCanvas.getContext("2d");

//create a canvas for drawing object boundaries (appended to the page so it is visible)
const drawCanvas = document.createElement('canvas');
document.body.appendChild(drawCanvas);
const drawCtx = drawCanvas.getContext("2d");
//draw boxes and labels on each detected object
function drawBoxes(objects) {
//clear the previous drawings
drawCtx.clearRect(0, 0, drawCanvas.width, drawCanvas.height);
//filter out objects that contain a class_name and then draw boxes and labels on each
objects.filter(object => object.class_name).forEach(object => {
let x = object.x * drawCanvas.width;
let y = object.y * drawCanvas.height;
let width = (object.width * drawCanvas.width) - x;
let height = (object.height * drawCanvas.height) - y;
//flip the x axis if local video is mirrored
if (mirror) {
x = drawCanvas.width - (x + width)
}
drawCtx.fillText(object.class_name + " - " + Math.round(object.score * 100) + "%", x + 5, y + 20);
drawCtx.strokeRect(x, y, width, height);
});
}
//Add file blob to a form and post it to the object detection API server.
//On a 200 response the detections are drawn, then the next video frame is
//captured and posted — forming a continuous self-perpetuating detection loop.
function postFile(file) {
    //Set options as form data
    let formdata = new FormData();
    formdata.append("image", file);
    formdata.append("threshold", scoreThreshold);

    let xhr = new XMLHttpRequest();
    xhr.open('POST', apiServer, true);
    xhr.onload = function () {
        if (this.status === 200) {
            let objects = JSON.parse(this.response);
            //draw the boxes
            drawBoxes(objects);
            //Save and send the next image
            imageCtx.drawImage(v, 0, 0, v.videoWidth, v.videoHeight, 0, 0, uploadWidth, uploadWidth * (v.videoHeight / v.videoWidth));
            imageCanvas.toBlob(postFile, 'image/jpeg');
        }
        else {
            //a non-200 response intentionally stops the loop; log the failed request
            console.error(xhr);
        }
    };
    //FIX: a network-level failure (server down, CORS, DNS) fires `error`, not
    //`load` — previously there was no handler, so the detection loop halted
    //silently with no diagnostic at all.
    xhr.onerror = function () {
        console.error("Request to " + apiServer + " failed", xhr);
    };
    xhr.send(formdata);
}
//Start object detection
//Sizes the canvases from the (now known) video dimensions, sets drawing
//styles, then captures and posts the first frame to kick off the loop.
function startObjectDetection() {
    console.log("starting object detection");

    //upload height that preserves the video's aspect ratio at uploadWidth
    const uploadHeight = uploadWidth * (v.videoHeight / v.videoWidth);

    //Set canvas sizes based on the input video
    drawCanvas.width = v.videoWidth;
    drawCanvas.height = v.videoHeight;
    imageCanvas.width = uploadWidth;
    imageCanvas.height = uploadHeight;

    //Some styles for the drawcanvas
    drawCtx.lineWidth = 4;
    drawCtx.strokeStyle = "cyan";
    drawCtx.font = "20px Verdana";
    drawCtx.fillStyle = "cyan";

    //Save and send the first image
    imageCtx.drawImage(v, 0, 0, v.videoWidth, v.videoHeight, 0, 0, uploadWidth, uploadHeight);
    imageCanvas.toBlob(postFile, 'image/jpeg');
}
//Starting events
//Detection needs both the video's dimensions (loadedmetadata) and actual
//playback (playing); whichever event fires second triggers the start.
v.onloadedmetadata = () => {
    console.log("video metadata ready");
    gotMetadata = true;
    if (isPlaying) {
        startObjectDetection();
    }
};

v.onplaying = () => {
    console.log("video playing");
    isPlaying = true;
    if (gotMetadata) {
        startObjectDetection();
    }
};