<!DOCTYPE html>
<html lang="en" dir="ltr">
<head>
<meta charset="utf-8">
<title>Posture Analyzer</title>
<link rel="stylesheet" href="./styles.css">
<style>
body {
background-color: #000;
color: #fff;
}
ul {
list-style-type: none;
margin: 0;
padding: 0;
overflow: hidden;
background-color: rgb(220, 147, 236);
}
li {
float: left;
}
li a {
display: block;
color: white;
text-align: center;
padding: 14px 16px;
text-decoration: none;
}
li a:hover {
background-color: rgb(188, 217, 236);
}
button {
margin: auto;
padding: 10px;
font-size: large;
font-weight: 700;
}
.concanvas {
display: flex;
width: 90vw;
/* justify-content: center; */
margin: auto;
margin-top: 45px;
}
#canvas {
margin: auto;
}
#label-container {
margin-top: 20px;
text-align: center;
}
</style>
</head>
<body>
<div>
<h1 style="text-align:center">Posture Analyzer</h1>
</div>
<br>
<br>
<div>
<p style="text-align:center"> Good posture includes sitting upright, keeping the top third of the monitor at eye level, and keeping your ears aligned over your shoulders. </p>
<p style="text-align:center"> Bad posture includes slouching, uneven shoulders, and a tilted head. </p>
<p style="text-align:center"> Neutral is reported when no one is visible in the frame. </p>
</div>
<div style="display: flex; margin-top: 80px;">
<button type="button" onclick="init()">Start</button>
</div>
<div class="overlay">
</div>
<div class="concanvas"><canvas id="canvas"></canvas></div>
<div id="label-container"></div>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.3.1/dist/tf.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/@teachablemachine/pose@0.8/dist/teachablemachine-pose.min.js"></script>
<script type="text/javascript">
// More API functions here:
// https://github.com/googlecreativelab/teachablemachine-community/tree/master/libraries/pose
// the link to your model provided by Teachable Machine export panel
const URL = "https://teachablemachine.withgoogle.com/models/OuzZUBmwt/";
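// Note: this constant shadows the built-in window.URL constructor. That is
// harmless here because window.URL is never used, but renaming it (for example
// to MODEL_URL) would avoid confusion.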
let model, webcam, ctx, labelContainer, maxPredictions;
async function init() {
const modelURL = URL + "model.json";
const metadataURL = URL + "metadata.json";
// load the model and metadata
// Refer to tmImage.loadFromFiles() in the API to support files from a file picker
// Note: the pose library adds a tmPose object to your window (window.tmPose)
model = await tmPose.load(modelURL, metadataURL);
maxPredictions = model.getTotalClasses();
// Convenience function to setup a webcam
const size = 200;
const flip = true; // whether to flip the webcam
webcam = new tmPose.Webcam(size, size, flip); // width, height, flip
await webcam.setup(); // request access to the webcam
await webcam.play();
window.requestAnimationFrame(loop);
// append/get elements to the DOM
const canvas = document.getElementById("canvas");
canvas.width = size; canvas.height = size;
ctx = canvas.getContext("2d");
labelContainer = document.getElementById("label-container");
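// One div per class is created below; the class names themselves come from the
// model's metadata.json and are expected to be the Good/Bad/Neutral posture
// labels described on the page above.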
for (let i = 0; i < maxPredictions; i++) { // and class labels
labelContainer.appendChild(document.createElement("div"));
}
}
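// Optional sketch (not wired to the page as written): a wrapper around init()
// that surfaces errors such as a denied camera permission or an unreachable
// model URL instead of failing silently. The function name and alert text are
// illustrative assumptions.
async function initWithErrorHandling() {
try {
await init();
} catch (err) {
console.error(err);
alert("Could not start the Posture Analyzer: " + err.message);
}
}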
async function loop(timestamp) {
webcam.update(); // update the webcam frame
await predict();
window.requestAnimationFrame(loop);
}
async function predict() {
// Prediction #1: run input through posenet
// estimatePose can take in an image, video or canvas html element
const { pose, posenetOutput } = await model.estimatePose(webcam.canvas);
// Prediction 2: run input through teachable machine classification model
const prediction = await model.predict(posenetOutput);
for (let i = 0; i < maxPredictions; i++) {
const classPrediction =
prediction[i].className + ": " + prediction[i].probability.toFixed(2);
labelContainer.childNodes[i].innerHTML = classPrediction;
}
// finally draw the poses
drawPose(pose);
}
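// Optional helper (illustrative; not called anywhere on this page): returns the
// single most probable class from the prediction array, in case the UI should
// show one verdict such as "Good" or "Bad" instead of the full probability list.
function topPrediction(prediction) {
return prediction.reduce((best, p) => p.probability > best.probability ? p : best);
}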
function drawPose(pose) {
if (webcam.canvas) {
ctx.drawImage(webcam.canvas, 0, 0);
// draw the keypoints and skeleton
if (pose) {
const minPartConfidence = 0.5;
tmPose.drawKeypoints(pose.keypoints, minPartConfidence, ctx);
tmPose.drawSkeleton(pose.keypoints, minPartConfidence, ctx);
}
}
}
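// Optional sketch: releasing the camera when the user is done. This assumes the
// library's Webcam object exposes a stop() method (guarded with a typeof check
// in case the installed version does not); it could be wired to a "Stop" button.
function stopAnalyzer() {
if (webcam && typeof webcam.stop === "function") {
webcam.stop(); // stops the underlying media tracks
}
}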
</script>
</body>
</html>