-
Notifications
You must be signed in to change notification settings - Fork 1
/
index.html
313 lines (271 loc) · 16 KB
/
index.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<!-- Bootstrap CSS, pinned with SRI.
     NOTE(review): the package version was mangled to "[email protected]" by the
     page scrape; restore the real pinned version that matches this
     integrity hash before deploying. -->
<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-giJF6kkoqNQ00vy+HMDP7azOuL0xtbfIcaT9wjKHr8RbDVddVHyTfAAsrekwKmP1" crossorigin="anonymous">
<link href="styles.css" rel="stylesheet">
<!-- rel="icon" is the standard keyword ("shortcut" is obsolete), and void
     elements take no trailing slash -->
<link rel="icon" type="image/x-icon" href="robo.ico">
<title>RESCON 1.0</title>
</head>
<body>
<!-- Top bar: event title only -->
<nav class="navbar navbar-light bg-light">
<div class="container-fluid">
<span class="navbar-brand mb-0 h1 mx-auto">RESCON 1.0</span>
</div>
</nav>
<div class="container">
<div class="row">
<!-- Schedule column.
     FIX: Bootstrap grid classes are "col-md-*"/"col-sm-*"; the bare
     "md-12 sm-12" of the original matched no Bootstrap rule. -->
<div class="col-lg-6 col-md-12 col-sm-12">
<p class="wel text-start">WELCOME TO<br> <span class="mic">SRM MIC RESCON</span>
</p>
<!-- Event timeline. An <ol> would be more semantic, but the table is
     kept because styles.css appears to target these cells. -->
<table>
<tr>
<td> <span class="list-type">1</span></td>
<td class="li">Paper<br>Submission</td>
<td class="date">13th January'21<br><span class="small">Completed</span></td>
</tr>
<tr>
<td> <span class="list-type">2</span></td>
<td class="li">Project<br>Submission</td>
<td class="date">27th January'21<br><span class="small">Completed</span></td>
</tr>
<tr>
<td>
<span class="list-type">3</span></td>
<td class="li">Project<br>Presentation</td>
<td class="date">30th - 31st January'21<br><span class="small">Get Ready</span></td>
</tr>
</table>
</div>
<div class="robo col-lg-6 col-md-12 col-sm-12">
<!-- FIX: alt is required; presumably the event's robot mascot artwork -->
<img src="robo.png" alt="RESCON robot mascot">
</div>
<div class="row row-2">
<h3 class="text-center">Spin the wheel and wait for your turn</h3>
<div class="col-lg-8 col-md-6 col-sm-6">
<!-- d3 renders the wheel SVG into this node -->
<div id="chart"></div>
</div>
<div class="col-lg-4 col-md-6 col-sm-6">
<!-- Populated by the spin handler after each spin; aria-live so the
     injected result is announced to assistive technology -->
<div id="question" aria-live="polite">
<h1 class="text-center"></h1>
<h2 class="justify-content-center"></h2>
<h3 class="justify-content-center"></h3>
<h4></h4>
</div>
</div>
</div>
</div>
</div>
<!-- FIX: the original had one extra </div> here (three closes after the
     row-2 block where only .row and .container remained open) -->
<footer class="page-footer">
<p class="text-center">Made by Team Pyxis</p>
</footer>
<script src="https://d3js.org/d3.v3.min.js" charset="utf-8"></script>
<!-- Bootstrap JS bundle with Popper.
     NOTE(review): version mangled to "[email protected]" by the page scrape; restore
     the pinned version matching this integrity hash. -->
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js" integrity="sha384-ygbV9kiqUc6oa4msXn9868pTtWMgiQaeYH7/t7LECLbyPA2x65Kgf80OJFdroafW" crossorigin="anonymous"></script>
<script>
// Wheel geometry and spin state (shared by the handlers below).
var padding = {
    top: 40,
    right: 60,
    bottom: 0,
    left: 0
},
    w = 500 - padding.left - padding.right, // drawable width  (440)
    h = 500 - padding.top - padding.bottom, // drawable height (460)
    r = Math.min(w, h) / 2,                 // wheel radius
    rotation = 0,                           // target rotation of the current spin (deg)
    oldrotation = 0,                        // rotation where the previous spin ended
    picked = 100000,                        // index of the chosen slice (sentinel until first spin)
    oldpick = [],                           // indices of slices already landed on
    // Alternating slice fills.
    // FIX: the original second entry was "000", which is not a valid CSS
    // color — it only rendered black because SVG's initial fill is black.
    color = ["rgba(138, 73, 149, 0.27)", "#000"];
// Wheel slices: one entry per competing team.
//   label   - team name painted on the wheel slice
//   value   - team number (logged to the console when the slice is picked)
//   title   - paper title shown in #question h2 after the spin
//   text    - paper abstract shown in #question h3 after the spin
//   members - team member names shown in #question h4
var data = [{
"label": "Team Pegasus",
"value": 1,
"title": "Fixing the train-test resolution discrepancy",
"text": "Data-augmentation is key to the training of neural networks for image classification. This paper first shows that existing augmentations induce a significant discrepancy between the typical size of the objects seen by the classifier at train and test time. We experimentally validate that, for a target test resolution, using a lower train resolution offers better classification at test time.We then propose a simple yet effective and efficient strategy to optimize the classifier performance when the train and test resolutions differ.",
"members": "Rishav Srijarko Anushka"
}, {
"label": "Team Cygnus",
"value": 2,
"title": "EfficientNet:Rethinking model scaling for convolutional neural networks",
"text": "Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet.",
"members": "Prathamesh Swarnabha Yudhajeet"
}, {
"label": "Team Lynx",
"value": 3,
"title": "Residual attention network for Image Classifcation",
"text": "In this work, we propose “Residual Attention Network”, a convolutional neural network using attention mechanism which can incorporate with state-of-art feed forward network architecture in an end-to-end training fashion. Our Residual Attention Network is built by stacking Attention Modules which generate attention-aware features. Importantly, we propose attention residual learning to train very deep Residual Attention Networks which can be easily scaled up to hundreds of layers.",
"members": "Kunal Harshit Pranav"
}, {
"label": "Team Crux",
"value": 4,
"title": "AdaBins: Depth estimation using adaptive bins",
"text": "AdaBins: Depth Estimation using Adaptive Bins We address the problem of estimating a high quality dense depth map from a single RGB input image. We start out with a baseline encoder-decoder convolutional neural network architecture and pose the question of how the global processing of information can help improve overall depth estimation. To this end, we propose a transformer based architecture block that divides the depth range into bins whose center value is estimated adaptively per image. The final depth values are estimated as linear combinations of the bin centers. We call our new building block AdaBins.",
"members": "Pranjal Aryan Aayush"
}, {
"label": "Team Phoenix",
"value": 5,
"title": "PaletteNet: Image Recolorization with given color palette",
"text": "Image recolorization enhances the visual perception of an image for design and artistic purposes. In this work, we present a deep neural network, referred to as PaletteNet, which recolors an image according to a given target color palette that is useful to express the color concept of an image. PaletteNet takes two inputs: a source image to be recolored and a target palette. PaletteNet is then designed to change the color concept of a source image so that the palette of the output image is close to the target palette.",
"members": "Abhishek Aakriti Saisha"
}, {
"label": "Team Libra",
"value": 6,
"title": "Text-Independent speaker verification using 3D convolutional neural networks",
"text": "In this paper, a novel method using 3D Convolutional Neural Network (3D-CNN) architecture has been proposed for speaker verification in the text-independent setting. One of the main challenges is the creation of the speaker models. Most of the previously-reported approaches create speaker models based on averaging the extracted features from utterances of the speaker, which is known as the d-vector system. In our paper, we propose an adaptive feature learning by utilizing the 3D-CNNs for direct speaker model creation in which, for both development and enrollment phases, an identical number of spoken utterances per speaker is fed to the network for representing the speakers’ utterances and creation of the speaker model. This leads to simultaneously capturing the speaker-related information and building a more robust system to cope with within-speaker variation.",
"members": "Rusali Devansh Irfhana"
}, {
"label": "Team Draco",
"value": 7,
"title": "Transfer learning from speaker verification to multispeaker text to speech synthesis",
"text": "We describe a neural network-based system for text-to-speech (TTS) synthesis that is able to generate speech audio in the voice of different speakers, including those unseen during training. Our system consists of three independently trained components: (1) a speaker encoder network, trained on a speaker verification task using an independent dataset of noisy speech without transcripts from thousands of speakers, to generate a fixed-dimensional embedding vector from only seconds of reference speech from a target speaker; (2) a sequence-to-sequence synthesis network based on Tacotron 2 that generates a mel spectrogram from text, conditioned on the speaker embedding; (3) an auto-regressive WaveNet-based vocoder network that converts the mel spectrogram into time domain waveform samples.",
"members": "Aradhya Paras Harsh"
}, {
"label": "Team Andromeda",
"value": 8,
"title": "On the automatic generation of medical imaging reports",
"text": "Report-writing can be error-prone for unexperienced physicians, and time- consuming and tedious for experienced physicians. To address these issues, we study the automatic generation of medical imaging reports. This task presents several challenges. First, a complete report contains multiple heterogeneous forms of information, including findings and tags. Second, abnormal regions in medical images are difficult to identify. Third, the re- ports are typically long, containing multiple sentences. To cope with these challenges, we (1) build a multi-task learning framework which jointly performs the pre- diction of tags and the generation of para- graphs, (2) propose a co-attention mechanism to localize regions containing abnormalities and generate narrations for them, (3) develop a hierarchical LSTM model to generate long paragraphs. We demonstrate the effectiveness of the proposed methods on two publicly available datasets.",
"members": "Sashrika Pooja Indira"
}, ];
// ---- Wheel construction (d3 v3 API: d3.layout.pie / d3.svg.arc) ----
// Root SVG sized to the wheel plus its padding.
var svg = d3.select('#chart')
.append("svg")
.data([data])
.attr("width", w + padding.left + padding.right)
.attr("height", h + padding.top + padding.bottom);
// Group centred on the wheel; the spin animation rotates `vis` inside it.
var container = svg.append("g")
.attr("class", "chartholder")
.attr("transform", "translate(" + (w / 2 + padding.left) + "," + (h / 2 + padding.top) + ")");
var vis = container
.append("g");
// Equal-angle slices: the constant value 1 ignores each datum's "value"
// field so all teams get the same arc.
var pie = d3.layout.pie().sort(null).value(function(d) {
return 1;
});
// declare an arc generator function
var arc = d3.svg.arc().outerRadius(r);
// One <g class="slice"> per team, bound to the pie layout's arc data.
var arcs = vis.selectAll("g.slice")
.data(pie)
.enter()
.append("g")
.attr("class", "slice");
// Slice backgrounds alternate between the two entries of `color`.
arcs.append("path")
.attr("fill", function(d, i) {
if (i % 2 == 0)
return color[0];
else
return color[1];
})
.attr("d", function(d) {
return arc(d);
});
// Team labels: rotated to sit along each slice's bisector, anchored just
// inside the rim (outerRadius - 10).
arcs.append("text").attr("transform", function(d) {
d.innerRadius = 0;
d.outerRadius = r;
d.angle = (d.startAngle + d.endAngle) / 2;
return "rotate(" + (d.angle * 180 / Math.PI - 90) + ")translate(" + (d.outerRadius - 10) + ")";
})
.attr("text-anchor", "end")
.attr("fill", "white")
.text(function(d, i) {
return data[i].label;
});
// Clicking anywhere on the wheel starts a spin.
container.on("click", spin);
// Spin handler: choose a random slice not yet seen, animate the wheel to
// it over 3s, then reveal that team's paper details in #question.
function spin(d) {
// Ignore further clicks while the wheel is animating; re-enabled in the
// transition's "end" callback.
container.on("click", null);
//all slices have been seen, all done
console.log("OldPick: " + oldpick.length, "Data length: " + data.length);
if (oldpick.length == data.length) {
console.log("done");
// Redundant with the disable above, but harmless: leave clicks off for good.
container.on("click", null);
return;
}
// ps: degrees per slice; rng: random target in [360, 1800) so the wheel
// always makes at least one full turn.
// NOTE(review): `pieslice` is computed but never used.
var ps = 360 / data.length,
pieslice = Math.round(1440 / data.length),
rng = Math.floor((Math.random() * 1440) + 360);
// Snap the rotation to a slice boundary, then map it back to the slice
// index that ends up under the pointer.
rotation = (Math.round(rng / ps) * ps);
picked = Math.round(data.length - (rotation % 360) / ps);
picked = picked >= data.length ? (picked % data.length) : picked;
// Already-seen slice: re-roll. NOTE(review): this recursive retry has no
// depth bound; with few unseen slices it can recurse several times
// (probabilistically fine for 8 slices, but worth confirming).
if (oldpick.indexOf(picked) !== -1) {
d3.select(this).call(spin);
return;
} else {
oldpick.push(picked);
}
// Offset so the centre of the picked slice (not its edge) stops under
// the pointer arrow drawn at the wheel's right side.
rotation += 90 - Math.round(ps / 2);
vis.transition()
.duration(3000)
.attrTween("transform", rotTween)
.each("end", function() {
//mark question as seen
d3.select(".slice:nth-child(" + (picked + 1) + ") path")
.attr("fill", "white");
//populate question
d3.select("#question h1")
.text(data[picked].label);
d3.select("#question h2")
.text(data[picked].title);
d3.select("#question h3")
.text(data[picked].text);
d3.select("#question h4")
.text(data[picked].members);
// Remember where this spin ended so the next tween starts from it.
oldrotation = rotation;
/* Get the result value from object "data" */
console.log(data[picked].value)
/* Comment out the line below to restrict spinning to a single time */
container.on("click", spin);
});
}
// Pointer arrow at the wheel's right edge (points left, toward the hub).
// NOTE: `.style({...})` with an object argument is d3 v3-only.
svg.append("g")
.attr("transform", "translate(" + (w + padding.left + padding.right) + "," + ((h / 2) + padding.top) + ")")
.append("path")
.attr("d", "M-" + (r * .15) + ",0L0," + (r * .05) + "L0,-" + (r * .05) + "Z")
.style({
"fill": "white"
});
// Central hub circle that acts as the visual "spin" button (the click
// handler is on the whole container, not just this circle).
container.append("circle")
.attr("cx", 0)
.attr("cy", 0)
.attr("r", 30)
.style({
"fill": "white",
"cursor": "pointer"
});
// "SPIN" label centred on the hub; y=9 roughly vertically centres the
// 15px text on the circle's midpoint.
container.append("text")
.attr("x", 0)
.attr("y", 9)
.attr("text-anchor", "middle")
.text("SPIN")
.style({
"font-weight": "bold",
"font-size": "15px"
});
// Transform tween for the spin transition: interpolates from where the
// previous spin ended (normalised to 0-359) up to the new target
// `rotation`. The `to` parameter is unused; the tween reads the
// module-level spin state instead.
function rotTween(to) {
    var start = oldrotation % 360;
    var angleAt = d3.interpolate(start, rotation);
    return function(frac) {
        return "rotate(" + angleAt(frac) + ")";
    };
}
// Return 1000 random 16-bit values (0..65535), preferring the
// cryptographically secure Web Crypto API when available.
// NOTE(review): this helper is never called elsewhere in the file; kept
// for API compatibility.
// FIXES vs original:
//  - removed the unused local `scale` (its only effect was a d3 call);
//  - the `window.` prefix made the crypto check throw outside browsers,
//    so the global `crypto` object is probed directly;
//  - the fallback filled 1..100000, which silently wrapped modulo 65536
//    when stored into a Uint16Array; 0..65535 keeps both paths in the
//    same range;
//  - dropped the leftover debug console.log.
function getRandomNumbers() {
    var array = new Uint16Array(1000);
    var cryptoObj = (typeof crypto !== "undefined") ? crypto : null;
    if (cryptoObj && typeof cryptoObj.getRandomValues === "function") {
        cryptoObj.getRandomValues(array);
    } else {
        // Math.random is not cryptographically secure — best effort only.
        for (var i = 0; i < 1000; i++) {
            array[i] = Math.floor(Math.random() * 65536);
        }
    }
    return array;
}
</script>
</body>
</html>