diff --git a/annotationweb/urls.py b/annotationweb/urls.py
index ab79955..4bd9fd3 100644
--- a/annotationweb/urls.py
+++ b/annotationweb/urls.py
@@ -40,7 +40,8 @@
path('cardiac/', include('cardiac.urls')),
path('cardiac-plax/', include('cardiac_parasternal_long_axis.urls')),
path('cardiac-alax/', include('cardiac_apical_long_axis.urls')),
- path('spline-segmentation/', include('spline_segmentation.urls'))
+ path('spline-segmentation/', include('spline_segmentation.urls')),
+ path('video-annotation/', include('video_annotation.urls'))
]
# This is for making statics in a development environment
diff --git a/annotationweb/views.py b/annotationweb/views.py
index 467ecbb..f74a050 100644
--- a/annotationweb/views.py
+++ b/annotationweb/views.py
@@ -601,6 +601,8 @@ def get_redirection(task):
return 'cardiac_apical_long_axis:segment_image'
elif task.type == Task.SPLINE_SEGMENTATION:
return 'spline_segmentation:segment_image'
+ elif task.type == Task.VIDEO_ANNOTATION:
+ return 'video_annotation:process_image'
# @register.simple_tag
diff --git a/boundingbox/static/boundingbox/boundingbox.js b/boundingbox/static/boundingbox/boundingbox.js
index 509b311..dd2e712 100644
--- a/boundingbox/static/boundingbox/boundingbox.js
+++ b/boundingbox/static/boundingbox/boundingbox.js
@@ -259,3 +259,21 @@ function redrawSequence() {
g_context.drawImage(g_sequence[index], 0, 0, g_canvasWidth, g_canvasHeight);
redraw();
}
+
+function copyToNext() {
+ // TODO: Check content of g_boxes[g_currentFrameNr+1] -> Is it empty/exist? Overwrite?
+    if (g_currentFrameNr < g_sequenceLength && g_currentFrameNr in g_boxes) {
+ var boxes_to_copy = g_boxes[g_currentFrameNr];
+ for (var i = 0; i < boxes_to_copy.length; i++) {
+ addBox(g_currentFrameNr + 1, boxes_to_copy[i].x, boxes_to_copy[i].y,
+ boxes_to_copy[i].x + boxes_to_copy[i].width,
+ boxes_to_copy[i].y + boxes_to_copy[i].height,
+ boxes_to_copy[i].label_id);
+ }
+ //console.log(boxes_to_copy)
+ // Uncomment the next two lines if you want a confirmation message.
+ // Note that this message will stay until the site is refreshed
+ // var messageBox = document.getElementById("message");
+    // messageBox.innerHTML = '<br>Content copied to the next frame!';
+ }
+}
diff --git a/boundingbox/templates/boundingbox/process_image.html b/boundingbox/templates/boundingbox/process_image.html
index 6c3f1c9..32df01d 100644
--- a/boundingbox/templates/boundingbox/process_image.html
+++ b/boundingbox/templates/boundingbox/process_image.html
@@ -13,6 +13,12 @@
{% endblock task_javascript %}
+{% block CopyContentButton %}
+<div>
+    <button type="button" onclick="copyToNext()">Copy boxes to next frame</button>
+</div>
+{% endblock %}
+
{% block task_instructions %}
{# TODO Put task instructions here.. #}
{% endblock task_instructions %}
diff --git a/common/metaimage.py b/common/metaimage.py
index 2e136f0..97eea1c 100644
--- a/common/metaimage.py
+++ b/common/metaimage.py
@@ -16,6 +16,7 @@ def __init__(self, filename=None, data=None, channels=False):
self.attributes = {}
self.attributes['ElementSpacing'] = [1, 1, 1]
self.attributes['ElementNumberOfChannels'] = 1
+ self.attributes['Offset'] = [0, 0]
if filename is not None:
self.read(filename)
else:
@@ -48,6 +49,8 @@ def read(self, filename):
self.attributes[parts[0].strip()] = parts[1].strip()
if parts[0].strip() == 'ElementSpacing':
self.attributes['ElementSpacing'] = [float(x) for x in self.attributes['ElementSpacing'].split()]
+ if parts[0].strip() == 'Offset':
+ self.attributes['Offset'] = [float(x) for x in self.attributes['Offset'].split()]
dims = self.attributes['DimSize'].split(' ')
if len(dims) == 2:
@@ -73,7 +76,7 @@ def read(self, filename):
# Read uncompressed raw file (.raw)
self.data = np.fromfile(os.path.join(base_path, self.attributes['ElementDataFile']), dtype=np.uint8)
-
+ # TODO: are L80-84 duplicates of L55-59?
dims = self.attributes['DimSize'].split(' ')
if len(dims) == 2:
self.dim_size = (int(dims[0]), int(dims[1]))
@@ -114,6 +117,14 @@ def set_spacing(self, spacing):
def get_spacing(self):
return self.attributes['ElementSpacing']
+ def set_origin(self, origin):
+ if len(origin) != 2 and len(origin) != 3:
+ raise ValueError('Origin must have 2 or 3 components')
+ self.attributes['Offset'] = origin
+
+ def get_origin(self):
+ return self.attributes['Offset']
+
def get_metaimage_type(self):
np_type = self.data.dtype
if np_type == np.float32:
@@ -147,12 +158,13 @@ def write(self, filename, compress=False, compression_level=-1):
f.write('ElementType = ' + self.get_metaimage_type() + '\n')
f.write('ElementSpacing = ' + tuple_to_string(self.attributes['ElementSpacing']) + '\n')
f.write('ElementNumberOfChannels = ' + str(self.attributes['ElementNumberOfChannels']) + '\n')
+ f.write('Offset = ' + tuple_to_string(self.attributes['Offset']) + '\n')
if compress:
compressed_raw_data = zlib.compress(raw_data, compression_level)
f.write('CompressedData = True\n')
f.write('CompressedDataSize = ' + str(len(compressed_raw_data)) + '\n')
for key, value in self.attributes.items():
- if key not in ['NDims', 'DimSize', 'ElementType', 'ElementDataFile', 'CompressedData', 'CompressedDataSize', 'ElementSpacing', 'ElementNumberOfChannels']:
+ if key not in ['NDims', 'DimSize', 'ElementType', 'ElementDataFile', 'CompressedData', 'CompressedDataSize', 'ElementSpacing', 'ElementNumberOfChannels', 'Offset']:
f.write(key + ' = ' + value + '\n')
f.write('ElementDataFile = ' + raw_filename + '\n')
diff --git a/exporters/spline_segmentation_exporter.py b/exporters/spline_segmentation_exporter.py
index 200e190..ba31702 100644
--- a/exporters/spline_segmentation_exporter.py
+++ b/exporters/spline_segmentation_exporter.py
@@ -77,6 +77,9 @@ def add_subjects_to_path(self, path, data):
target_gt_name = os.path.splitext(target_name)[0]+"_gt.mhd"
filename = image_sequence.format.replace('#', str(frame.frame_nr))
+ image_metadata = None
+ if filename.endswith('mhd'):
+ image_metadata = MetaImage(filename=filename)
new_filename = join(subject_subfolder, target_name)
copy_image(filename, new_filename)
@@ -89,7 +92,7 @@ def add_subjects_to_path(self, path, data):
image_pil = PIL.Image.open(new_filename)
image_size = image_pil.size
spacing = [1, 1]
- self.save_segmentation(frame, image_size, join(subject_subfolder, target_gt_name), spacing)
+ self.save_segmentation(frame, image_size, join(subject_subfolder, target_gt_name), spacing, image_metadata)
return True, path
@@ -102,57 +105,8 @@ def get_object_segmentation(self, image_size, frame):
for label in labels:
objects = ControlPoint.objects.filter(label=label, image=frame).only('object').distinct()
for object in objects:
- previous_x = None
- previous_y = None
control_points = ControlPoint.objects.filter(label=label, image=frame, object=object.object).order_by('index')
- max_index = len(control_points)
- for i in range(max_index):
- if i == 0:
- first = max_index-1
- else:
- first = i-1
- a = control_points[first]
- b = control_points[i]
- c = control_points[(i+1) % max_index]
- d = control_points[(i+2) % max_index]
- length = sqrt((b.x - c.x)*(b.x - c.x) + (b.y - c.y)*(b.y - c.y))
- # Not a very elegant solution ... could try to estimate the spline length instead
- # or draw straight lines between consecutive points instead
- step_size = min(0.01, 1.0 / (length*2))
- for t in np.arange(0, 1, step_size):
- x = (2 * t * t * t - 3 * t * t + 1) * b.x + \
- (1 - tension) * (t * t * t - 2.0 * t * t + t) * (c.x - a.x) + \
- (-2 * t * t * t + 3 * t * t) * c.x + \
- (1 - tension) * (t * t * t - t * t) * (d.x - b.x)
- y = (2 * t * t * t - 3 * t * t + 1) * b.y + \
- (1 - tension) * (t * t * t - 2.0 * t * t + t) * (c.y - a.y) + \
- (-2 * t * t * t + 3 * t * t) * c.y + \
- (1 - tension) * (t * t * t - t * t) * (d.y - b.y)
-
- # Round and snap to borders
- x = int(round(x))
- x = min(image_size[1]-1, max(0, x))
- y = int(round(y))
- y = min(image_size[0]-1, max(0, y))
-
- if previous_x is not None and (abs(previous_x - x) > 1 or abs(previous_y - y) > 1):
- # Draw a straight line between the points
- end_pos = np.array([x,y])
- start_pos = np.array([previous_x,previous_y])
- direction = end_pos - start_pos
- segment_length = np.linalg.norm(end_pos - start_pos)
- direction = direction / segment_length # Normalize
- for i in np.arange(0.0, np.ceil(segment_length), 0.5):
- current = start_pos + direction * (float(i)/np.ceil(segment_length))
- current = np.round(current).astype(np.int32)
- current[0] = min(image_size[1]-1, max(0, current[0]))
- current[1] = min(image_size[0]-1, max(0, current[1]))
- segmentation[current[1], current[0]] = counter
-
- previous_x = x
- previous_y = y
-
- segmentation[y, x] = counter
+ self.draw_segmentation(image_size, control_points, canvas=segmentation, label=counter)
# Fill the hole
segmentation[binary_fill_holes(segmentation == counter)] = counter
@@ -161,14 +115,103 @@ def get_object_segmentation(self, image_size, frame):
return segmentation
- def save_segmentation(self, frame, image_size, filename, spacing):
+ @staticmethod
+ def draw_segmentation(image_size, control_points, label: int = 1, canvas: np.ndarray = None, tension: float = 0.5):
+ if canvas is None:
+ canvas = np.zeros(image_size, dtype=np.uint8)
+
+ previous_x = None
+ previous_y = None
+
+ max_index = len(control_points)
+ for i in range(max_index):
+ if i == 0:
+ first = max_index - 1
+ else:
+ first = i - 1
+ a = control_points[first]
+ b = control_points[i]
+ c = control_points[(i + 1) % max_index]
+ d = control_points[(i + 2) % max_index]
+ length = sqrt((b.x - c.x) * (b.x - c.x) + (b.y - c.y) * (b.y - c.y))
+ # Not a very elegant solution ... could try to estimate the spline length instead
+ # or draw straight lines between consecutive points instead
+ step_size = min(0.01, 1.0 / (length * 2))
+ for t in np.arange(0, 1, step_size):
+ x = (2 * t * t * t - 3 * t * t + 1) * b.x + \
+ (1 - tension) * (t * t * t - 2.0 * t * t + t) * (c.x - a.x) + \
+ (-2 * t * t * t + 3 * t * t) * c.x + \
+ (1 - tension) * (t * t * t - t * t) * (d.x - b.x)
+ y = (2 * t * t * t - 3 * t * t + 1) * b.y + \
+ (1 - tension) * (t * t * t - 2.0 * t * t + t) * (c.y - a.y) + \
+ (-2 * t * t * t + 3 * t * t) * c.y + \
+ (1 - tension) * (t * t * t - t * t) * (d.y - b.y)
+
+ # Round and snap to borders
+ x = int(round(x))
+ x = min(image_size[1] - 1, max(0, x))
+ y = int(round(y))
+ y = min(image_size[0] - 1, max(0, y))
+
+ if previous_x is not None and (abs(previous_x - x) > 1 or abs(previous_y - y) > 1):
+ # Draw a straight line between the points
+ end_pos = np.array([x, y])
+ start_pos = np.array([previous_x, previous_y])
+ direction = end_pos - start_pos
+ segment_length = np.linalg.norm(end_pos - start_pos)
+ direction = direction / segment_length # Normalize
+ for i in np.arange(0.0, np.ceil(segment_length), 0.5):
+ current = start_pos + direction * (float(i) / np.ceil(segment_length))
+ current = np.round(current).astype(np.int32)
+ current[0] = min(image_size[1] - 1, max(0, current[0]))
+ current[1] = min(image_size[0] - 1, max(0, current[1]))
+ canvas[current[1], current[0]] = label
+
+ previous_x = x
+ previous_y = y
+
+ canvas[y, x] = label
+
+ return canvas
+
+ @staticmethod
+ def compute_scaling(image_size, spacing):
+ if len(spacing) == 2:
+ aspect_ratio = image_size[0] / image_size[1]
+ new_aspect_ratio = image_size[0] * spacing[0] / (image_size[1] * spacing[1])
+ scale = new_aspect_ratio / aspect_ratio
+ pixel_scaling = np.divide(image_size, np.multiply(image_size, scale).astype(int))
+ else:
+ raise NotImplementedError('3D segmentations not implemented yet')
+ return pixel_scaling
+
+ def save_segmentation(self, frame, image_size, filename, spacing, image_metadata: MetaImage = None):
image_size = [image_size[1], image_size[0]]
- # Create compounded segmentation object
- segmentation = self.get_object_segmentation(image_size, frame)
+        if np.any(np.asarray(spacing) != 1):
+ print('Anisotropic image detected')
+ segmentation = np.zeros(image_size, dtype=np.uint8)
+ labels = Label.objects.filter(task=frame.image_annotation.task).order_by('id')
+ scaling = self.compute_scaling(image_size, spacing)
+ # TODO: NotImplementedError will be triggered if we are dealing with 3D data
+ for label, label_id in enumerate(labels):
+ objects = ControlPoint.objects.filter(label=label_id, image=frame).only('object').distinct()
+ for object in objects:
+ control_points = ControlPoint.objects.filter(label=label_id, image=frame, object=object.object).order_by('index')
+ for point in control_points:
+ point.x *= scaling[0]
+ # Update segmentation
+ object_segmentation = self.draw_segmentation(image_size, control_points)
+ object_segmentation[binary_fill_holes(object_segmentation == 1)] = 1
+ segmentation[object_segmentation == 1] = label + 1
+ else:
+ # Create compounded segmentation object
+ segmentation = self.get_object_segmentation(image_size, frame)
segmentation_mhd = MetaImage(data=segmentation)
- segmentation_mhd.set_attribute('ImageQuality', frame.image_annotation.image_quality)
+ if image_metadata is not None:
+ segmentation_mhd.set_attribute('FrameType', image_metadata.get_metaimage_type())
+ segmentation_mhd.set_attribute('Offset', image_metadata.get_origin())
segmentation_mhd.set_spacing(spacing)
metadata = ImageMetadata.objects.filter(image=frame.image_annotation.image)
for item in metadata:
diff --git a/requirements.txt b/requirements.txt
index 95c0aa1..079d05b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,9 @@
-Django==2.2.*
-h5py==2.10.*
-numpy==1.19.*
-Pillow==7.1.*
-scipy>=1.5.0,<2.0
-django-otp==0.7.*
-qrcode==6.*
+Django==2.2.13
+django-otp==0.7.4
+h5py==2.10.0
+Pillow==7.1.0
+pytz==2021.1
+qrcode==6.1
+scipy==1.5.4
+six==1.15.0
+sqlparse==0.4.1
diff --git a/video_annotation/__init__.py b/video_annotation/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/video_annotation/admin.py b/video_annotation/admin.py
new file mode 100644
index 0000000..d8fb805
--- /dev/null
+++ b/video_annotation/admin.py
@@ -0,0 +1,5 @@
+from django.contrib import admin
+from .models import *
+
+# Register your models here.
+admin.site.register(VideoAnnotation)
diff --git a/video_annotation/apps.py b/video_annotation/apps.py
new file mode 100644
index 0000000..f3dab8c
--- /dev/null
+++ b/video_annotation/apps.py
@@ -0,0 +1,5 @@
+from django.apps import AppConfig
+
+
+class VideoAnnotation(AppConfig):
+ name = 'video_annotation'
diff --git a/video_annotation/forms.py b/video_annotation/forms.py
new file mode 100644
index 0000000..eb46c6a
--- /dev/null
+++ b/video_annotation/forms.py
@@ -0,0 +1,3 @@
+from django import forms
+from .models import *
+
diff --git a/video_annotation/models.py b/video_annotation/models.py
new file mode 100644
index 0000000..dc0f01d
--- /dev/null
+++ b/video_annotation/models.py
@@ -0,0 +1,11 @@
+from django.db import models
+from annotationweb.models import KeyFrameAnnotation, Label
+
+
+class VideoAnnotation(models.Model):
+    image = models.ForeignKey(KeyFrameAnnotation, on_delete=models.CASCADE)
+    x = models.PositiveIntegerField()
+    y = models.PositiveIntegerField()
+    width = models.PositiveIntegerField()
+    height = models.PositiveIntegerField()
+    label = models.ForeignKey(Label, on_delete=models.CASCADE)
diff --git a/video_annotation/static/video_annotation/video_annotation.js b/video_annotation/static/video_annotation/video_annotation.js
new file mode 100644
index 0000000..1b4086c
--- /dev/null
+++ b/video_annotation/static/video_annotation/video_annotation.js
@@ -0,0 +1,335 @@
+var g_backgroundImage;
+var g_paint = false;
+var g_frameNr;
+var g_currentColor = null;
+var g_BBx;
+var g_BBy;
+var g_BBx2;
+var g_BBy2;
+var g_boxes = {}; // Dictionary with keys frame_nr which each has a list of boxes
+var g_minimumSize = 10;
+var g_move = false;
+var g_resize = false;
+var g_invalidBoxNr = 999999;
+var g_currentBox = g_invalidBoxNr;
+var g_cornerSize = 20;
+var g_bbox_found = false;
+
+function setupSegmentation() {
+ console.log('setting up video annotation....');
+
+ // Define event callbacks
+ $('#canvas').mousedown(function(e) {
+
+ // TODO check if current frame is not the frame to segment
+ var pos = mousePos(e, this);
+ g_BBx = pos.x;
+ g_BBy = pos.y;
+ var insideBox = isInsideBox(pos.x, pos.y);
+ if(insideBox.isInside) {
+ g_currentBox = insideBox.boxNr
+ if(insideBox.isInsideCorner)
+ g_resize = true;
+ else
+ g_move = true;
+ return;
+ }
+ g_paint = true;
+ console.log('started BB on ' + g_BBx + ' ' + g_BBy);
+ });
+
+ $('#canvas').mousemove(function(e) {
+ var pos = mousePos(e, this);
+ if(g_paint) {
+ g_BBx2 = pos.x;
+ g_BBy2 = pos.y;
+ redrawSequence();
+ return;
+ }
+
+ //Position diff since last mouse position
+ var xDiff = pos.x - g_BBx;
+ var yDiff = pos.y - g_BBy;
+
+ //Update initial position while moving or resizing.
+ g_BBx = pos.x;
+ g_BBy = pos.y;
+
+ if(g_move) {
+ moveBox(g_currentBox, xDiff, yDiff);
+ return;
+ }
+ if(g_resize) {
+ resizeBox(g_currentBox, xDiff, yDiff);
+ return;
+ }
+ });
+
+ $('#canvas').mouseup(function(e){
+ g_move = false;
+ g_resize = false;
+ if(!g_paint)
+ return;
+ g_paint = false;
+ g_annotationHasChanged = true;
+ propagateModification(g_currentFrameNr, g_BBx, g_BBy, g_BBx2, g_BBy2, g_currentLabel,true)
+ // for (i = g_currentFrameNr; i < g_frameNr; i++) {
+ // addBox(i, g_BBx, g_BBy, g_BBx2, g_BBy2, g_currentLabel);
+ // console.log('finished BB on ' + g_BBx + ' ' + g_BBy + ' in frame ' + i);
+ // }
+ });
+
+ $('#canvas').mouseleave(function(e){
+ if(g_paint) {
+ g_annotationHasChanged = true;
+ addBox(g_currentFrameNr, g_BBx, g_BBy, g_BBx2, g_BBy2, g_currentLabel);
+ redrawSequence();
+ g_paint = false;
+ }
+ });
+
+ $('#canvas').dblclick(function(e){
+ var pos = mousePos(e, this);
+ insideBox = isInsideBox(pos.x, pos.y);
+        if(insideBox.isInside) {
+            // TODO: eliminate all the boxes like this one in the next frames
+            var box = g_boxes[g_currentFrameNr][insideBox.boxNr];
+            propagateModification(g_currentFrameNr, box.x, box.y,
+                box.x + box.width, box.y + box.height, box.label, false, true); }
+    });
+
+ $("#clearButton").click(function() {
+ g_annotationHasChanged = true;
+ g_boxes = {};
+ $('#slider').slider('value', g_frameNr); // Update slider
+ redrawSequence();
+ });
+
+ // Set first label active
+ changeLabel(g_labelButtons[0].id);
+ redrawSequence();
+}
+
+function mousePos(e, canvas) {
+ var scale = g_canvasWidth / $('#canvas').width();
+ var mouseX = (e.pageX - canvas.offsetLeft)*scale;
+ var mouseY = (e.pageY - canvas.offsetTop)*scale;
+ return {
+ x: mouseX,
+ y: mouseY,
+ }
+}
+
+function isInsideBox(x, y) {
+ var boxNr = g_invalidBoxNr;
+ var isInside = false;
+ var isInsideCorner = false;
+
+ if(g_currentFrameNr in g_boxes) {
+ for(var i = 0; i < g_boxes[g_currentFrameNr].length; ++i) {
+ var box = g_boxes[g_currentFrameNr][i];
+ if(((x >= box.x) && (x <= (box.x+box.width))) && ((y >= box.y) && (y <= (box.y+box.height))) ) {
+ isInside = true;
+ if(!isInsideCorner)
+ boxNr = i;//Don't change boxnr if we are inside the corner of another box
+ if((x >= (box.x+box.width-g_cornerSize)) && (y >= (box.y+box.height-g_cornerSize)))//Lower right
+ isInsideCorner = true;
+ }
+ }
+ }
+// console.log('isInside: ' + isInside + ' ' + boxNr)
+ return {
+ isInside: isInside,
+ boxNr: boxNr,
+ isInsideCorner: isInsideCorner,
+ };
+}
+
+function removeBox(frame_nr, boxNr)
+{
+ console.log('removeBox: ' + boxNr);
+ var removedBox = g_boxes[frame_nr].splice(boxNr, 1);
+ g_annotationHasChanged = true;
+ redrawSequence();
+}
+
+function propagateModification(frame_nr, x, y, x2, y2, label, create, erase) {
+ // Add new/erase bbox to the next frames after frame_nr
+ var frame_range = range(frame_nr, g_sequenceLength + 1, 1)
+ frame_range.forEach(frame_nr => annotationExists(frame_nr, x, y, x2, y2, label, create, erase))
+}
+
+function annotationExists(frame_nr, x, y, x2, y2, label, create, erase) {
+ if (!(frame_nr in g_boxes)) {
+ // If the frame has no annotations, then create a new one
+ if (create)
+ addBox(frame_nr, x, y, x2, y2, label);
+ } else {
+ // If the frame already has annotations, check them and update/create/remove one with the given information
+ g_bbox_found = false;
+ //Update or remove
+ g_boxes[frame_nr].forEach((bbox, idx, obj) => findBoxAndUpdate(bbox, x, y, x2, y2, label, !erase, idx, obj))
+ // Clean up after removeBox
+ if (g_boxes[frame_nr].length === 0)
+ delete g_boxes[frame_nr];
+
+ if (!g_bbox_found && create)
+ // The entry frame_nr has boxes but not the one we are looking for, but we want to create it
+ addBox(frame_nr, x, y, x2, y2, label);
+ }
+}
+
+function findBoxAndUpdate(bbox, x, y, x2, y2, label, update, idx, actual_list) {
+ //console.log('Frame ' + frame_nr + ' has bbox: '+ bbox);
+ var returnBbox = bbox;
+ if (returnBbox !== undefined && returnBbox.label === label) {
+ g_bbox_found = true
+ // If the frame already has the bbox, then...
+ if (update) {
+ // ...update the location
+ returnBbox.x = x;
+ returnBbox.y = y;
+ returnBbox.height = y2 - y;
+ returnBbox.width = x2 - x;
+ }
+ else {
+ // ...remove the annotation
+ actual_list.splice(idx, 1)
+ return
+ }
+ }
+ return returnBbox
+}
+
+function moveBox(boxNr, xDiff, yDiff)
+{
+ var box = g_boxes[g_currentFrameNr][boxNr];
+ propagateModification(g_currentFrameNr,box.x + xDiff, box.y + yDiff,
+ box.x + xDiff + box.width, box.y + yDiff + box.height, box.label, true)
+ redrawSequence();
+}
+
+function resizeBox(boxNr, xDiff, yDiff)
+{
+ var box = g_boxes[g_currentFrameNr][boxNr];
+ if(box.width < (-xDiff + g_minimumSize)){
+ xDiff = 0;
+ }
+ if(box.height < (-yDiff + g_minimumSize)){
+ yDiff = 0;
+ }
+ propagateModification(g_currentFrameNr, box.x, box.y,
+ box.x + box.width + xDiff, box.y + box.height + yDiff, box.label, true)
+ redrawSequence();
+}
+
+function createBox(x, y, x2, y2, label) {
+ // Select the one closest to 0,0
+ var boxOriginX = min(x, x2);
+ var boxOriginY = min(y, y2);
+
+ // Calculate width and height
+ var width = max(x, x2) - boxOriginX;
+ var height = max(y, y2) - boxOriginY;
+
+ // Find label index
+ var labelIndex = 0;
+ for(var i = 0; i < g_labelButtons.length; i++) {
+ if(g_labelButtons[i].id === label) {
+ labelIndex = i;
+ }
+ }
+
+ var box = {
+ x: boxOriginX,
+ y: boxOriginY,
+ width: width,
+ height: height,
+ label_id: label, // actual DB id
+ label: labelIndex // index: only used for color
+ };
+ return box;
+}
+
+function addBox(frame_nr, x, y, x2, y2, label) {
+ // Only add box if large enough
+ if(Math.abs(x2 - x) > g_minimumSize && Math.abs(y2 - y) > g_minimumSize) {
+ var box = createBox(x, y, x2, y2, label);
+ if(!(frame_nr in g_boxes))
+ g_boxes[frame_nr] = [];
+ g_boxes[frame_nr].push(box);
+ }
+}
+
+function range(start = 0, stop, step = 1) {
+ var size = Math.ceil((stop - start) / step)
+ return [...Array(size).keys()].map(i => i*step + start);
+}
+
+function sendDataForSave() {
+ return $.ajax({
+ type: "POST",
+        url: "/video-annotation/save/",
+ data: {
+ image_id: g_imageID,
+ boxes: JSON.stringify(g_boxes),
+ task_id: g_taskID,
+ target_frames: JSON.stringify(g_targetFrames),
+ quality: $('input[name=quality]:checked').val(),
+ rejected: g_rejected ? 'true':'false',
+ comments: $('#comments').val(),
+ },
+ dataType: "json" // Need this do get result back as JSON
+ });
+}
+
+function loadVideoAnnotationTask(image_sequence_id) {
+ console.log('In video annotation task load')
+
+ g_backgroundImage = new Image();
+ g_backgroundImage.src = '/show_frame/' + image_sequence_id + '/' + 0 + '/' + g_taskID + '/';
+ g_backgroundImage.onload = function() {
+ g_canvasWidth = this.width;
+ g_canvasHeight = this.height;
+ setupSegmentation();
+ };
+
+}
+
+function redraw(){
+ var box, label;
+
+ // Draw current box
+ if(g_paint) {
+ g_context.beginPath();
+ g_context.lineWidth = 2;
+ box = createBox(g_BBx, g_BBy, g_BBx2, g_BBy2, g_currentLabel);
+ label = g_labelButtons[box.label];
+ g_context.strokeStyle = colorToHexString(label.red, label.green, label.blue);
+ g_context.rect(box.x, box.y, box.width, box.height);
+ g_context.stroke();
+ }
+
+ if(!(g_currentFrameNr in g_boxes))
+ return;
+
+ // Draw all stored boxes
+ for(var i = 0; i < g_boxes[g_currentFrameNr].length; ++i) {
+ g_context.beginPath();
+ g_context.lineWidth = 2;
+ box = g_boxes[g_currentFrameNr][i];
+ label = g_labelButtons[box.label];
+ g_context.strokeStyle = colorToHexString(label.red, label.green, label.blue);
+ g_context.rect(box.x, box.y, box.width, box.height);
+ g_context.moveTo(box.x+box.width-g_cornerSize, box.y+box.height);
+ g_context.lineTo(box.x+box.width, box.y+box.height-g_cornerSize);
+ g_context.stroke();
+ }
+}
+
+// Override redraw sequence in sequence.js
+function redrawSequence() {
+ var index = g_currentFrameNr - g_startFrame;
+ g_context.drawImage(g_sequence[index], 0, 0, g_canvasWidth, g_canvasHeight);
+ redraw();
+}
diff --git a/video_annotation/templates/video_annotation/process_image.html b/video_annotation/templates/video_annotation/process_image.html
new file mode 100644
index 0000000..087cf91
--- /dev/null
+++ b/video_annotation/templates/video_annotation/process_image.html
@@ -0,0 +1,18 @@
+{% extends 'annotationweb/do_task.html' %}
+
+{% block task_javascript %}
+
+{# Add previously stored boxes if they exist #}
+{% for box in boxes %}
+addBox({{ box.image.frame_nr }}, {{ box.x }}, {{ box.y }}, {{ box.x }} + {{ box.width }}, {{ box.y }} + {{ box.height }}, {{ box.label_id }});
+{% endfor %}
+
+{% if image_sequence %}
+loadVideoAnnotationTask({{ image_sequence.id }});
+{% endif %}
+
+{% endblock task_javascript %}
+
+{% block task_instructions %}
+ TODO Put task instructions here..
+{% endblock task_instructions %}
diff --git a/video_annotation/tests.py b/video_annotation/tests.py
new file mode 100644
index 0000000..7ce503c
--- /dev/null
+++ b/video_annotation/tests.py
@@ -0,0 +1,3 @@
+from django.test import TestCase
+
+# Create your tests here.
diff --git a/video_annotation/urls.py b/video_annotation/urls.py
new file mode 100644
index 0000000..6b9b92d
--- /dev/null
+++ b/video_annotation/urls.py
@@ -0,0 +1,10 @@
+from django.urls import path
+
+from . import views
+
+app_name = 'video_annotation'
+urlpatterns = [
+    path('process/<int:task_id>/', views.process_next_image, name='process_image'),
+    path('process/<int:task_id>/<int:image_id>/', views.process_image, name='process_image'),
+ path('save/', views.save_boxes, name='save'),
+]
diff --git a/video_annotation/views.py b/video_annotation/views.py
new file mode 100644
index 0000000..b3e287d
--- /dev/null
+++ b/video_annotation/views.py
@@ -0,0 +1,70 @@
+from django.contrib.admin.views.decorators import staff_member_required
+from django.shortcuts import render, redirect
+from .models import *
+from annotationweb.models import Task, ImageAnnotation
+from .forms import *
+from django.contrib import messages
+from django.http import HttpResponse, Http404, JsonResponse, HttpResponseRedirect
+import random
+import json
+import common.task
+
+
+def process_next_image(request, task_id):
+ return process_image(request, task_id, None)
+
+
+def process_image(request, task_id, image_id):
+ try:
+ context = common.task.setup_task_context(request, task_id, Task.VIDEO_ANNOTATION, image_id)
+ context['javascript_files'] = ['video_annotation/video_annotation.js']
+
+ # Load boxes if they exist
+ try:
+ annotations = KeyFrameAnnotation.objects.filter(image_annotation__task_id=task_id,
+ image_annotation__image_id=image_id)
+ context['boxes'] = VideoAnnotation.objects.filter(image__in=annotations)
+ context['target_frames'] = annotations
+ except KeyFrameAnnotation.DoesNotExist:
+ pass
+
+ return render(request, 'video_annotation/process_image.html', context)
+ except common.task.NoMoreImages:
+ messages.info(request, 'This task is finished, no more images to segment.')
+ return redirect('index')
+ except RuntimeError as e:
+ messages.error(request, str(e))
+ return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
+
+
+def save_boxes(request):
+ try:
+ annotations = common.task.save_annotation(request)
+ boxes = json.loads(request.POST['boxes'])
+
+ # Store every box
+ for annotation in annotations:
+ frame_nr = str(annotation.frame_nr)
+ for box in boxes[frame_nr]:
+ bb = VideoAnnotation()
+ bb.x = int(box['x'])
+ bb.y = int(box['y'])
+ bb.width = int(box['width'])
+ bb.height = int(box['height'])
+ bb.image = annotation
+ bb.label_id = int(box['label_id'])
+ bb.save()
+
+ response = {
+ 'success': 'true',
+ 'message': 'Completed'
+ }
+        messages.success(request, str(sum(len(v) for v in boxes.values())) + ' boxes were saved')
+ except Exception as e:
+ response = {
+ 'success': 'false',
+ 'message': str(e)
+ }
+
+ return JsonResponse(response)
+