diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index a333f5b11..87341247c 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -92,8 +92,8 @@ jobs:
       - name: Publish to Registry
         uses: docker/build-push-action@v1
         with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
+          username: ${{ secrets.DOCKER_HUB_USERNAME }}
+          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
           repository: ubcctlt/compair-app
           tag_with_ref: true
       - name: Trigger deploy

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c05b2dd7b..7e23adb2f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,25 +1,31 @@
 # v1.3

-## Notable changes
-
-* Upgrades: There is a high risk of regressions due to many upgrades to the libraries used by ComPAIR.
+## Notable changes
+* Many dependencies, both frontend and backend, were updated.

 ### New Features
-* 1. "Download All" attachments button was added to generate and download a zip file of all submitted attachments in answers
-* 2. The "Assignment End-Date" feature was added for admin users to query for the assignments end-date.
-
+* A "Download All" attachments button was added to generate and download a zip file of all student-submitted answer attachments. It can be found in an assignment's "Participation" tab, under the "Attachments" column.
+* The "Assignment End-Date" tool was added so that admin users can query assignments by their end dates.
+    * The purpose of this page is to search for assignments that are still ongoing or active on a given date, to help plan potential schedules for the testing, staging, and production environments.

-### Environment Variable Changes
-* CELERY_ALWAYS_EAGER is now CELERY_TASK_ALWAYS_EAGER
-    * Default: false
-
-### New Environment Variables: For controlling memory leak growth in Kubernetes
+### New Environment Variables: For controlling worker memory leaks
 * CELERY_WORKER_MAX_TASKS_PER_CHILD - Kills a worker process and forks a new one when it has executed the given number of tasks.
-    * Default to 20
 * CELERY_WORKER_MAX_MEMORY_PER_CHILD - Set to memory in kilobytes. Kills a worker process and forks a new one when it hits the given memory usage, the currently executing task will be allowed to complete before being killed.
-    * Default to 600MB

+## Breaking Changes
+Celery 4 introduced a new all-lowercase naming scheme for its settings, and
+ComPAIR now uses this scheme. To adapt a Celery setting to a ComPAIR
+environment variable, convert the original Celery name to all uppercase and
+prefix it with "CELERY\_". ComPAIR will strip the prefix and lowercase the
+variable before passing it to Celery. A few Celery settings were renamed in
+the new scheme; the ones supported in ComPAIR are:
+
+* CELERY_ALWAYS_EAGER is now CELERY_TASK_ALWAYS_EAGER
+    * Set to true if running stock standalone; see `compair/settings.py`.
+    * Set to false if running via the repo's docker-compose.yml
+* BROKER_TRANSPORT_OPTIONS is now CELERY_BROKER_TRANSPORT_OPTIONS
+* CELERYBEAT_SCHEDULE is now CELERY_BEAT_SCHEDULE
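As an editorial illustration of the convention described above, here is a minimal, hypothetical sketch of how a `CELERY_`-prefixed environment variable could be mapped to a lowercase Celery 4 setting name. It is not ComPAIR's actual configuration code, and the helper name is invented:

```python
import os

def celery_setting_from_env(name, default=None):
    """Map a ComPAIR-style variable (e.g. CELERY_TASK_ALWAYS_EAGER) to the
    lowercase Celery 4 setting name (task_always_eager) plus its value."""
    if not name.startswith("CELERY_"):
        raise ValueError("expected a CELERY_-prefixed variable")
    value = os.environ.get(name, default)
    key = name[len("CELERY_"):].lower()  # strip the prefix, then lowercase
    return key, value

# CELERY_WORKER_MAX_TASKS_PER_CHILD=20 would correspond to the Celery
# setting worker_max_tasks_per_child = 20.
print(celery_setting_from_env("CELERY_TASK_ALWAYS_EAGER", "false"))
print(celery_setting_from_env("CELERY_WORKER_MAX_TASKS_PER_CHILD", "20"))
```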
 # v1.2.12

diff --git a/compair/api/assignment.py b/compair/api/assignment.py
index 40d375694..2aa93a0f2 100644
--- a/compair/api/assignment.py
+++ b/compair/api/assignment.py
@@ -36,6 +36,7 @@ def non_blank_text(value):
 new_assignment_parser.add_argument('answer_end', required=True, nullable=False)
 new_assignment_parser.add_argument('compare_start', default=None)
 new_assignment_parser.add_argument('compare_end', default=None)
+new_assignment_parser.add_argument('compare_localTimeZone', default='UTC')
 new_assignment_parser.add_argument('self_eval_start', default=None)
 new_assignment_parser.add_argument('self_eval_end', default=None)
 new_assignment_parser.add_argument('self_eval_instructions', type=non_blank_text, default=None)

diff --git a/compair/api/assignment_attachment.py b/compair/api/assignment_attachment.py
index 51bc209d3..80b0e791b 100644
--- a/compair/api/assignment_attachment.py
+++ b/compair/api/assignment_attachment.py
@@ -23,9 +23,13 @@
 # differs completely from that used by file.py, I've had to split it out.

-# given an assignment, download all attachments in that assignment.
-# /api/courses//assignments//attachments/download
-class DownloadAllAttachmentsAPI(Resource):
+# given an assignment, download all student attachments in that assignment.
+# This is restricted to student answers only, to match the behaviour of the
+# "Participation" tab in the UI, which lists only students.
+# /api/courses//assignments//attachments/download_students
+class DownloadAllStudentAttachmentsAPI(Resource):
+    DELIM = ' - '
+
     @login_required
     def get(self, course_uuid, assignment_uuid):
         # course unused, but we need to call it to check if it's a valid course
@@ -38,13 +42,21 @@ def get(self, course_uuid, assignment_uuid):
                 message="Sorry, your system role does not allow downloading all attachments")

         # grab answers so we can see how many has files
-        answers = self.getAnswersByAssignment(assignment)
+        answers = self.getStudentAnswersByAssignment(assignment)
         fileIds = []
+        fileAuthors = {}
         for answer in answers:
             if not answer.file_id:
                 continue
             # answer has an attachment
             fileIds.append(answer.file_id)
+            # the user who uploaded the file can be different from the answer
+            # author (e.g.
instructor can upload on behalf of student), so + # we need to use the answer author instead of file uploader + author = answer.user_fullname + if answer.user_student_number: + author += self.DELIM + answer.user_student_number + fileAuthors[answer.file_id] = author if not fileIds: return {'msg': 'Assignment has no attachments'} @@ -64,13 +76,9 @@ def get(self, course_uuid, assignment_uuid): current_app.config['ATTACHMENT_UPLOAD_FOLDER'], srcFile.name ) - # set filename to 'full name - student number - uuid.ext' - # omit student number or extension if not exist - delim = ' - ' - srcFileName = srcFile.user.fullname - if srcFile.user.student_number: - srcFileName += delim + srcFile.user.student_number - srcFileName += delim + srcFile.name + # filename should be 'full name - student number - uuid.ext' + # student number is omitted if user doesn't have one + srcFileName = fileAuthors[srcFile.id] + self.DELIM + srcFile.name #current_app.logger.debug("writing " + srcFileName) zipFile.write(srcFilePath, srcFileName) #current_app.logger.debug("Writing zip file") @@ -79,7 +87,7 @@ def get(self, course_uuid, assignment_uuid): # this really should be abstracted out into the Answer model, but I wasn't # able to get the join with UserCourse to work inside the Answer model. - def getAnswersByAssignment(self, assignment): + def getStudentAnswersByAssignment(self, assignment): return Answer.query \ .outerjoin(UserCourse, and_( Answer.user_id == UserCourse.user_id, @@ -91,7 +99,10 @@ def getAnswersByAssignment(self, assignment): Answer.practice == False, Answer.draft == False, or_( - and_(UserCourse.course_role != CourseRole.dropped, Answer.user_id != None), + and_( + UserCourse.course_role == CourseRole.student, + Answer.user_id != None + ), Answer.group_id != None ) )) \ @@ -102,4 +113,4 @@ def getFilesByIds(self, fileIds): filter(File.id.in_(fileIds)).all() -api.add_resource(DownloadAllAttachmentsAPI, '/download') +api.add_resource(DownloadAllStudentAttachmentsAPI, '/download_students') diff --git a/compair/api/assignment_search_enddate.py b/compair/api/assignment_search_enddate.py index 48c4f9509..e9b50f401 100644 --- a/compair/api/assignment_search_enddate.py +++ b/compair/api/assignment_search_enddate.py @@ -4,6 +4,7 @@ from sqlalchemy import create_engine import json from flask import jsonify +import pytz from bouncer.constants import READ, EDIT, CREATE, DELETE, MANAGE from flask import Blueprint, current_app @@ -24,13 +25,11 @@ from .util import new_restful_api, get_model_changes, pagination_parser from datetime import datetime +import time assignment_search_enddate_api = Blueprint('assignment_search_enddate_api', __name__) api = new_restful_api(assignment_search_enddate_api) -##event -on_assignment_get = event.signal('ASSIGNMENT_GET') - def validate(date_text): try: if date_text != datetime.strptime(date_text, "%Y-%m-%d").strftime('%Y-%m-%d'): @@ -43,29 +42,41 @@ class AssignmentRootAPI1(Resource): @login_required def get(self): + # get app timezone in settings + appTimeZone = current_app.config.get('APP_TIMEZONE', time.strftime('%Z') ) + search_date_assignment_parser = RequestParser() search_date_assignment_parser.add_argument('compare_start', default=datetime.now().strftime("%Y-%m-%d")) search_date_assignment_parser.add_argument('compare_end', default=datetime.now().strftime("%Y-%m-%d")) + search_date_assignment_parser.add_argument('compare_localTimeZone', default=appTimeZone) + args = search_date_assignment_parser.parse_args() - end_date = datetime.now().strftime("%Y-%m-%d") + end_date = 
datetime.now().strftime("%Y-%m-%d 00:00:00") start_date = datetime.now().strftime("%Y-%m-%d") + compare_localTimeZone = appTimeZone + if (args['compare_localTimeZone']): + compare_localTimeZone = str(args['compare_localTimeZone']) if validate(args['compare_end']): - end_date = str(args['compare_end']) + end_date = str(args['compare_end']) + ' 00:00:00' + + ##convert this to System TZ + local = pytz.timezone(compare_localTimeZone) + naive = datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S") + local_dt = local.localize(naive, is_dst=None) + systemTZ_dt = local_dt.astimezone(pytz.timezone(appTimeZone)) + end_date = str(systemTZ_dt) + if validate(args['compare_start']): start_date = str(args['compare_start']) db_url = str(current_app.config['SQLALCHEMY_DATABASE_URI']) engine = create_engine(db_url, pool_size=5, pool_recycle=3600) conn = engine.connect() - ##sql_text = str("SELECT JSON_OBJECT('uuid', uuid,'name', name,'compare_start', compare_start, 'compare_end', compare_end) FROM assignment;"); - ##sql_text = str("SELECT JSON_OBJECT('uuid', uuid,'name', name,'compare_start', compare_start, 'compare_end', compare_end) FROM assignment WHERE compare_end >= '" + end_date + "';"); - ##sql_text = str("SELECT JSON_OBJECT('uuid', uuid,'name', name,'answer_start', answer_start,'answer_end', answer_end,'compare_start', compare_start, 'compare_end', compare_end) FROM assignment WHERE compare_end >= '" + end_date + "' OR answer_end >= '" + end_date + "';"); - sql_text = str("SELECT JSON_OBJECT('uuid', uuid,'name', name,'answer_start', date_format(answer_start, '%%M %%d, %%Y'),'answer_end', date_format(answer_end, '%%M %%d, %%Y'),'compare_start', date_format(compare_start, '%%M %%d, %%Y'), 'compare_end', date_format(compare_end, '%%M %%d, %%Y')) FROM assignment WHERE compare_end >= '" + end_date + "' OR answer_end >= '" + end_date + "';"); - ##print(sql_text) + sql_text = str("SELECT JSON_OBJECT('course_name', t1.name,'name', t2.name,'answer_start', date_format(CONVERT_TZ(t2.answer_start, '" + appTimeZone + "','" + compare_localTimeZone + "'), '%%b %%d, %%Y'),'answer_end', date_format(CONVERT_TZ(t2.answer_end, '" + appTimeZone + "','" + compare_localTimeZone + "'), '%%b %%d, %%Y'),'compare_start', date_format(CONVERT_TZ(t2.compare_start, '" + appTimeZone + "','" + compare_localTimeZone + "'), '%%b %%d, %%Y'), 'compare_end', date_format(CONVERT_TZ(t2.compare_end, '" + appTimeZone + "','" + compare_localTimeZone + "'), '%%b %%d, %%Y'), 'self_eval_end', date_format(CONVERT_TZ(t2.self_eval_end, '" + appTimeZone + "','" + compare_localTimeZone + "'), '%%b %%d, %%Y'), 'self_eval_start', date_format(CONVERT_TZ(t2.self_eval_start, '" + appTimeZone + "','" + compare_localTimeZone + "'), '%%b %%d, %%Y')) FROM course as t1, assignment as t2 WHERE (t1.id = t2.course_id) AND (t2.active=TRUE AND t1.active=TRUE) AND (t2.compare_end >= '" + end_date + "' OR answer_end >= '" + end_date + "' OR self_eval_end >= '" + end_date + "');"); result = conn.execute(sql_text) diff --git a/compair/configuration.py b/compair/configuration.py index 41b471014..65071b0ef 100644 --- a/compair/configuration.py +++ b/compair/configuration.py @@ -23,6 +23,8 @@ import os import json import re +import pytz +import time from distutils.util import strtobool from flask import Config @@ -91,7 +93,7 @@ 'KALTURA_SECRET', 'KALTURA_PLAYER_ID', 'MAIL_SERVER', 'MAIL_DEBUG', 'MAIL_USERNAME', 'MAIL_PASSWORD', 'MAIL_DEFAULT_SENDER', 'MAIL_SUPPRESS_SEND', - 'GA_TRACKING_ID' + 'GA_TRACKING_ID', 'APP_TIMEZONE' ] env_bool_overridables = [ @@ -150,3 +152,7 @@ 
config['APP_LOGIN_ENABLED'] = True config['CAS_LOGIN_ENABLED'] = False config['SAML_LOGIN_ENABLED'] = False + +# configuring APP_TIMEZONE +if not(config['APP_TIMEZONE'] in pytz.all_timezones): + config['APP_TIMEZONE'] = time.strftime('%Z') diff --git a/compair/manage/kaltura.py b/compair/manage/kaltura.py new file mode 100644 index 000000000..fe037d594 --- /dev/null +++ b/compair/manage/kaltura.py @@ -0,0 +1,275 @@ +""" +Migrate Kaltura media to new Kaltura instance. We're switching from on-prem +hosted Kaltura to cloud Kaltura. When videos are transferred to the new Kaltura +instance, a new entry ID is generated. We will be given a CSV mapping of old +to new entry IDs, and will have to update our Kaltura data accordingly. + +Assuming that collision between old and new entry IDs are impossible. + +Requires that these Kaltura env vars are set to the new Kaltura environment: + +* KALTURA_SERVICE_URL +* KALTURA_PARTNER_ID +* KALTURA_SECRET +* KALTURA_USER_ID +* KALTURA_PLAYER_ID + +Usage options, run in app root: + + python manage.py kaltura migrate /path/to/mappingCsv.csv + +-d Do a dry run, without actually making any changes to the database: + + python manage.py kaltura migrate -d /path/to/mappingCsv.csv + +-n If present, tells the CSV parser not to skip the first row. By default, we +assume the first row is a header row and skip it: + + python manage.py kaltura migrate -n /path/to/mappingCsv.csv + +""" + +import csv +from datetime import datetime +import re +from urllib.parse import unquote_plus + +from KalturaClient import KalturaClient, KalturaConfiguration +from KalturaClient.Plugins.Core import (KalturaSessionType, KalturaMediaEntry, + KalturaMediaType) +from flask_script import Manager + +from compair.core import db +from compair.kaltura.core import KalturaCore +from compair.models import Answer, KalturaMedia, File +from flask import current_app + +manager = Manager(usage="Kaltura Migration") + +def readMappingCsv(mappingCsv, noHeader): + oldToNewEntryIds = {} + idRe = re.compile(r"\d_\w{8}") + with open(mappingCsv, 'r') as csvFile: + csvReader = csv.reader(csvFile, skipinitialspace=True) + for row in csvReader: + if not noHeader and csvReader.line_num == 1: + continue + oldEntryId = row[0] + newEntryId = row[1] + if not (re.match(idRe, oldEntryId) and re.match(idRe, newEntryId)): + raise ValueError(f"Mapping file line {csvReader.line_num} has a value not in entry ID format.") + oldToNewEntryIds[oldEntryId] = newEntryId + if oldToNewEntryIds: + return oldToNewEntryIds + raise ValueError("Mapping file is empty") + + +def msg(msg, logfile): + print(msg) + logfile.write(f'{msg}\n') + logfile.flush() + + +def summarize(numToMigrate, numInvalid, numMigrated, numNoMapping, numTotal, + logfile): + msg( '-------- Summary --------', logfile) + msg(f' To be Migrated: {numToMigrate}', logfile) + msg(f' To be Deleted: {numInvalid}', logfile) + msg(f'Already Migrated: {numMigrated}', logfile) + msg(f' No Mapping: {numNoMapping}', logfile) + msg(f' Total: {numTotal}', logfile) + msg( '-------- ------- --------', logfile) + + +def deleteInvalidKalturaMedias(medias, logfile): + for media in medias: + msg(f'Deleting invalid kaltura media id {media.id}', logfile) + db.session.delete(media) + + +def connectKalturaApi(): + kClient = KalturaClient(KalturaConfiguration( + serviceUrl=KalturaCore.service_url())) + kSession = kClient.session.start( + KalturaCore.secret(), + KalturaCore.user_id(), + KalturaSessionType.ADMIN, + KalturaCore.partner_id(), + 86400, # session expires in 1 hour + "appID:compair" + 
) + kClient.setKs(kSession) + return kClient + + +def migrateKalturaMedias(medias, oldToNewEntryIds, logfile): + kClient = connectKalturaApi() + + for media in medias: + mediaId = media.id + oldEntryId = media.entry_id + newEntryId = oldToNewEntryIds[oldEntryId] + msg(f'Processing id {mediaId}: Old {oldEntryId} to New {newEntryId}', + logfile) + newInfo = kClient.media.get(newEntryId, -1) + media.download_url = newInfo.getDownloadUrl() + media.partner_id = newInfo.getPartnerId() + media.service_url = KalturaCore.service_url() + media.player_id = KalturaCore.player_id() + media.entry_id = newEntryId + #db.session.add(media) + + +# Some videos were linked in answer content, we want to switch them to using +# the file attachment system, so have to create the associated file and +# kaltura_media table entries for them. +# +# Here's a complex query that gets answers that has learning.video.ubc.ca links +# in them and don't have an associated Kaltura attachment. For our data, we can +# get away with just seeing if the answer doesn't have an attached file entry, +# so that's what what was implemented, but this query is documented for ref: +# select answer.id, answer.content, answer.file_id, file.kaltura_media_id from answer left join file on answer.file_id = file.id where answer.content like '%learning.video.ubc.ca%' and (answer.file_id is NULL or file.kaltura_media_id is NULL) +def migrateAnswerLinks(answers, oldToNewEntryIds, logfile): + kClient = connectKalturaApi() + regex = re.compile(r'https://learning.video.ubc.ca/media/([%\w+-]+?)/(\w+?)"') + count = 0 + for answer in answers: + count += 1 + msg(f'Answer {count}: {answer.id}', logfile) + link = re.search(regex, answer.content) + if not link: + msg(f'Error: Answer {answer.id} content has no Kaltura link?', logfile) + continue + oldEntryId = link.group(2) + newEntryId = oldToNewEntryIds[oldEntryId] + newInfo = kClient.media.get(newEntryId, -1) + videoName = newInfo.getName() + ".mp4" + msg(f' Video Name: {videoName}', logfile) + msg(f' Old Entry ID: {oldEntryId}', logfile) + msg(f' New Entry ID: {newEntryId}', logfile) + msg(f' Creating Kaltura File Entries...', logfile) + kalturaMedia = KalturaMedia( + user=answer.user, + download_url=newInfo.getDownloadUrl(), + # can't figure out how to get the original source extension, so + # just assuming mp4 + file_name=videoName, + service_url=KalturaCore.service_url(), + partner_id=newInfo.getPartnerId(), + player_id=KalturaCore.player_id(), + entry_id=newEntryId + ) + db.session.add(kalturaMedia) + fileEntry = File( + user=answer.user, + kaltura_media=kalturaMedia, + alias=videoName + ) + db.session.add(fileEntry) + answer.file = fileEntry + db.session.commit() + fileEntry.name = fileEntry.uuid + '.' 
+ kalturaMedia.extension + db.session.commit() + msg(f' Kaltura File Entries Created!', logfile) + + +# Some videos were linked in answer content, we want to switch them to using +# the file attachment system like all other kaltura media +@manager.command +def links(mappingCsv, noHeader=False, dryRun=False): + ts = datetime.now().isoformat(timespec='seconds') + logfile = open(f'kaltura-links-migration-log-{ts}.log', 'a') + msg('Starting Kaltura links migration', logfile) + oldToNewEntryIds = readMappingCsv(mappingCsv, noHeader) + newToOldEntryIds = dict(map(reversed, oldToNewEntryIds.items())) + needMigrationAnswers = [] + numInvalid = 0 + numAlreadyMigrated = 0 + numNoMapping = 0 + numTotal = 0 + answers = Answer.query \ + .filter(Answer.content.ilike('%learning.video.ubc.ca%')) \ + .filter(Answer.file_id.is_(None)) \ + .all() + regex = re.compile(r'https://learning.video.ubc.ca/media/([%\w+-]+?)/(\w+?)"') + # find out how much work needs to be done + for answer in answers: + numTotal += 1 + link = re.search(regex, answer.content) + if not link: + msg(f'Answer {answer.id} content has no Kaltura link?', logfile) + numInvalid += 1 + continue + entryId = link.group(2) + if not entryId: + msg(f'Answer {answer.id} Kaltura link has no entry ID', logfile) + numInvalid += 1 + elif entryId in oldToNewEntryIds: + msg(f"Migration needed on answer {answer.id}: Entry {entryId}", logfile) + needMigrationAnswers.append(answer) + elif entryId in newToOldEntryIds: + # this is always 0, since answers with a file_id won't show up in + # the query again + msg(f"Already migrated answer {answer.id}: Entry {entryId}", logfile) + numAlreadyMigrated += 1 + else: + # didn't find a mapping, perhaps missing from migration? + msg(f'No mapping for answer {answer.id}: Entry {entryId}', logfile) + numNoMapping += 1 + # summarize what needs to be done + summarize(len(needMigrationAnswers), numInvalid, numAlreadyMigrated, + numNoMapping, numTotal, logfile) + if dryRun: + msg(f'*** Dry run completed, no changes were made ***', logfile) + else: + msg(f'Starting database session', logfile) + migrateAnswerLinks(needMigrationAnswers, oldToNewEntryIds, logfile) + msg(f'Committing to database', logfile) + db.session.commit() + logfile.close() + + +@manager.command +def migrate(mappingCsv, noHeader=False, dryRun=False): + ts = datetime.now().isoformat(timespec='seconds') + logfile = open(f'kaltura-migration-log-{ts}.log', 'a') + msg('Starting Kaltura migration', logfile) + oldToNewEntryIds = readMappingCsv(mappingCsv, noHeader) + newToOldEntryIds = dict(map(reversed, oldToNewEntryIds.items())) + invalidKalturaMedias = [] # can't be migrated, might as well delete + needMigrationMedias = [] # needs to be migrated + numAlreadyMigrated = 0 + numNoMapping = 0 + numTotal = 0 + kalturaMedias = KalturaMedia.query.all() + # find out how much work needs to be done + for kalturaMedia in kalturaMedias: + numTotal += 1 + mediaId = kalturaMedia.id + entryId = kalturaMedia.entry_id + if not entryId: + msg(f'Empty entry ID for id {mediaId}', logfile) + invalidKalturaMedias.append(kalturaMedia) + elif entryId in oldToNewEntryIds: + msg(f"Migration needed for id {mediaId}: Entry {entryId}", logfile) + needMigrationMedias.append(kalturaMedia) + elif entryId in newToOldEntryIds: + msg(f"Already migrated id {mediaId}: Entry {entryId}", logfile) + numAlreadyMigrated += 1 + else: + # didn't find a mapping, perhaps missing from migration? 
+ msg(f'No mapping for id {mediaId}: Entry {entryId}', logfile) + numNoMapping += 1 + # summarize what needs to be done + summarize(len(needMigrationMedias), len(invalidKalturaMedias), + numAlreadyMigrated, numNoMapping, numTotal, logfile) + # do the actual work in a transaction + if dryRun: + msg(f'*** Dry run completed, no changes were made ***', logfile) + else: + msg(f'Starting database session', logfile) + deleteInvalidKalturaMedias(invalidKalturaMedias, logfile) + migrateKalturaMedias(needMigrationMedias, oldToNewEntryIds, logfile) + msg(f'Committing to database', logfile) + db.session.commit() + logfile.close() diff --git a/compair/settings.py b/compair/settings.py index 8daf60594..3cd055ffb 100644 --- a/compair/settings.py +++ b/compair/settings.py @@ -1,5 +1,5 @@ import os - +import time """ Default settings, if no other settings is specified, values here are used. """ @@ -163,3 +163,7 @@ # Allow impersonation IMPERSONATION_ENABLED = True + +# when APP_TIMEZONE is empty or incorrect, it will default to system timezone +# example America/Vancouver or America/Montreal +APP_TIMEZONE = time.strftime('%Z') diff --git a/compair/static/modules/assignment/assignment-search-partial.html b/compair/static/modules/assignment/assignment-search-partial.html index e25b8d979..cd62d63ed 100644 --- a/compair/static/modules/assignment/assignment-search-partial.html +++ b/compair/static/modules/assignment/assignment-search-partial.html @@ -12,7 +12,7 @@

         Plan Release Date
 [The remainder of this hunk, a one-line change to the associated date input, is not recoverable: its HTML markup was stripped during extraction.]
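The end-date search endpoint in this change set converts a caller-supplied date from the requested local time zone into the application's APP_TIMEZONE before comparing it against assignment end dates. The sketch below illustrates that conversion with pytz; it is not code from the PR, the function name is invented, and it assumes both arguments are valid IANA zone names:

```python
from datetime import datetime
import pytz

def to_app_timezone(date_str, local_tz_name, app_tz_name):
    """Interpret a YYYY-MM-DD date as midnight in the caller's time zone and
    express it in the application's time zone."""
    naive = datetime.strptime(date_str + " 00:00:00", "%Y-%m-%d %H:%M:%S")
    local_dt = pytz.timezone(local_tz_name).localize(naive, is_dst=None)
    return local_dt.astimezone(pytz.timezone(app_tz_name))

# Midnight on 2021-03-01 in Vancouver is 08:00 UTC on the same day.
print(to_app_timezone("2021-03-01", "America/Vancouver", "UTC"))
```

One related caveat: the `time.strftime('%Z')` fallback used for APP_TIMEZONE can return an abbreviation such as "PST" that pytz may not accept as a zone name, so configuring APP_TIMEZONE with a full name like America/Vancouver is the safer choice.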