diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 2e0c0ca2..142b348c 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -7,7 +7,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.9] + python-version: [3.12] steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 1fff83db..0403d5bd 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -20,18 +20,28 @@ jobs: pip freeze | tee pip_freeze.log - name: Unit tests run: python3 manage.py test -v2 + - name: Regression tests + id: regression_tests run: bash RegressionTests/run.sh # Continue even if tests fail, so that we can collect test outputs for debugging continue-on-error: true + - name: Collect outputs + # This step will run even if the regression tests failed run: | find . -type f \( -name "*.log" -o -name "*.out" -o -name "*.diff" \) -print | cut -c3- > listing.txt echo "Creating an artifact with the following files:" cat listing.txt 7z a -tzip regression-tests-appraise.zip @listing.txt + - name: Publish outputs - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: regression-tests-appraise path: regression-tests-appraise.zip + + # Enforce the failure + - name: Check on failures + if: steps.regression_tests.outcome == 'failure' + run: exit 1 diff --git a/Appraise/settings.py b/Appraise/settings.py index b0347d44..6a307b1e 100644 --- a/Appraise/settings.py +++ b/Appraise/settings.py @@ -9,6 +9,7 @@ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ + import logging import os import warnings @@ -37,7 +38,9 @@ ALLOWED_HOSTS = os.environ.get('APPRAISE_ALLOWED_HOSTS', '127.0.0.1').split(',') -CSRF_TRUSTED_ORIGINS = os.environ.get('APPRAISE_CSRF_TRUSTED_ORIGINS', 'https://*.127.0.0.1').split(',') +CSRF_TRUSTED_ORIGINS = os.environ.get( + 'APPRAISE_CSRF_TRUSTED_ORIGINS', 'https://*.127.0.0.1' +).split(',') WSGI_APPLICATION = os.environ.get( 'APPRAISE_WSGI_APPLICATION', 'Appraise.wsgi.application' @@ -208,7 +211,7 @@ # Base context for all views. 
BASE_CONTEXT = { - 'commit_tag': '#wmt24dev', + 'commit_tag': '#wmt25dev', 'title': 'Appraise evaluation system', 'static_url': STATIC_URL, } diff --git a/Appraise/urls.py b/Appraise/urls.py index 2274e1c6..09946d45 100644 --- a/Appraise/urls.py +++ b/Appraise/urls.py @@ -3,6 +3,7 @@ See LICENSE for usage details """ + # pylint: disable=unused-import,import-error from django.conf.urls import handler404 from django.conf.urls import handler500 @@ -187,8 +188,8 @@ name='pairwise-assessment-document', ), re_path( - r'^campaign-status/(?P<campaign_name>[a-zA-Z0-9]+)/' - r'(?P<sort_key>[0123456])?/?$', + r'^campaign-status/(?P<campaign_name>[a-zA-Z0-9]+(,[a-zA-Z0-9]+)*)/' + r'(?P<sort_key>[a-zA-Z0-9_])?/?$', campaign_views.campaign_status, name='campaign_status', ), diff --git a/Appraise/utils.py b/Appraise/utils.py index 4e9093e2..27540d8a 100644 --- a/Appraise/utils.py +++ b/Appraise/utils.py @@ -3,6 +3,7 @@ See LICENSE for usage details """ + import logging from Appraise.settings import LOG_HANDLER @@ -33,8 +34,8 @@ def _compute_user_total_annotation_time(timestamps): def _clamp_time(seconds): # if a segment takes longer than 10 minutes, set it to 5 minutes # it's likely due to inactivity - if seconds >= 10*60: - return 5*60 + if seconds >= 10 * 60: + return 5 * 60 else: return seconds @@ -54,4 +55,4 @@ def _clamp_time(seconds): # Update the previous end timestamp previous_end_timestamp = end_timestamp - return total_annotation_time \ No newline at end of file + return total_annotation_time diff --git a/Appraise/wsgi.py b/Appraise/wsgi.py index 7ec10196..6c2ef408 100644 --- a/Appraise/wsgi.py +++ b/Appraise/wsgi.py @@ -6,6 +6,7 @@ For more information on this file, see https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ """ + import os from django.core.wsgi import get_wsgi_application diff --git a/Campaign/admin.py b/Campaign/admin.py index 55d82a1c..fce8c9db 100644 --- a/Campaign/admin.py +++ b/Campaign/admin.py @@ -1,6 +1,7 @@ """ Campaign admin.py """ + # pylint: disable=C0330,import-error from django.contrib import admin from django.contrib.admin.filters import AllValuesFieldListFilter @@ -10,14 +11,18 @@ from Campaign.models import CampaignTeam from Campaign.models import TrustedUser from EvalData.admin import BaseMetadataAdmin - +from django.http import HttpResponse +import csv +import zipfile +from io import StringIO +import importlib class DropdownFilter(AllValuesFieldListFilter): """ Experimental dropdown filter.
""" - template = 'Campaign/filter_select.html' + template = "Campaign/filter_select.html" class CampaignTeamAdmin(BaseMetadataAdmin): @@ -26,33 +31,33 @@ class CampaignTeamAdmin(BaseMetadataAdmin): """ list_display = [ - 'teamName', - 'owner', - 'teamMembers', - 'requiredAnnotations', - 'requiredHours', - 'completionStatus', + "teamName", + "owner", + "teamMembers", + "requiredAnnotations", + "requiredHours", + "completionStatus", ] + BaseMetadataAdmin.list_display # type: ignore - list_filter = ['owner'] + BaseMetadataAdmin.list_filter # type: ignore + list_filter = ["owner"] + BaseMetadataAdmin.list_filter # type: ignore search_fields = [ - 'teamName', - 'owner__username', - 'owner__first_name', - 'owner__last_name', + "teamName", + "owner__username", + "owner__first_name", + "owner__last_name", ] + BaseMetadataAdmin.search_fields # type: ignore - filter_horizontal = ['members'] + filter_horizontal = ["members"] fieldsets = ( ( None, { - 'fields': ( - 'teamName', - 'owner', - 'members', - 'requiredAnnotations', - 'requiredHours', + "fields": ( + "teamName", + "owner", + "members", + "requiredAnnotations", + "requiredHours", ) }, ), @@ -65,22 +70,22 @@ class CampaignDataAdmin(BaseMetadataAdmin): """ list_display = [ - 'dataName', - 'market', - 'metadata', - 'dataValid', - 'dataReady', + "dataName", + "market", + "metadata", + "dataValid", + "dataReady", ] + BaseMetadataAdmin.list_display # type: ignore list_filter = [ - 'dataValid', - 'dataReady', + "dataValid", + "dataReady", ] + BaseMetadataAdmin.list_filter # type: ignore search_fields = [ # nothing model specific ] + BaseMetadataAdmin.search_fields # type: ignore fieldsets = ( - (None, {'fields': ('dataFile', 'market', 'metadata')}), + (None, {"fields": ("dataFile", "market", "metadata")}), ) + BaseMetadataAdmin.fieldsets # type: ignore @@ -89,7 +94,8 @@ class CampaignAdmin(BaseMetadataAdmin): Model admin for Campaign instances. 
""" - list_display = ['campaignName'] + BaseMetadataAdmin.list_display + ['id'] # type: ignore + list_display = ["campaignName"] + \ + BaseMetadataAdmin.list_display + ["id"] # type: ignore list_filter = [ # nothing model specific ] + BaseMetadataAdmin.list_filter # type: ignore @@ -97,39 +103,89 @@ class CampaignAdmin(BaseMetadataAdmin): # nothing model specific ] + BaseMetadataAdmin.search_fields # type: ignore - filter_horizontal = ['batches'] + filter_horizontal = ["batches"] fieldsets = ( ( None, { - 'fields': ( - 'campaignName', - 'packageFile', - 'teams', - 'batches', - 'campaignOptions', + "fields": ( + "campaignName", + "packageFile", + "teams", + "batches", + "campaignOptions", ) }, ), ) + BaseMetadataAdmin.fieldsets # type: ignore + actions = ["export_results"] + + def _retrieve_csv(self, current_campaign): + # Get the task type corresponding to the campaign + qs_name = current_campaign.get_campaign_type().lower() + qs_attr = "evaldata_{0}_campaign".format(qs_name) + qs_obj = getattr(current_campaign, qs_attr, None) + cls = type(qs_obj.all()[0]) + cls_name = cls.__name__ + cls_name = cls_name.replace("Task", "Result") + module = importlib.import_module(cls.__module__) + cls = getattr(module, cls_name) + + # Now get the content + f = StringIO() + writer = csv.writer(f) + csv_content = cls.get_system_data(current_campaign.id, extended_csv=True) + for r in csv_content: + writer.writerow(r) + + f.seek(0) + return f + + + def export_results(self, request, queryset): + if len(queryset) == 1: + current_campaign = queryset[0] + csv_content = self._retrieve_csv(current_campaign) + filename = f"results_{current_campaign.campaignName}.csv" + response = HttpResponse(csv_content, content_type="text/csv") + response["Content-Disposition"] = f"attachment; filename={filename}" + else: + response = HttpResponse(content_type='application/zip') + response['Content-Disposition'] = 'attachment; filename="campaign_results.zip"' + + # Create a zip file with selected objects + with zipfile.ZipFile(response, 'w') as zipf: + for current_campaign in queryset: + + csv_content = self._retrieve_csv(current_campaign) + # Add objects to the zip file, customize as per your model's data + # For example, you can add an object's name and description to a text file in the zip + filename = f"results_{current_campaign.campaignName}.csv" + zipf.writestr(filename, csv_content.getvalue()) + return response + + export_results.short_description = "Download results" + + + class TrustedUserAdmin(admin.ModelAdmin): """ Model admin for Campaign instances. 
""" - list_display = ['user', 'campaign'] + list_display = ["user", "campaign"] list_filter = [ - ('campaign__campaignName', DropdownFilter), - # 'campaign' + ("campaign__campaignName", DropdownFilter), + # "campaign" ] search_fields = [ # type: ignore # nothing model specific ] - fieldsets = ((None, {'fields': ('user', 'campaign')}),) + fieldsets = ((None, {"fields": ("user", "campaign")}),) admin.site.register(CampaignTeam, CampaignTeamAdmin) diff --git a/Campaign/management/commands/ComputeSystemScores.py b/Campaign/management/commands/ComputeSystemScores.py index 4898fba1..9e948135 100644 --- a/Campaign/management/commands/ComputeSystemScores.py +++ b/Campaign/management/commands/ComputeSystemScores.py @@ -10,6 +10,7 @@ from EvalData.models import DirectAssessmentResult from EvalData.models import DirectAssessmentTask + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'Computes system scores over all results' diff --git a/Campaign/management/commands/ComputeWMT21Results.py b/Campaign/management/commands/ComputeWMT21Results.py index d5c6dab6..76eaa645 100644 --- a/Campaign/management/commands/ComputeWMT21Results.py +++ b/Campaign/management/commands/ComputeWMT21Results.py @@ -463,7 +463,7 @@ def handle(self, *args, **options): wins_for_system = defaultdict(list) losses_for_system = defaultdict(list) p_level = 0.05 - for (sysA, sysB) in combinations_with_replacement(system_ids, 2): + for sysA, sysB in combinations_with_replacement(system_ids, 2): sysA_ids = set([x[0] for x in system_z_scores[sysA]]) sysB_ids = set([x[0] for x in system_z_scores[sysB]]) good_ids = set.intersection(sysA_ids, sysB_ids) diff --git a/Campaign/management/commands/ComputeZScores.py b/Campaign/management/commands/ComputeZScores.py index f11be90e..183e0ee2 100644 --- a/Campaign/management/commands/ComputeZScores.py +++ b/Campaign/management/commands/ComputeZScores.py @@ -427,7 +427,7 @@ def handle(self, *args, **options): wins_for_system = defaultdict(list) p_level = 0.05 - for (sysA, sysB) in combinations_with_replacement(system_ids, 2): + for sysA, sysB in combinations_with_replacement(system_ids, 2): sysA_ids = set([x[0] for x in system_z_scores[sysA]]) sysB_ids = set([x[0] for x in system_z_scores[sysB]]) good_ids = set.intersection(sysA_ids, sysB_ids) @@ -577,7 +577,7 @@ def sort_by_wins_and_z_score(x, y): key = system_id[:4].upper() vsystems[key].extend(system_z_scores[system_id]) - for (sysA, sysB) in combinations_with_replacement( + for sysA, sysB in combinations_with_replacement( ['GOOG', 'CAND', 'PROD'], 2 ): sysA_scores = [x[1] for x in vsystems[sysA]] diff --git a/Campaign/management/commands/InitCampaignMMT18Task1.py b/Campaign/management/commands/InitCampaignMMT18Task1.py index d43dd536..26338ed9 100644 --- a/Campaign/management/commands/InitCampaignMMT18Task1.py +++ b/Campaign/management/commands/InitCampaignMMT18Task1.py @@ -31,6 +31,7 @@ } REDUNDANCY = 1 + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'Initialises campaign MMT18 Task #1' diff --git a/Campaign/management/commands/InitCampaignMMT18Task1b.py b/Campaign/management/commands/InitCampaignMMT18Task1b.py index 6db7b98f..e6829bda 100644 --- a/Campaign/management/commands/InitCampaignMMT18Task1b.py +++ b/Campaign/management/commands/InitCampaignMMT18Task1b.py @@ -27,6 +27,7 @@ } REDUNDANCY = 1 + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'Initialises campaign MMT18 Task #1.b' diff --git a/Campaign/management/commands/InitCampaignMMT18Task1bv2.py 
b/Campaign/management/commands/InitCampaignMMT18Task1bv2.py index a09ffca5..653cb06f 100644 --- a/Campaign/management/commands/InitCampaignMMT18Task1bv2.py +++ b/Campaign/management/commands/InitCampaignMMT18Task1bv2.py @@ -27,6 +27,7 @@ } REDUNDANCY = 1 + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'Initialises campaign MMT18 Task #1.b v2' diff --git a/Campaign/management/commands/InitCampaignMMT18Task1bv3.py b/Campaign/management/commands/InitCampaignMMT18Task1bv3.py index 46c8e2c7..7503db6c 100644 --- a/Campaign/management/commands/InitCampaignMMT18Task1bv3.py +++ b/Campaign/management/commands/InitCampaignMMT18Task1bv3.py @@ -27,6 +27,7 @@ } REDUNDANCY = 1 + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'Initialises campaign MMT18 Task #1.b v3' diff --git a/Campaign/management/commands/InitCampaignMMT18Task1v2.py b/Campaign/management/commands/InitCampaignMMT18Task1v2.py index 99c7908f..bd5fb2c2 100644 --- a/Campaign/management/commands/InitCampaignMMT18Task1v2.py +++ b/Campaign/management/commands/InitCampaignMMT18Task1v2.py @@ -31,6 +31,7 @@ } REDUNDANCY = 1 + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'Initialises campaign MMT18 Task #1 v2' diff --git a/Campaign/management/commands/InitCampaignMMT18Task1v3.py b/Campaign/management/commands/InitCampaignMMT18Task1v3.py index 9d364728..2caa2932 100644 --- a/Campaign/management/commands/InitCampaignMMT18Task1v3.py +++ b/Campaign/management/commands/InitCampaignMMT18Task1v3.py @@ -27,6 +27,7 @@ } REDUNDANCY = 1 + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'Initialises campaign MMT18 Task #1 v3' diff --git a/Campaign/management/commands/InitCampaignWMT18RefDA.py b/Campaign/management/commands/InitCampaignWMT18RefDA.py index 7513039a..117c58d3 100644 --- a/Campaign/management/commands/InitCampaignWMT18RefDA.py +++ b/Campaign/management/commands/InitCampaignWMT18RefDA.py @@ -23,6 +23,7 @@ TASKS = 100 REDUNDANCY = 1 + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'Initialises campaign WMT18 RefDA' diff --git a/Campaign/management/commands/InitCampaignWMT18RefDA2.py b/Campaign/management/commands/InitCampaignWMT18RefDA2.py index c46a0b15..22479855 100644 --- a/Campaign/management/commands/InitCampaignWMT18RefDA2.py +++ b/Campaign/management/commands/InitCampaignWMT18RefDA2.py @@ -23,6 +23,7 @@ TASKS = 100 REDUNDANCY = 1 + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'Initialises campaign WMT18 RefDA2' diff --git a/Campaign/management/commands/InitCampaignWMT18RefDA3.py b/Campaign/management/commands/InitCampaignWMT18RefDA3.py index 4a890793..c5ceaf37 100644 --- a/Campaign/management/commands/InitCampaignWMT18RefDA3.py +++ b/Campaign/management/commands/InitCampaignWMT18RefDA3.py @@ -23,6 +23,7 @@ TASKS = 34 REDUNDANCY = 1 + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'Initialises campaign WMT18 RefDA3' diff --git a/Campaign/management/commands/InitCampaignWMT18RefDA4.py b/Campaign/management/commands/InitCampaignWMT18RefDA4.py index 8d2e4852..b58d4d0d 100644 --- a/Campaign/management/commands/InitCampaignWMT18RefDA4.py +++ b/Campaign/management/commands/InitCampaignWMT18RefDA4.py @@ -23,6 +23,7 @@ TASKS = 100 REDUNDANCY = 1 + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'Initialises campaign WMT18 RefDA4' diff --git a/Campaign/management/commands/InitCampaignWMT18SrcDA.py b/Campaign/management/commands/InitCampaignWMT18SrcDA.py index 
f292a99b..e066880d 100644 --- a/Campaign/management/commands/InitCampaignWMT18SrcDA.py +++ b/Campaign/management/commands/InitCampaignWMT18SrcDA.py @@ -23,6 +23,7 @@ TASKS = 34 REDUNDANCY = 1 + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'Initialises campaign WMT18 SrcDA' diff --git a/Campaign/management/commands/MakeAnnotation.py b/Campaign/management/commands/MakeAnnotation.py index 9faad8c5..9c0be3de 100644 --- a/Campaign/management/commands/MakeAnnotation.py +++ b/Campaign/management/commands/MakeAnnotation.py @@ -123,9 +123,7 @@ def handle(self, *args, **options): exit() if options["verbosity"] > 1: - self.stdout.write( - f"Available context keys: {response.context.keys()}" - ) + self.stdout.write(f"Available context keys: {response.context.keys()}") # Each task has different context, so the POST request needs to be # built separately for each task type diff --git a/Campaign/management/commands/StartNewCampaign.py b/Campaign/management/commands/StartNewCampaign.py index 8680b7b7..3f172e13 100644 --- a/Campaign/management/commands/StartNewCampaign.py +++ b/Campaign/management/commands/StartNewCampaign.py @@ -3,6 +3,7 @@ See LICENSE for usage details """ + from datetime import datetime from os import path @@ -26,6 +27,7 @@ from Dashboard.utils import generate_confirmation_token from EvalData.management.commands.UpdateEvalDataModels import _update_eval_data_models + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'A single command for creating a new campaign based on manifest file' diff --git a/Campaign/management/commands/UpdateCampaignModels.py b/Campaign/management/commands/UpdateCampaignModels.py index 796db261..04715028 100644 --- a/Campaign/management/commands/UpdateCampaignModels.py +++ b/Campaign/management/commands/UpdateCampaignModels.py @@ -3,6 +3,7 @@ See LICENSE for usage details """ + # pylint: disable=W0611 from os import path @@ -21,6 +22,7 @@ INFO_MSG = 'INFO: ' WARNING_MSG = 'WARN: ' + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'Updates object instances required for Campaign app' diff --git a/Campaign/management/commands/init_campaign.py b/Campaign/management/commands/init_campaign.py index c8846ea6..537b8eff 100644 --- a/Campaign/management/commands/init_campaign.py +++ b/Campaign/management/commands/init_campaign.py @@ -3,6 +3,7 @@ See LICENSE for usage details """ + from datetime import datetime from django.core.management.base import BaseCommand @@ -21,6 +22,7 @@ from Campaign.utils import CAMPAIGN_TASK_TYPES from Dashboard.utils import generate_confirmation_token + # pylint: disable=C0111,C0330,E1101 class Command(BaseCommand): help = 'Initialises campaign based on manifest file' diff --git a/Campaign/management/commands/validatecampaigndata.py b/Campaign/management/commands/validatecampaigndata.py index 7432425f..4331c3f7 100644 --- a/Campaign/management/commands/validatecampaigndata.py +++ b/Campaign/management/commands/validatecampaigndata.py @@ -1,6 +1,7 @@ """ Appraise """ + # pylint: disable=C0103,C0111,C0330,E1101 import sys from json import loads diff --git a/Campaign/models.py b/Campaign/models.py index ed2632f1..46be62ed 100644 --- a/Campaign/models.py +++ b/Campaign/models.py @@ -1,6 +1,7 @@ """ Campaign models.py """ + # pylint: disable=C0111,C0330,E1101 from json import JSONDecodeError from json import loads diff --git a/Campaign/tests.py b/Campaign/tests.py index 35568675..7f23c473 100644 --- a/Campaign/tests.py +++ b/Campaign/tests.py @@ -3,6 +3,7 @@ See LICENSE for usage 
details """ + from pathlib import Path from django.contrib.auth.models import User diff --git a/Campaign/utils.py b/Campaign/utils.py index 18b17ecc..bcabdb08 100644 --- a/Campaign/utils.py +++ b/Campaign/utils.py @@ -3,6 +3,7 @@ See LICENSE for usage details """ + from collections import defaultdict from collections import OrderedDict from hashlib import md5 diff --git a/Campaign/views.py b/Campaign/views.py index d1198771..7c07b62b 100644 --- a/Campaign/views.py +++ b/Campaign/views.py @@ -11,7 +11,6 @@ from math import floor from math import sqrt -from django.contrib.auth.decorators import login_required from django.core.management.base import CommandError from django.http import HttpResponse @@ -23,6 +22,8 @@ from EvalData.models import PairwiseAssessmentResult from EvalData.models import seconds_to_timedelta from EvalData.models import TASK_DEFINITIONS +from EvalData.models import TaskAgenda +from EvalData.models.direct_assessment_document import DirectAssessmentDocumentTask # pylint: disable=import-error @@ -31,8 +32,7 @@ LOGGER = _get_logger(name=__name__) -@login_required -def campaign_status(request, campaign_name, sort_key=2): +def campaign_status(request, campaign_name, sort_key=None): """ Campaign status view with completion details. """ @@ -40,8 +40,19 @@ def campaign_status(request, campaign_name, sort_key=2): 'Rendering campaign status view for user "%s".', request.user.username or "Anonymous", ) - if sort_key is None: - sort_key = 2 + + if "," in campaign_name: + responses = [campaign_status(request, name, sort_key) for name in campaign_name.split(",")] + if not all([response.headers["Content-Type"] == responses[0].headers["Content-Type"] for response in responses]): + return HttpResponse( + 'ERROR: You are mixing unrelated campaigns (views.py:campaign_status).', + content_type='text/plain', + ) + else: + return HttpResponse( + "\n\n".join([response.content.decode('utf-8') for response in responses]), + content_type=responses[0].headers["Content-Type"], + ) # Get Campaign instance for campaign name try: @@ -51,26 +62,37 @@ def campaign_status(request, campaign_name, sort_key=2): _msg = 'Failure to identify campaign {0}'.format(campaign_name) return HttpResponse(_msg, content_type='text/plain') + try: + campaign_opts = campaign.campaignOptions.lower().split(";") + # may raise KeyError + result_type = RESULT_TYPE_BY_CLASS_NAME[campaign.get_campaign_type()] + except KeyError as exc: + LOGGER.debug( + f'Invalid campaign type {campaign.get_campaign_type()} for campaign {campaign.campaignName}' + ) + LOGGER.error(exc) + return HttpResponse( + 'Invalid campaign type for campaign {0}'.format(campaign.campaignName), + content_type='text/plain', + ) + + # special handling for ESA + if "esa" in campaign_opts: + return campaign_status_esa(campaign) + else: + return campaign_status_plain(request, campaign, result_type, campaign_opts, sort_key) + + +def campaign_status_plain(request, campaign, result_type, campaign_opts, sort_key): _out = [] for team in campaign.teams.all(): for user in team.members.all(): - try: - campaign_opts = campaign.campaignOptions.lower().split(";") - # may raise KeyError - result_type = RESULT_TYPE_BY_CLASS_NAME[campaign.get_campaign_type()] - except KeyError as exc: - LOGGER.debug( - f'Invalid campaign type {campaign.get_campaign_type()} for campaign {campaign.campaignName}' - ) - LOGGER.error(exc) - continue _data = result_type.objects.filter( createdBy=user, completed=True, task__campaign=campaign.id ) is_mqm_or_esa = False - # Exclude document scores in 
document-level tasks, because we want to keep # the numbers reported on the campaign status page consistent across # accounts, which usually include different numbers of document @@ -107,9 +129,10 @@ def campaign_status(request, campaign_name, sort_key=2): ) # compute time override based on document times import collections + _time_pairs = collections.defaultdict(list) for x in _data: - _time_pairs[x[7]+ " ||| " +x[4]].append((x[0], x[1])) + _time_pairs[x[7] + " ||| " + x[4]].append((x[0], x[1])) _time_pairs = [ (min([x[0] for x in doc_v]), max([x[1] for x in doc_v])) for doc, doc_v in _time_pairs.items() @@ -118,31 +141,6 @@ def campaign_status(request, campaign_name, sort_key=2): (x[0], x[1], -len(json.loads(x[2])), x[3], x[4], x[5], x[6]) for x in _data ] - elif "esa" in campaign_opts: - is_mqm_or_esa = True - _data = _data.values_list( - 'start_time', - 'end_time', - 'score', - 'item__itemID', - 'item__targetID', - 'item__itemType', - 'item__id', - 'item__documentID', - ) - # compute time override based on document times - import collections - _time_pairs = collections.defaultdict(list) - for x in _data: - _time_pairs[x[7]+ " ||| " +x[4]].append((x[0], x[1])) - _time_pairs = [ - (min([x[0] for x in doc_v]), max([x[1] for x in doc_v])) - for doc, doc_v in _time_pairs.items() - ] - _data = [ - (x[0], x[1], x[2], x[3], x[4], x[5], x[6]) - for x in _data - ] else: _data = _data.values_list( 'start_time', @@ -171,7 +169,7 @@ def campaign_status(request, campaign_name, sort_key=2): _first_modified = str(_date_modified).split('.')[0] else: _first_modified = 'Never' - + # Compute last modified time _last_modified_raw = ( seconds_to_timedelta(max(_end_times)) if _end_times else None @@ -185,8 +183,10 @@ def campaign_status(request, campaign_name, sort_key=2): # Compute total annotation time if is_mqm_or_esa and _first_modified_raw and _last_modified_raw: # for MQM and ESA compute the lower and upper annotation times - # use only the end times - _annotation_time_upper = (_last_modified_raw-_first_modified_raw).seconds + # use only the end times + _annotation_time_upper = ( + _last_modified_raw - _first_modified_raw + ).seconds _hours = int(floor(_annotation_time_upper / 3600)) _minutes = int(floor((_annotation_time_upper % 3600) / 60)) _annotation_time_upper = f'{_hours:0>2d}h{_minutes:0>2d}m' @@ -206,7 +206,6 @@ def campaign_status(request, campaign_name, sort_key=2): else: _annotation_time = 'n/a' - _item = ( user.username, user.is_active, @@ -220,7 +219,7 @@ def campaign_status(request, campaign_name, sort_key=2): _out.append(_item) - _out.sort(key=lambda x: x[int(sort_key)]) + _out.sort(key=lambda x: x[sort_key if sort_key else 2]) _header = ( 'username', @@ -246,6 +245,131 @@ def campaign_status(request, campaign_name, sort_key=2): return HttpResponse(u'\n'.join(_txt), content_type='text/plain') +def campaign_status_esa(campaign) -> str: + import collections + out_str = """ + + + \n + """ + out_str += f"
<h1>{campaign.campaignName}</h1>\n" + out_str += "<table>\n" + out_str += """<tr> + <th>Username</th> + <th>Progress</th> + <th>First Modified</th> + <th>Last Modified</th> + <th>Time (Coarse❔)</th> + <th>Time (Real❔)</th> + </tr>\n + """ + for team in campaign.teams.all(): + for user in team.members.all(): + if user.is_staff: + continue + out_str += "<tr>" + + # Get the task for this user even when there's no completed data + task = None + + # First try to get the task from TaskAgenda + agenda = TaskAgenda.objects.filter(user=user, campaign=campaign).first() + if agenda: + # Try to get an open or completed task from the agenda + for serialized_task in agenda.serialized_open_tasks(): + potential_task = serialized_task.get_object_instance() + if isinstance(potential_task, DirectAssessmentDocumentTask): + task = potential_task + break + # If no open task, try completed tasks + if not task: + for serialized_task in agenda._completed_tasks.all(): + potential_task = serialized_task.get_object_instance() + if isinstance(potential_task, DirectAssessmentDocumentTask): + task = potential_task + break + + # Get the completed data for this user + _data = DirectAssessmentDocumentResult.objects.filter( + createdBy=user, completed=True, task__campaign=campaign.id + ) + _data_uniq_len = len({(item.item.sourceID, item.item.targetID, item.item.itemType, item.item.id) for item in _data}) + + # If no data, show 0 progress or show that no task is assigned + if not _data: + if task: + total_count = task.items.count() + out_str += f"<td>{user.username} 💤</td>" + out_str += f"<td>0/{total_count} (0%)</td>" + else: + # No task assigned to this user + out_str += f"<td>{user.username} 💤</td>" + out_str += "<td>No task assigned</td>" + out_str += "<td></td>" + out_str += "<td></td>" + out_str += "<td></td>" + out_str += "<td></td>" + + # If we have data, show the progress + else: + if not task: + # Fallback to checking the first result's task for the task ID + task = DirectAssessmentDocumentTask.objects.filter(id=_data[0].task_id).first() + if not task: + # Skip this user if we can't find the task + out_str += f"<td>{user.username} ❌</td>" + out_str += "<td>Task not found</td>" + out_str += "<td></td>" + out_str += "<td></td>" + out_str += "<td></td>" + out_str += "<td></td>" + out_str += "</tr>\n" + continue + + total_count = task.items.count() + if total_count == _data_uniq_len: + out_str += f"<td>{user.username} ✅</td>" + else: + out_str += f"<td>{user.username} 🛠️</td>" + out_str += f"<td>{_data_uniq_len}/{total_count} ({_data_uniq_len / total_count:.0%})</td>" + first_modified = min([x.start_time for x in _data]) + last_modified = max([x.end_time for x in _data]) + + first_modified_str = str(datetime(1970, 1, 1) + seconds_to_timedelta(first_modified)).split('.')[0] + last_modified_str = str(datetime(1970, 1, 1) + seconds_to_timedelta(last_modified)).split('.')[0] + # remove seconds + first_modified_str = ":".join(first_modified_str.split(":")[:-1]) + last_modified_str = ":".join(last_modified_str.split(":")[:-1]) + + out_str += f"<td>{first_modified_str}</td>" + out_str += f"<td>{last_modified_str}</td>" + annotation_time_upper = last_modified - first_modified + annotation_time_upper = f'{int(floor(annotation_time_upper / 3600)):0>2d}h {int(floor((annotation_time_upper % 3600) / 60)):0>2d}m' + out_str += f"<td>{annotation_time_upper}</td>" + + # consider time that's in any action within 10 minutes + times = sorted([item.start_time for item in _data] + [item.end_time for item in _data]) + annotation_time = sum([b-a for a, b in zip(times, times[1:]) if (b-a) < 10*60]) + annotation_time = f'{int(floor(annotation_time / 3600)):0>2d}h {int(floor((annotation_time % 3600) / 60)):0>2d}m' + + out_str += f"<td>{annotation_time}</td>" + + out_str += "</tr>\n" + + out_str += "</table>
" + return HttpResponse(out_str, content_type='text/html') + + def stat_reliable_testing(_data, campaign_opts, result_type): _annotations = len(set([x[6] for x in _data])) _user_mean = sum([x[2] for x in _data]) / (_annotations or 1) diff --git a/Dashboard/admin.py b/Dashboard/admin.py index 36289357..c09c0302 100644 --- a/Dashboard/admin.py +++ b/Dashboard/admin.py @@ -3,6 +3,7 @@ See LICENSE for usage details """ + # pylint: disable=import-error from django.contrib import admin diff --git a/Dashboard/apps.py b/Dashboard/apps.py index b44cea51..3105d180 100644 --- a/Dashboard/apps.py +++ b/Dashboard/apps.py @@ -3,6 +3,7 @@ See LICENSE for usage details """ + from django.apps import AppConfig diff --git a/Dashboard/management/commands/CreateInviteTokens.py b/Dashboard/management/commands/CreateInviteTokens.py index 275a6995..129c461c 100644 --- a/Dashboard/management/commands/CreateInviteTokens.py +++ b/Dashboard/management/commands/CreateInviteTokens.py @@ -3,6 +3,7 @@ See LICENSE for usage details """ + # pylint: disable=C0103 from collections import defaultdict from csv import DictReader diff --git a/Dashboard/management/commands/UpdateDashboardModels.py b/Dashboard/management/commands/UpdateDashboardModels.py index 77db1ce0..7a71f0d7 100644 --- a/Dashboard/management/commands/UpdateDashboardModels.py +++ b/Dashboard/management/commands/UpdateDashboardModels.py @@ -3,6 +3,7 @@ See LICENSE for usage details """ + # pylint: disable=C0103,W0611 from os.path import basename @@ -20,6 +21,7 @@ INFO_MSG = 'INFO: ' WARNING_MSG = 'WARN: ' + # pylint: disable=C0111,C0330 class Command(BaseCommand): help = 'Updates object instances required for Dashboard app' diff --git a/Dashboard/models.py b/Dashboard/models.py index a0305879..b9eb8252 100644 --- a/Dashboard/models.py +++ b/Dashboard/models.py @@ -3,6 +3,7 @@ See LICENSE for usage details """ + from uuid import uuid4 from django.contrib.auth.models import Group @@ -224,6 +225,7 @@ 'kas': 'Kashmiri (كٲشُر)', 'mni': 'Meitei (ꯃꯩꯇꯩꯂꯣꯟ)', 'sat': 'Santali (ᱥᱟᱱᱛᱟᱲᱤ)', + 'mas': 'Maasai (Ol Maa)', } # All sign language codes diff --git a/Dashboard/templates/Dashboard/base.html b/Dashboard/templates/Dashboard/base.html index de76c1fd..b2410d1a 100644 --- a/Dashboard/templates/Dashboard/base.html +++ b/Dashboard/templates/Dashboard/base.html @@ -80,7 +80,7 @@