diff --git a/Appraise/urls.py b/Appraise/urls.py index 54a122be..a1690c8c 100644 --- a/Appraise/urls.py +++ b/Appraise/urls.py @@ -188,8 +188,8 @@ name='pairwise-assessment-document', ), re_path( - r'^campaign-status/(?P<campaign_name>[a-zA-Z0-9]+)/' - r'(?P<sort_key>[0123456])?/?$', + r'^campaign-status/(?P<campaign_name>[a-zA-Z0-9]+(,[a-zA-Z0-9]+))*/' + r'(?P<sort_key>[a-zA-Z0-9_])?/?$', campaign_views.campaign_status, name='campaign_status', ), diff --git a/Campaign/views.py b/Campaign/views.py index 4162f743..2f326f5c 100644 --- a/Campaign/views.py +++ b/Campaign/views.py @@ -32,7 +32,7 @@ LOGGER = _get_logger(name=__name__) -def campaign_status(request, campaign_name): +def campaign_status(request, campaign_name, sort_key=None): """ Campaign status view with completion details. """ @@ -41,6 +41,19 @@ def campaign_status(request, campaign_name): request.user.username or "Anonymous", ) + if "," in campaign_name: + responses = [campaign_status(request, name, sort_key) for name in campaign_name.split(",")] + if not all([response.headers["Content-Type"] == responses[0].headers["Content-Type"] for response in responses]): + return HttpResponse( + 'ERROR: You are mixing unrelated campaigns (views.py:campaign_status).', + content_type='text/plain', + ) + else: + return HttpResponse( + "\n\n".join([response.content.decode('utf-8') for response in responses]), + content_type=responses[0].headers["Content-Type"], + ) + # Get Campaign instance for campaign name try: campaign = _get_campaign_instance(campaign_name) @@ -66,7 +79,11 @@ def campaign_status(request, campaign_name): # special handling for ESA if "esa" in campaign_opts: return campaign_status_esa(campaign) + else: + return campaign_status_plain(request, campaign, result_type, campaign_opts, sort_key) + +def campaign_status_plain(request, campaign, result_type, campaign_opts, sort_key): _out = [] for team in campaign.teams.all(): for user in team.members.all(): @@ -202,7 +219,7 @@ def campaign_status(request, campaign_name): _out.append(_item) - 
_out.sort(key=lambda x: x[2]) + _out.sort(key=lambda x: x[sort_key if sort_key else 2]) _header = ( 'username', @@ -250,7 +267,7 @@ def campaign_status_esa(campaign) -> str: out_str += "<tr>" + "".join( f"<th>{x}</th>" for x in ["Username", "Progress", "First Modified", "Last Modified", "Time (Last-First)", "Time (Real)"] ) + "</tr>\n" - + for team in campaign.teams.all(): for user in team.members.all(): if user.is_staff: diff --git a/Dashboard/utils.py b/Dashboard/utils.py index 44848e13..5bd73fb0 100644 --- a/Dashboard/utils.py +++ b/Dashboard/utils.py @@ -146,20 +146,20 @@ def run_quality_control(username): # File "scipy/stats/stats.py", line 4865, in mannwhitneyu # raise ValueError( # 'All numbers are identical in mannwhitneyu') + # In this case, let's consider it a failed QC. except ValueError: - pass + _pvalue = 1 # Compute the total annotation time - _durations = [x[1] - x[0] for x in _data] - annotation_time = sum(_durations) if _durations else None + # Be very generous, essentially last action - first action (not individual times) + times = [x[1] for x in _data] + [x[0] for x in _data] + annotation_time = max(times) - min(times) if times else 0 print( f"User '{username}', items= {len(_x)}, p-value= {pvalue}, time= {annotation_time}" ) - return ( - pvalue is not None - and pvalue <= MAX_WILCOXON_PVALUE - and annotation_time is not None - and annotation_time >= MIN_ANNOTATION_TIME + return annotation_time >= MIN_ANNOTATION_TIME and ( + pvalue is None or + pvalue <= MAX_WILCOXON_PVALUE ) diff --git a/EvalView/templates/EvalView/_instructions-esa.html b/EvalView/templates/EvalView/_instructions-esa.html index 38abbe41..a930aabc 100644 --- a/EvalView/templates/EvalView/_instructions-esa.html +++ b/EvalView/templates/EvalView/_instructions-esa.html @@ -16,6 +16,9 @@
  • Missing content: If something is missing, highlight the word [MISSING] to mark the error.
  • Tip: Highlight the word or general area of the error (it doesn't need to be exact). Use multiple highlights for different errors.
  • Tip: Pay particular attention to translation consistency between texts across the whole document.
  • +
  • Tip: If the translation is in the wrong language, mark it fully and assign it 0%
  • +
  • Tip: If the translation contains additional text (e.g. "Here is the translation") or alternative secondary translation, mark it as a major error.
  • +
  • Score the translation: After marking errors, please use the slider and set an overall score based on meaning preservation and general quality:
    • 0: Broken/poor translation.