diff --git a/EvalData/models/direct_assessment_document.py b/EvalData/models/direct_assessment_document.py index 410b736c..4499bf6a 100644 --- a/EvalData/models/direct_assessment_document.py +++ b/EvalData/models/direct_assessment_document.py @@ -255,11 +255,12 @@ def next_document_for_user_mqmesa(self, user): Used for MQM/ESA views Specifically a tuple with: next_item, - completed_items, - completed_docs, + items_completed, + items_total, + docs_completed, + docs_total, doc_items, doc_items_results, - total_docs, """ # get all items (100) and try to find resul @@ -274,16 +275,19 @@ def next_document_for_user_mqmesa(self, user): ] unfinished_items = [i for i, r in all_items if not r] - docs_total = len({i.documentID for i, r in all_items}) + # documentID + targetID uniquely identifies documents + docs_total = len({(i.documentID, i.targetID) for i, r in all_items}) items_completed = len([i for i, r in all_items if r and r.completed]) docs_completed = docs_total - len( - {i.documentID for i, r in all_items if r is None or not r.completed} + {(i.documentID, i.targetID) for i, r in all_items if r is None or not r.completed} ) + items_total = len(all_items) if not unfinished_items: return ( None, items_completed, + items_total, docs_completed, [], [], @@ -309,10 +313,11 @@ def next_document_for_user_mqmesa(self, user): return ( next_item, # the first unannotated item for the user items_completed, # the number of completed items in the task + items_total, docs_completed, # the number of completed documents in the task + docs_total, # the total number of documents in the task doc_items, # all items from the current document doc_items_results, # all score results from the current document - docs_total, # the total number of documents in the task ) def get_results_for_each_item(self, block_items, user): diff --git a/EvalView/static/EvalView/css/direct-assessment-document-mqm-esa.css b/EvalView/static/EvalView/css/direct-assessment-document-mqm-esa.css index 
d17ab3e7..f7acdc0b 100644 --- a/EvalView/static/EvalView/css/direct-assessment-document-mqm-esa.css +++ b/EvalView/static/EvalView/css/direct-assessment-document-mqm-esa.css @@ -73,6 +73,13 @@ width: 100%; } +.source-text > img { + display: block; + margin-left: auto; + margin-right: auto; + width: 45%; +} + .tutorial-text { text-align: center; color: #257; diff --git a/EvalView/static/EvalView/js/direct-assessment-document-mqm-esa.js b/EvalView/static/EvalView/js/direct-assessment-document-mqm-esa.js index 3a7e9e77..8f2c3844 100644 --- a/EvalView/static/EvalView/js/direct-assessment-document-mqm-esa.js +++ b/EvalView/static/EvalView/js/direct-assessment-document-mqm-esa.js @@ -75,6 +75,8 @@ const ERROR_TYPES = { }, "Other": {}, } + + Object.keys(SEVERITY_TO_COLOR).map((key) => { $(`#instruction_sev_${key}`).css("background-color", SEVERITY_TO_COLOR[key]) }) @@ -311,8 +313,14 @@ class MQMItemHandler { } this.mqm_submitted = structuredClone(this.mqm) this.mqm_orig = JSON.parse(JSON.parse(this.el.children('#mqm-payload-orig').html())) - this.text_source_orig = decodeEntities(JSON.parse(this.el.children('#text-source-payload').html()).trim()) - this.source_video = JSON.parse(this.el.children('#text-source-payload').html()).trim().startsWith(" { + if (v == "\n") { + return "
" // preserve newlines + } return `${v}` }).join("") + " [MISSING]" this.el_target.html(html_target) @@ -357,8 +367,11 @@ class MQMItemHandler { } // handle character alignment estimation - if (!this.source_video) { + if (!this.source_is_multimodal) { let html_source = this.text_source_orig.split("").map((v, i) => { + if (v == "\n") { + return "
" // preserve newlines + } return `${v}` }).join("") this.el_source.html(html_source) diff --git a/EvalView/templates/EvalView/_instructions-esa.html b/EvalView/templates/EvalView/_instructions-esa.html index 1117e29f..05e1fa0b 100644 --- a/EvalView/templates/EvalView/_instructions-esa.html +++ b/EvalView/templates/EvalView/_instructions-esa.html @@ -1,13 +1,13 @@
    -
  • Highligh errors: +
  • Highlight errors:
    • Select the part of translation where you have identified a translation error (drag or click start & end).
    • Click on the highlight to change error severity (minor/major) or remove the highlight.
  • -
  • Choose error severity: +
  • Choose error severity:
    • Minor errors: Style, grammar, word choice could be better or more natural.
    • Major errors:: The meaning is changed significantly and/or the part is really hard to understand.
    • diff --git a/EvalView/templates/EvalView/direct-assessment-document-mqm-esa.html b/EvalView/templates/EvalView/direct-assessment-document-mqm-esa.html index 3de8e146..c9df8bba 100644 --- a/EvalView/templates/EvalView/direct-assessment-document-mqm-esa.html +++ b/EvalView/templates/EvalView/direct-assessment-document-mqm-esa.html @@ -22,7 +22,7 @@ Completed {{docs_completed}}/{{docs_total}} documents, - {{items_completed}}/100 segments + {{items_completed}}/{{items_total}} segments diff --git a/EvalView/views.py b/EvalView/views.py index 40b4e7b5..fbe85cff 100644 --- a/EvalView/views.py +++ b/EvalView/views.py @@ -1133,10 +1133,11 @@ def direct_assessment_document_mqmesa(campaign, current_task, request): ( next_item, items_completed, + items_total, docs_completed, + docs_total, doc_items, doc_items_results, - docs_total, ) = current_task.next_document_for_user_mqmesa(request.user) if not next_item: @@ -1151,11 +1152,15 @@ def direct_assessment_document_mqmesa(campaign, current_task, request): # Send response to the Ajax POST request return JsonResponse(context) - # TODO: hotfix for WMT24 + # TODO: hotfix for WMT24 and WMT25 # Tracking issue: https://github.com/AppraiseDev/Appraise/issues/185 for item in doc_items: - # don't escape HTML video or images - if item.sourceText.strip().startswith("