|
1 | 1 | dataset: pubmed_qa |
2 | 2 | subset: pqa_labeled |
3 | 3 | templates: |
| 4 | + 00584766-2415-4d10-ab76-bf86058faa07: !Template |
| 5 | + answer_choices: null |
| 6 | + id: 00584766-2415-4d10-ab76-bf86058faa07 |
| 7 | + jinja: "Given a research abstract: {{ context.contexts | join(\", \") }}\nAnd\ |
| 8 | + \ given keywords: {{context.meshes | join(\", \")}}.\n \nWhat is the question\ |
| 9 | + \ answered by the above research abstract? \n|||\n{{question}} " |
| 10 | + metadata: !TemplateMetadata |
| 11 | + choices_in_prompt: false |
| 12 | + metrics: |
| 13 | + - BLEU |
| 14 | + - ROUGE |
| 15 | + original_task: false |
| 16 | + name: Generate Question Title with meshes |
| 17 | + reference: '' |
4 | 18 | 00f58886-e04a-4efb-bf41-cfcbd00a5e7d: !Template |
5 | 19 | answer_choices: null |
6 | 20 | id: 00f58886-e04a-4efb-bf41-cfcbd00a5e7d |
7 | | - jinja: "\"{{ context.contexts | join(\", \") }}\"\n\nAnswer the following question.\n\ |
8 | | - \nQ: \"{{question}}\" ||| \n{{long_answer}}\n" |
| 21 | + jinja: "Given a PubMed abstract: {{ context.contexts | join(\", \") }}\n\nAnswer\ |
| 22 | + \ this question: \"{{question}}\" \n||| \n{{long_answer}}\n" |
9 | 23 | metadata: !TemplateMetadata |
10 | | - choices_in_prompt: null |
11 | | - metrics: [] |
12 | | - original_task: null |
| 24 | + choices_in_prompt: false |
| 25 | + metrics: |
| 26 | + - BLEU |
| 27 | + - ROUGE |
| 28 | + - Other |
| 29 | + original_task: false |
13 | 30 | name: Question Answering (Long) |
14 | 31 | reference: Provide a long/verbose answer to the provided question |
15 | 32 | 0b630e04-02a8-46d6-b164-a41cd34042ff: !Template |
16 | 33 | answer_choices: null |
17 | 34 | id: 0b630e04-02a8-46d6-b164-a41cd34042ff |
18 | | - jinja: '"{{ context.contexts | join(", ") }}" |
19 | | -
|
20 | | -
|
21 | | - What is the main question answered by the above research abstract? ||| |
22 | | -
|
23 | | - {{question}} ' |
| 35 | + jinja: "Given a research abstract: {{ context.contexts | join(\", \") }}\n\nWhat\ |
| 36 | + \ is the question answered by the above research abstract? \n|||\n{{question}} " |
24 | 37 | metadata: !TemplateMetadata |
25 | | - choices_in_prompt: null |
26 | | - metrics: [] |
27 | | - original_task: null |
| 38 | + choices_in_prompt: false |
| 39 | + metrics: |
| 40 | + - BLEU |
| 41 | + - ROUGE |
| 42 | + original_task: false |
28 | 43 | name: 'Generate Question Title ' |
29 | 44 | reference: Given abstract, generate title (which is in the form of a question) |
30 | 45 | 1e0a77f8-0eb4-40a1-814d-8a111df66e5e: !Template |
31 | | - answer_choices: null |
| 46 | + answer_choices: 'yes ||| no ||| maybe'
32 | 47 | id: 1e0a77f8-0eb4-40a1-814d-8a111df66e5e |
33 | | - jinja: "Q: \"{{ question }}\" \n\nA: \"{{ long_answer }}\"\n\nSummarize the above\ |
34 | | - \ answer as: YES, NO or MAYBE ? |||\n{{final_decision}}" |
| 48 | + jinja: "Question: \"{{ question }}\" \n\nAnswer: \"{{ long_answer }}\"\n\nSummarize\ |
| 49 | + \ the above answer as YES, NO, or MAYBE? \n|||\n{{final_decision}}" |
35 | 50 | metadata: !TemplateMetadata |
36 | | - choices_in_prompt: null |
37 | | - metrics: [] |
38 | | - original_task: null |
| 51 | + choices_in_prompt: true |
| 52 | + metrics: |
| 53 | + - Accuracy |
| 54 | + original_task: false |
39 | 55 | name: Long Answer to Final Decision |
40 | 56 | reference: Given a question, the full text of the relevant answer, summarize a |
41 | 57 | yes/no/maybe answer |
42 | 58 | 21240f74-530a-47b7-a5d9-a6a13083b72e: !Template |
43 | | - answer_choices: null |
| 59 | + answer_choices: '{{context.labels | join("|||")}}' |
44 | 60 | id: 21240f74-530a-47b7-a5d9-a6a13083b72e |
45 | | - jinja: '{% set n_sections = context.contexts | length %} |
46 | | -
|
47 | | - {% set choice = range(0, n_sections) | random %} |
48 | | -
|
49 | | -
|
50 | | - "{{ context.contexts[choice] }}" |
51 | | -
|
52 | | -
|
53 | | - In a research article, the above text would most likely be found in which section: {{ |
54 | | - context.labels[:-1] | join(", ") }} or {{ context.labels[-1] }} ? ||| |
55 | | -
|
56 | | -
|
57 | | - {{ context.labels[choice] }} |
58 | | -
|
59 | | - ' |
| 61 | + jinja: "{% set n_sections = context.contexts | length %}\n{% set choice = range(0,\ |
| 62 | + \ n_sections) | random %}\n\n\"{{ context.contexts[choice] }}\"\n\nThe above\ |
| 63 | + \ text would most likely be found in which section of a biomedical paper: {{\ |
| 64 | + \ context.labels[:-1] | join(\", \") }} or {{ context.labels[-1] }} ? \n|||\n\ |
| 65 | + {{ context.labels[choice] }}\n" |
60 | 66 | metadata: !TemplateMetadata |
61 | | - choices_in_prompt: null |
62 | | - metrics: [] |
63 | | - original_task: null |
| 67 | + choices_in_prompt: true
| 68 | + metrics: |
| 69 | + - Accuracy |
| 70 | + original_task: false |
64 | 71 | name: Context Section Type |
65 | 72 | reference: Assign text in an abstract to specific paper section headers |
66 | 73 | 45cb344c-bb36-492a-ace0-7cfc897e127a: !Template |
67 | 74 | answer_choices: null |
68 | 75 | id: 45cb344c-bb36-492a-ace0-7cfc897e127a |
69 | | - jinja: '"{{ context.contexts | join(", ") }}" |
70 | | -
|
71 | | -
|
72 | | - What are the MeSH terms for this PubMed abstract? ||| |
73 | | -
|
74 | | - {{ context.meshes | join(", ") }}' |
| 76 | + jinja: "Given a PubMed abstract: {{ context.contexts | join(\", \") }}\n\nWhat\
| 77 | + \ are the MeSH (Medical Subject Headings) terms for this? \n|||\n{{ context.meshes | join(\", \") }}" |
75 | 78 | metadata: !TemplateMetadata |
76 | | - choices_in_prompt: null |
77 | | - metrics: [] |
78 | | - original_task: null |
| 79 | + choices_in_prompt: false |
| 80 | + metrics: |
| 81 | + - BLEU |
| 82 | + - ROUGE |
| 83 | + - Other |
| 84 | + original_task: false |
79 | 85 | name: Medical Subject Headings |
80 | 86 | reference: 'Predict the set of MeSH terms for a given PubMed abstract ' |
81 | | - 91d481e5-fac6-4532-b013-5ac1235b6e1a: !Template |
| 87 | + 48ee58bb-6a4a-4667-9d9c-69618408c6ce: !Template |
82 | 88 | answer_choices: null |
| 89 | + id: 48ee58bb-6a4a-4667-9d9c-69618408c6ce |
| 90 | + jinja: "Given a research abstract: {{ context.contexts | join(\", \") }}\nAnd\ |
| 91 | + \ given the long answer to a question: {{long_answer}}.\n \nWhat is the question asked by this research paper?\
| 92 | + \ \n|||\n{{question}} " |
| 93 | + metadata: !TemplateMetadata |
| 94 | + choices_in_prompt: false |
| 95 | + metrics: |
| 96 | + - BLEU |
| 97 | + - ROUGE |
| 98 | + original_task: false |
| 99 | + name: Generate Question Title with long answer |
| 100 | + reference: '' |
| 101 | + 91d481e5-fac6-4532-b013-5ac1235b6e1a: !Template |
| 102 | + answer_choices: 'yes ||| no ||| maybe'
83 | 103 | id: 91d481e5-fac6-4532-b013-5ac1235b6e1a |
84 | | - jinja: '"{{ context.contexts | join(", ") }}" |
85 | | -
|
86 | | -
|
87 | | - Answer the following question as YES, NO, MAYBE |
88 | | -
|
89 | | -
|
90 | | - Q: "{{question}}" |
91 | | -
|
92 | | -
|
93 | | - A: ||| {{final_decision}} |
94 | | -
|
95 | | -
|
96 | | - ' |
| 104 | + jinja: "Given a PubMed abstract: {{ context.contexts | join(\", \") }}\n\nAnswer\ |
| 105 | + \ the question: \"{{question}}\" as YES, NO, MAYBE.\n||| \n{{final_decision}}\n\ |
| 106 | + \n" |
97 | 107 | metadata: !TemplateMetadata |
98 | | - choices_in_prompt: null |
99 | | - metrics: [] |
| 108 | + choices_in_prompt: true
| 109 | + metrics: |
| 110 | + - Accuracy |
100 | 111 | original_task: true |
101 | 112 | name: Question Answering (Short) |
102 | 113 | reference: Answer the following question using the provided abstract text |
0 commit comments