-
Notifications
You must be signed in to change notification settings - Fork 16
/
Copy pathhs_task_config.yaml
80 lines (80 loc) · 2.09 KB
/
hs_task_config.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
# How per-metric scores are aggregated into a single leaderboard score.
aggregation_metric:
  type: dynascore

# Warning shown to users before they view task content.
# Folded scalar (>-): folds to a single line, no trailing newline —
# same value the original accidental plain-scalar continuation produced.
content_warning: >-
  This is sensitive content! If you do not want to see any hateful
  examples, please switch to another task.
# What annotators choose when creating an example: a binary hatefulness label.
goal:
  type: multioptions
  options:
    - hateful
    # NOTE(review): the validate section of this file spells this label
    # "not-hateful" — confirm which spelling the model/backend expects.
    - non-hateful
  field_name_for_the_model: label
# Context shown alongside the example; passed to the model under key "context".
context:
  type: plain-text
  field_names_for_the_model:
    context: context

# Free-text input the annotator writes; sent to the model as "statement".
user_input:
  - type: text
    placeholder: Enter statement
    field_name_for_the_model: statement
# Mapping from config field names to the keys the model receives.
model_input:
  statement: statement

# Which response field holds the raw user input.
response_fields:
  input_by_user: statement

# Extra robustness/fairness deltas computed alongside the main metric.
delta_metrics:
  - type: fairness
  - type: robustness
# Extra free-form fields collected during example creation and validation.
metadata:
  # Fields shown to the example creator. The two "model explanation" fields
  # are alternatives gated on whether the model was fooled
  # (model_wrong_condition true/false).
  create:
    - display_name: example explanation
      name: example_explanation
      placeholder: Explain why your example is correct...
      type: string
    - display_name: model explanation
      model_wrong_condition: false
      name: model_explanation_right
      placeholder: Explain why you thought the model would make a mistake...
      type: string
    - display_name: model explanation
      model_wrong_condition: true
      name: model_explanation_wrong
      placeholder: Explain why you think the model made a mistake...
      type: string
  # Fields shown to validators, gated on their verdict
  # (validated_label_condition: correct / incorrect / flagged).
  validate:
    - labels:
        # NOTE(review): "not-hateful" here vs "non-hateful" in goal.options —
        # confirm the intended canonical spelling.
        - not-hateful
        - hateful
      name: corrected_label
      placeholder: Enter corrected label
      type: multiclass
      validated_label_condition: incorrect
    - name: target_explanation
      placeholder: Explain why your proposed target is correct...
      type: string
      validated_label_condition: incorrect
    - name: flag_reason
      placeholder: Enter the reason for flagging...
      type: string
      validated_label_condition: flagged
    - name: validator_example_explanation
      placeholder: Explain why the example is correct...
      type: string
      validated_label_condition: correct
    - name: validator_model_explanation
      placeholder: Enter what you think was done to try to trick the model...
      type: string
# How "the model was fooled" is decided: exact match against the gold label.
model_wrong_metric:
  reference_names:
    - label
  type: exact_match

# Model outputs: the predicted label plus its probability
# (prob is scored against the "label" reference).
output:
  - name: label
  - name: prob
    reference_name: label
    type: prob

# Primary performance metric reported for the task.
perf_metric:
  reference_name: label
  type: macro_f1

# Key under which the model returns its predicted label.
model_output:
  model_prediction_label: label

# Metric used when evaluating uploaded models.
model_evaluation_metric:
  metric_name: exact_match