---
dataset_info:
- config_name: finance
  features:
  - name: domain
    dtype: string
  - name: post_id
    dtype: string
  - name: history
    dtype: string
  - name: title
    dtype: string
  - name: c_root_id_A
    dtype: string
  - name: c_root_id_B
    dtype: string
  - name: created_at_utc_A
    dtype: float64
  - name: created_at_utc_B
    dtype: float64
  - name: score_A
    dtype: int64
  - name: score_B
    dtype: int64
  - name: human_ref_A
    dtype: string
  - name: human_ref_B
    dtype: string
  - name: labels
    dtype: int64
  - name: overlap_ratio
    dtype: float64
  - name: seconds_difference
    dtype: float64
  - name: score_ratio
    dtype: float64
  - name: len_ratio
    dtype: float64
  splits:
  - name: validation_pref
    num_bytes: 106745033
    num_examples: 55518
  - name: test_pref
    num_bytes: 109417097
    num_examples: 58603
  - name: train_pref
    num_bytes: 4167127747
    num_examples: 2245068
  download_size: 323727636
  dataset_size: 4383289877
- config_name: finance_test_prompts
  features:
  - name: rank
    dtype: int64
  - name: only_comment
    dtype: bool
  - name: domain
    dtype: string
  - name: post_id
    dtype: string
  - name: title
    dtype: string
  - name: history
    dtype: string
  - name: c_root_id
    dtype: string
  - name: created_at_utc
    dtype: float64
  - name: score
    dtype: int64
  - name: human_ref
    dtype: string
  splits:
  - name: test_prompts
    num_bytes: 6843041
    num_examples: 4929
  download_size: 3194622
  dataset_size: 6843041
- config_name: gender_sexuality
  features:
  - name: domain
    dtype: string
  - name: post_id
    dtype: string
  - name: history
    dtype: string
  - name: title
    dtype: string
  - name: c_root_id_A
    dtype: string
  - name: c_root_id_B
    dtype: string
  - name: created_at_utc_A
    dtype: float64
  - name: created_at_utc_B
    dtype: float64
  - name: score_A
    dtype: int64
  - name: score_B
    dtype: int64
  - name: human_ref_A
    dtype: string
  - name: human_ref_B
    dtype: string
  - name: labels
    dtype: int64
  - name: overlap_ratio
    dtype: float64
  - name: seconds_difference
    dtype: float64
  - name: score_ratio
    dtype: float64
  - name: len_ratio
    dtype: float64
  splits:
  - name: validation_pref
    num_bytes: 344495496
    num_examples: 200193
  - name: test_pref
    num_bytes: 314580732
    num_examples: 182706
  - name: train_pref
    num_bytes: 12750359301
    num_examples: 7302867
  download_size: 1072838873
  dataset_size: 13409435529
- config_name: gender_sexuality_test_prompts
  features:
  - name: rank
    dtype: int64
  - name: only_comment
    dtype: bool
  - name: domain
    dtype: string
  - name: post_id
    dtype: string
  - name: title
    dtype: string
  - name: history
    dtype: string
  - name: c_root_id
    dtype: string
  - name: created_at_utc
    dtype: float64
  - name: score
    dtype: int64
  - name: human_ref
    dtype: string
  splits:
  - name: test_prompts
    num_bytes: 24889982
    num_examples: 17485
  download_size: 11276178
  dataset_size: 24889982
- config_name: history
  features:
  - name: domain
    dtype: string
  - name: post_id
    dtype: string
  - name: history
    dtype: string
  - name: title
    dtype: string
  - name: c_root_id_A
    dtype: string
  - name: c_root_id_B
    dtype: string
  - name: created_at_utc_A
    dtype: float64
  - name: created_at_utc_B
    dtype: float64
  - name: score_A
    dtype: int64
  - name: score_B
    dtype: int64
  - name: human_ref_A
    dtype: string
  - name: human_ref_B
    dtype: string
  - name: labels
    dtype: int64
  - name: overlap_ratio
    dtype: float64
  - name: seconds_difference
    dtype: float64
  - name: score_ratio
    dtype: float64
  - name: len_ratio
    dtype: float64
  splits:
  - name: validation_pref
    num_bytes: 11033885
    num_examples: 7255
  - name: test_pref
    num_bytes: 8347339
    num_examples: 5511
  - name: train_pref
    num_bytes: 470504584
    num_examples: 290723
  download_size: 48258052
  dataset_size: 489885808
- config_name: history_test_prompts
  features:
  - name: rank
    dtype: int64
  - name: only_comment
    dtype: bool
  - name: domain
    dtype: string
  - name: post_id
    dtype: string
  - name: title
    dtype: string
  - name: history
    dtype: string
  - name: c_root_id
    dtype: string
  - name: created_at_utc
    dtype: float64
  - name: score
    dtype: int64
  - name: human_ref
    dtype: string
  splits:
  - name: test_prompts
    num_bytes: 1461426
    num_examples: 1027
  download_size: 818158
  dataset_size: 1461426
- config_name: politics
  features:
  - name: domain
    dtype: string
  - name: post_id
    dtype: string
  - name: history
    dtype: string
  - name: title
    dtype: string
  - name: c_root_id_A
    dtype: string
  - name: c_root_id_B
    dtype: string
  - name: created_at_utc_A
    dtype: float64
  - name: created_at_utc_B
    dtype: float64
  - name: score_A
    dtype: int64
  - name: score_B
    dtype: int64
  - name: human_ref_A
    dtype: string
  - name: human_ref_B
    dtype: string
  - name: labels
    dtype: int64
  - name: overlap_ratio
    dtype: float64
  - name: seconds_difference
    dtype: float64
  - name: score_ratio
    dtype: float64
  - name: len_ratio
    dtype: float64
  splits:
  - name: validation_pref
    num_bytes: 144623174
    num_examples: 83290
  - name: test_pref
    num_bytes: 162259697
    num_examples: 95042
  - name: train_pref
    num_bytes: 5691098839
    num_examples: 3411735
  download_size: 453464096
  dataset_size: 5997981710
- config_name: politics_test_prompts
  features:
  - name: rank
    dtype: int64
  - name: only_comment
    dtype: bool
  - name: domain
    dtype: string
  - name: post_id
    dtype: string
  - name: title
    dtype: string
  - name: history
    dtype: string
  - name: c_root_id
    dtype: string
  - name: created_at_utc
    dtype: float64
  - name: score
    dtype: int64
  - name: human_ref
    dtype: string
  splits:
  - name: test_prompts
    num_bytes: 9753539
    num_examples: 8156
  download_size: 4811733
  dataset_size: 9753539
- config_name: science
  features:
  - name: domain
    dtype: string
  - name: post_id
    dtype: string
  - name: history
    dtype: string
  - name: title
    dtype: string
  - name: c_root_id_A
    dtype: string
  - name: c_root_id_B
    dtype: string
  - name: created_at_utc_A
    dtype: float64
  - name: created_at_utc_B
    dtype: float64
  - name: score_A
    dtype: int64
  - name: score_B
    dtype: int64
  - name: human_ref_A
    dtype: string
  - name: human_ref_B
    dtype: string
  - name: labels
    dtype: int64
  - name: overlap_ratio
    dtype: float64
  - name: seconds_difference
    dtype: float64
  - name: score_ratio
    dtype: float64
  - name: len_ratio
    dtype: float64
  splits:
  - name: validation_pref
    num_bytes: 49143432
    num_examples: 28225
  - name: test_pref
    num_bytes: 49485277
    num_examples: 28595
  - name: train_pref
    num_bytes: 1886530811
    num_examples: 1092551
  download_size: 214600434
  dataset_size: 1985159520
- config_name: science_test_prompts
  features:
  - name: rank
    dtype: int64
  - name: only_comment
    dtype: bool
  - name: domain
    dtype: string
  - name: post_id
    dtype: string
  - name: title
    dtype: string
  - name: history
    dtype: string
  - name: c_root_id
    dtype: string
  - name: created_at_utc
    dtype: float64
  - name: score
    dtype: int64
  - name: human_ref
    dtype: string
  splits:
  - name: test_prompts
    num_bytes: 6695945
    num_examples: 5495
  download_size: 3505358
  dataset_size: 6695945
configs:
- config_name: finance
  data_files:
  - split: validation_pref
    path: finance/validation_pref-*
  - split: test_pref
    path: finance/test_pref-*
  - split: train_pref
    path: finance/train_pref-*
- config_name: finance_test_prompts
  data_files:
  - split: test_prompts
    path: finance_test_prompts/test_prompts-*
- config_name: gender_sexuality
  data_files:
  - split: validation_pref
    path: gender_sexuality/validation_pref-*
  - split: test_pref
    path: gender_sexuality/test_pref-*
  - split: train_pref
    path: gender_sexuality/train_pref-*
- config_name: gender_sexuality_test_prompts
  data_files:
  - split: test_prompts
    path: gender_sexuality_test_prompts/test_prompts-*
- config_name: history
  data_files:
  - split: validation_pref
    path: history/validation_pref-*
  - split: test_pref
    path: history/test_pref-*
  - split: train_pref
    path: history/train_pref-*
- config_name: history_test_prompts
  data_files:
  - split: test_prompts
    path: history_test_prompts/test_prompts-*
- config_name: politics
  data_files:
  - split: validation_pref
    path: politics/validation_pref-*
  - split: test_pref
    path: politics/test_pref-*
  - split: train_pref
    path: politics/train_pref-*
- config_name: politics_test_prompts
  data_files:
  - split: test_prompts
    path: politics_test_prompts/test_prompts-*
- config_name: science
  data_files:
  - split: validation_pref
    path: science/validation_pref-*
  - split: test_pref
    path: science/test_pref-*
  - split: train_pref
    path: science/train_pref-*
- config_name: science_test_prompts
  data_files:
  - split: test_prompts
    path: science_test_prompts/test_prompts-*
license: odc-by
language:
- en
pretty_name: ComPRed
---

# Dataset Card for "compred"

## Dataset Details

### Dataset Description

Conventional algorithms for training language models (LMs) with human feedback rely on preferences that are assumed to account for an average user, disregarding subjectivity and finer-grained variations. Recent studies have raised concerns about aggregating such diverse, often contradictory human feedback to train a single universal reward model, questioning which values or voices the models align with. Finetuning models to maximize such a reward results in generic models that produce outputs not preferred by many user groups, as they tend to average out styles and norms. To study this issue, we collect and release **ComPRed**, a question-answering dataset with community-level preferences from Reddit. This dataset facilitates studying diversity in preferences without incurring the privacy concerns associated with individual feedback.

- **Curated by:** Allen Institute for AI, The Ohio State University, Carnegie Mellon University
- **Paper:** [ArXiv](#)
- **Repository:** [https://github.com/allenai/compred](https://github.com/allenai/compred)
- **Language(s) (NLP):** English
- **License:** [ODC-BY](https://opendatacommons.org/licenses/by/odc_by_1.0_public_text.txt)
- **Point of Contact:** [Sachin Kumar & Chan Young Park](mailto:[email protected], [email protected])

## Uses

ComPRed contains five subsets, split by the factor that drives diverging user preferences (we followed a process similar to [SHP](https://huggingface.co/datasets/stanfordnlp/SHP) to create this dataset).

| Subset(s) | Factor |
| -------- | ------- |
| politics | Ideologies |
| gender_sexuality | Demographics |
| finance, history | Community Norms |
| science | Level of expertise / Community Norms |

### Loading

```python
from datasets import load_dataset

# load finance train set
finance_train_pref = load_dataset("allenai/compred", "finance", split="train_pref")

# load finance test prompts
finance_test_prompts = load_dataset("allenai/compred", "finance_test_prompts", split="test_prompts")
```
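The other subsets load the same way: each subset name in the table above is a config with `train_pref`, `validation_pref`, and `test_pref` splits, and each has a companion `<subset>_test_prompts` config. As a quick sanity check, the available configs can also be listed programmatically; this is a minimal sketch using the standard `datasets` helper, relying only on the `_test_prompts` naming convention used in this repository:

```python
from datasets import get_dataset_config_names

# all configs in allenai/compred, e.g. "finance", "finance_test_prompts", ...
configs = get_dataset_config_names("allenai/compred")

# separate the pairwise-preference configs from the prompt-only configs
pref_configs = [c for c in configs if not c.endswith("_test_prompts")]
prompt_configs = [c for c in configs if c.endswith("_test_prompts")]
print(pref_configs)
print(prompt_configs)
```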

### Dataset Structure

Coming soon.
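In the meantime, the preference columns can be read off the `dataset_info` metadata above and inspected after loading. The sketch below converts rows into prompt/chosen/rejected triples for preference tuning; it is illustrative only and rests on assumptions not documented here (that `title` and `history` hold the Reddit post title and body, and that `labels == 1` means `human_ref_A` was the preferred comment, an SHP-style convention), so please verify them against the paper.

```python
from datasets import load_dataset

finance_val = load_dataset("allenai/compred", "finance", split="validation_pref")
print(finance_val.column_names)

def to_preference_pair(example):
    # Assumption: the post title and body form the prompt, and labels == 1
    # means human_ref_A was the preferred comment (SHP-style convention).
    prompt = f"{example['title']}\n\n{example['history']}"
    if example["labels"] == 1:
        chosen, rejected = example["human_ref_A"], example["human_ref_B"]
    else:
        chosen, rejected = example["human_ref_B"], example["human_ref_A"]
    return {"prompt": prompt, "chosen": chosen, "rejected": rejected}

pairs = finance_val.map(to_preference_pair, remove_columns=finance_val.column_names)
```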
### Data Creation

Please refer to our [paper](#) for details on our dataset collection.

## Licensing Information

ComPRed is made available under the ODC-BY license, which requires users to also comply with the licenses of the underlying data sources.

## Citation

```
@article{kumar-park2024,
  title={{Personalized LMs: Aligning Language Models with Diverse Human Preferences}},
  author={Sachin Kumar and Chan Young Park and Yulia Tsvetkov and Noah A. Smith and Hannaneh Hajishirzi},
  journal={},
  year={2024}
}
```