from pathlib import Path

import pandas as pd
from datasets import ClassLabel, Dataset, DatasetDict

# NOTE: `download_and_extract(url, dst)` is assumed to be a helper defined
# elsewhere in this repo (download a URL and unpack the archive); it is used
# below but not defined in this file.

def download_civilcomments(data_path: str | Path) -> None:
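    """Download the CivilComments (WILDS) archive into `data_path`/civilcomments."""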
    print("Downloading CivilComments...")
    civilcomments_dir = Path(data_path) / "civilcomments"
    civilcomments_dir.mkdir(parents=True, exist_ok=True)
    download_and_extract(
        "https://worksheets.codalab.org/rest/bundles/0x8cd3de0634154aeaad2ee6eb96723c6e/contents/blob/",
        str(civilcomments_dir / "civilcomments.tar.gz"),
    )


def process_civilcomments(data_path: str | Path, dst: str | Path, keep_raw: bool = False) -> None:
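    """Clean, binarize, and deduplicate the raw CivilComments CSV, then save the
    result as a `DatasetDict` under `dst` and push it to the Hub.

    If `keep_raw` is True, the unprocessed table is also saved and pushed as a
    separate "raw" config.
    """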
    print("Processing CivilComments...")
    df = pd.read_csv(Path(data_path) / "civilcomments/all_data_with_identities.csv", index_col=0)
    if keep_raw:
        ds = Dataset.from_pandas(df, preserve_index=False)
        
        # save locally
        ds.save_to_disk(str(Path(dst) / "civilcomments-wilds" / "raw"))
        
        # push to hub
        ds.push_to_hub("<repo name>", "raw")

    # extract labels, features, and metadata
    input_output_vars = ["id", "split", "comment_text", "toxicity"]
    identity_vars = ["male", "female", "LGBTQ", "christian", "muslim", "other_religions", "black", "white"]
    auxiliary_vars = [
        "identity_any",
        "severe_toxicity",
        "obscene",
        "threat",
        "insult",
        "identity_attack",
        "sexual_explicit",
    ]

    # remove instances where label or text is missing
    df = df.loc[df[input_output_vars[-2:]].isna().sum(1) == 0]

    # remove instances where label < 0
    df = df.loc[df[input_output_vars[-1]] >= 0]

    # keep only columns we need
    df = df.loc[:, input_output_vars + identity_vars + auxiliary_vars]

    # binarize the label and the identity/auxiliary attributes at the 0.5
    # threshold (raw values are fractions of annotators who flagged each field)
    cols = [input_output_vars[-1]] + identity_vars + auxiliary_vars
    df[cols] = (df[cols] >= 0.5).astype(int)

    # fmt: off
    # deduplicate: first count, for each unique `comment_text`, how many distinct
    # values every annotation column takes across its occurrences
    gdf = df.groupby("comment_text")[identity_vars + ["split", "toxicity"]].agg("nunique")
    gdf["multiple"] = (gdf != 1).sum(1)

    print(f"""
    There are {df["comment_text"].duplicated().sum()} exact duplicates (i.e., rows sharing a `comment_text`).
    Of these, {len(gdf.query("multiple > 0"))} unique `comment_text` values have occurrences that disagree on
    at least one identity attribute, the split, or the label.

    Some duplicates appear with different attributes and labels, and some even in multiple splits. 
    In particular,

    {(gdf[identity_vars + ["split", "toxicity"]] > 1).sum()}
    """)

    # when a `comment_text` is duplicated, keep (in order of priority):
    #   - the occurrence in the validation set (the descending sort on `split`
    #     puts "val" before "train" and "test"), or
    #   - the occurrence with the higher toxicity, or
    #   - the one with higher identity_vars (in the order they appear in the list), or
    #   - the one with higher auxiliary_vars (in the order they appear in the list)
    print(f"Length before deduplication: {len(df)}")
    df = (
        df.sort_values(["comment_text", "split", "toxicity"] + identity_vars + auxiliary_vars, ascending=False)
        .drop_duplicates(subset="comment_text", keep="first")
    )
    print(f"Length after deduplication: {len(df)}")

    # add a column listing the names of the identity attributes active on each row
    df = (
        df.assign(active_attributes=lambda _df: _df[identity_vars].values.tolist())
        .assign(
            active_attributes=lambda _df: _df["active_attributes"].map(
                lambda lst: [name for flag, name in zip(lst, identity_vars, strict=True) if flag == 1]
            )
        )
    )
    # fmt: on

    # add column to flag whether any active attribute is present
    assert ((df[identity_vars].sum(1) != 0) == (df["active_attributes"].map(len) > 0)).all()  # simple check
    df["has_active_attrs"] = df[identity_vars].sum(1) != 0

    # add unique identifier as first column
    df["uid"] = list(range(len(df)))

    # reorder columns: uid first, then inputs/outputs, attribute flags, identities, auxiliaries
    df = df[["uid"] + input_output_vars + ["has_active_attrs", "active_attributes"] + identity_vars + auxiliary_vars]

    # convert to DatasetDict
    ds_dict = {}
    for split in df["split"].unique().tolist():
        ds = Dataset.from_pandas(df.query(f"split == '{split}'").drop(columns=["split"]), preserve_index=False)
        ds = ds.cast_column("toxicity", ClassLabel(num_classes=2, names=["non-toxic", "toxic"]))
        ds_dict[split if split != "val" else "validation"] = ds
    ds_dict = DatasetDict(ds_dict)

    # save locally
    ds_dict.save_to_disk(str(Path(dst) / "civilcomments-wilds" / "texts"))
    
    # push to hub
    ds_dict.push_to_hub("<repo name>", "default")
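

# A minimal usage sketch (not part of the original file): the "data" and
# "processed" paths are placeholder assumptions, and the "<repo name>"
# placeholders above must be replaced with a real Hub repo id before the
# push steps will succeed.
if __name__ == "__main__":
    download_civilcomments("data")
    process_civilcomments("data", dst="processed", keep_raw=False)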