import pandas as pd
from datasets import load_dataset
from sklearn.model_selection import train_test_split
import urllib.request
# Set binary labels
HATE = 1
NOT_HATE = 0
# class mapping for the multiclass (hate-target) task
class_mapping = {
    'target_gender_aggregated': 0,
    'target_race_aggregated': 1,
    'target_sexuality_aggregated': 2,
    'target_religion_aggregated': 3,
    'target_origin_aggregated': 4,
    'target_disability_aggregated': 5,
    'target_age_aggregated': 6,
    'not_hate': 7
}
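# eight classes in total: seven hate-speech targets plus 'not_hate';
# these integer ids become the final 'gold_label' values at the end of the script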
# map the continuous hate_speech_score to discrete classes
def map_label(x):
    if -1 <= x <= 0.5:
        return 999  # neutral/ambiguous, filtered out below
    elif x > 0.5:
        return HATE
    else:  # x < -1
        return NOT_HATE
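# e.g. map_label(1.2) -> HATE (1), map_label(-2.0) -> NOT_HATE (0),
# map_label(0.0) -> 999 (dropped later as ambiguous)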
# format text: mask user mentions (except verified accounts) and URLs
def clean_text(text):
    text = text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
    new_text = []
    for t in text.split():
        # compare handles in lowercase against the (lowercased) verified-user list
        t = '@user' if t.startswith('@') and len(t) > 1 and t.replace('@', '').lower() not in verified_users else t
        t = '{URL}' if t.startswith('http') else t
        new_text.append(t)
    return ' '.join(new_text)
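# illustrative example (hypothetical handle/URL):
#   clean_text('@someuser check this https://t.co/abc') -> '@user check this {URL}'
# note: verified_users is defined further down, but it only needs to exist
# by the time clean_text is actually called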
# load data
dataset = load_dataset('ucberkeley-dlab/measuring-hate-speech')
df = dataset['train'].to_pandas()
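# the dataframe is annotator-level: each tweet (comment_id) appears once per annotation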
# derive the per-annotation binary label from the continuous score
df['annon_label'] = df['hate_speech_score'].apply(map_label)
# keep only entries from Twitter
df = df[df['platform'] == 2]
# ignore ambiguous
df = df[df['annon_label'].isin([HATE, NOT_HATE])]
# count binary-label votes per tweet and keep labels with at least 2 votes
df_count_label = pd.DataFrame(df.groupby('comment_id')['annon_label'].value_counts())
df_count_label = df_count_label.rename(columns={'annon_label': 'count'})
df_count_label = df_count_label.reset_index(level=1)
df_count_label = df_count_label[df_count_label['count'] >= 2]
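# df_count_label is indexed by comment_id, with one row per (comment_id, label)
# pair that received >= 2 votes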
# map the agreed binary label back onto the annotation rows via index alignment
df = df.set_index('comment_id')
df['label'] = df_count_label['annon_label']
# drop entries with no agreement
df = df[df['label'].notnull()]
df = df.reset_index()
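# df is still one row per annotation, but 'label' now holds the agreed binary label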
# find agreement on targets
targets = ['target_race', 'target_religion', 'target_origin', 'target_gender',
           'target_sexuality', 'target_age', 'target_disability']
# for each target, count agreement
for t in targets:
    # count votes and consider only target values flagged by at least 2 coders
    df_count_targets = pd.DataFrame(df.groupby('comment_id')[t].value_counts())
    df_count_targets = df_count_targets.rename(columns={t: 'count'})
    df_count_targets = df_count_targets.reset_index(level=1)
    df_count_targets = df_count_targets[df_count_targets['count'] >= 2]
    # drop comment_ids where both True and False reached 2 votes
    # (possible when a tweet has 4 or more coders)
    df_count_targets = df_count_targets.loc[df_count_targets.index.drop_duplicates(keep=False)]
    # map the aggregated target back onto the annotation rows
    df = df.set_index('comment_id')
    df[f'{t}_aggregated'] = df_count_targets[t]
    df[f'{t}_aggregated'] = df[f'{t}_aggregated'].fillna(False)
    df = df.reset_index()
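# after the loop, each '<target>_aggregated' column is True only where
# at least 2 coders agreed the tweet attacks that target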
# aggregate targets
targets_aggregated = [f'{t}_aggregated' for t in targets]
# collect, per row, the names of the aggregated target columns that are True
df['target'] = df[targets_aggregated].apply(lambda row: row[row].index, axis=1)
# set target only if it is unique
df['target'] = df['target'].apply(lambda x: x[0] if len(x) == 1 else None)
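# e.g. a row where only target_race_aggregated is True gets
# target='target_race_aggregated'; rows with zero or several True columns get None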
# annotator-level rows are no longer needed -> keep each tweet only once
df = df.groupby('comment_id').nth(0)
df = df.reset_index()
# build the final multiclass label:
# hateful tweets (label == 1) get their aggregated target, the rest get 'not_hate'
idx_multiclass = df[df['label'] == 1].index
idx_not_hate = df[df['label'] == 0].index
# initialize column
df['gold_label'] = None
df.loc[idx_not_hate, 'gold_label'] = 'not_hate'
df.loc[idx_multiclass, 'gold_label'] = df.loc[idx_multiclass, 'target']
# drop hateful tweets whose annotators did not agree on a single target
df = df.dropna(subset=['gold_label'])
# get list of known verified users
verified_users = urllib.request.urlopen('https://raw.githubusercontent.com/cardiffnlp/timelms/main/data/verified_users.v091122.txt').readlines()
verified_users = [x.decode().strip('\n').lower() for x in verified_users]
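# the file is expected to hold one bare handle (no leading '@') per line,
# matching the lowercase comparison inside clean_text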
# clean text
df['text'] = df['text'].apply(clean_text)
# map classes
df['gold_label'] = df['gold_label'].map(class_mapping)
# create splits
test_size = int(0.2 * len(df))
val_size = int(0.1 * len(df))
train, test = train_test_split(df, test_size=test_size, stratify=df['gold_label'].values, random_state=4)
train, val = train_test_split(train, test_size=val_size, stratify=train['gold_label'].values, random_state=4)
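# both sizes are computed on the full df, so the final proportions are
# ~70% train / ~10% validation / ~20% test, stratified by gold_label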
# save splits
cols_to_keep = ['gold_label', 'text']
train[cols_to_keep].to_json('../data/tweet_hate/train.jsonl', lines=True, orient='records')
val[cols_to_keep].to_json('../data/tweet_hate/validation.jsonl', lines=True, orient='records')
test[cols_to_keep].to_json('../data/tweet_hate/test.jsonl', lines=True, orient='records')
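# each output line is a JSON record with the integer 'gold_label' and the cleaned 'text'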