import os
import torch
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
from plotly.subplots import make_subplots
import plotly.graph_objects as go
def z_score(y, mean=.04853076, sd=.9409466):
    """Standardize a raw model score. The default mean/sd presumably describe
    the score distribution of the fine-tuning data, so that 0 corresponds to a
    neutral item."""
    return (y - mean) / sd
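
# Worked example (hypothetical input): z_score(0.5) ≈ (0.5 - 0.0485) / 0.9409 ≈ 0.48,
# i.e., a raw score of 0.5 lies about half a standard deviation above the
# (presumed training-sample) mean.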

def indicator_plot(value, title, value_range, domain):
    plot = go.Indicator(
        mode="gauge+delta",
        value=value,
        domain=domain,
        title=title,
        delta={
            'reference': 0,
            'decreasing': {'color': "#ec4899"},
            'increasing': {'color': "#36def1"}
        },
        gauge={
            'axis': {'range': value_range, 'tickwidth': 1, 'tickcolor': "black"},
            'bar': {'color': "#4361ee"},
            'bgcolor': "white",
            'borderwidth': 2,
            'bordercolor': "#efefef",
            'steps': [
                {'range': [value_range[0], 0], 'color': '#efefef'},
                {'range': [0, value_range[1]], 'color': '#efefef'}
            ],
            'threshold': {
                'line': {'color': "#4361ee", 'width': 8},
                'thickness': 0.75,
                'value': value
            }
        }
    )
    return plot
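
# Usage sketch (hypothetical values): a single full-width gauge, e.g.
#   go.Figure(indicator_plot(1.2, "Demo", [-4, 4], {'x': [0, 1], 'y': [0, 1]}))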
body = """
# NLP for Item Desirability Ratings
This web application accompanies the paper *Leveraging Natural Language Processing for Item Desirability Ratings:
A Machine-Based Alternative to Human Judges* submitted to the Journal *Personality and Individual Differences*.
## What is this research about?
Researchers use personality scales to measure people's traits and behaviors, but biases can affect the accuracy of these scales.
Socially desirable responding is a common bias that can skew results. To overcome this, researchers gather item desirability ratings, e.g., to ensure that questions are neutral.
Recently, advancements in natural language processing have made it possible to use machines to estimate social desirability ratings,
which can provide a viable alternative to human ratings and help researchers, scale developers, and practitioners improve the accuracy of personality scales.
## Try it yourself!
Use the text field below to enter a statement that might be part of a psychological questionnaire (e.g., "I love a good fight.").
The left dial will indicate how socially desirable it might be to endorse this item.
The right dial indicates sentiment (i.e., valence) as estimated by regular sentiment analysis (using the `cardiffnlp/twitter-xlm-roberta-base-sentiment` model).
"""
st.markdown(body)
input_text = st.text_input(
    label='Estimate item desirability:',
    value='I love a good fight.',
    placeholder='Enter item'
)

# Desirability model: remote (Hugging Face Hub) or local checkpoint?
# The "item-desirability" environment variable doubles as the Hub access token.
if os.environ.get("item-desirability"):
    model_path = 'magnolia-psychometrics/item-desirability'
else:
    model_path = '/nlp/nlp/models/finetuned/twitter-xlm-roberta-base-regressive-desirability-ft-4'

auth_token = os.environ.get("item-desirability") or True
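
# Note: Streamlit re-runs this script on every interaction. The
# `... not in globals()` guards below try to avoid reloading the models each
# time; with a recent Streamlit release, `st.cache_resource` would arguably be
# the more idiomatic way to cache them.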
if 'tokenizer' not in globals():
    tokenizer = AutoTokenizer.from_pretrained(
        pretrained_model_name_or_path=model_path,
        use_fast=True,
        use_auth_token=auth_token
    )
if 'model' not in globals():
    model = AutoModelForSequenceClassification.from_pretrained(
        pretrained_model_name_or_path=model_path,
        num_labels=1,
        ignore_mismatched_sizes=True,
        use_auth_token=auth_token
    )

# sentiment classifier
if 'classifier' not in globals():
    sentiment_model = 'cardiffnlp/twitter-xlm-roberta-base-sentiment'
    classifier = pipeline(
        "sentiment-analysis",
        model=sentiment_model,
        tokenizer=sentiment_model,
        use_fast=False,
        top_k=3
    )
classifier_output = classifier(input_text)
classifier_output_dict = {x['label']: x['score'] for x in classifier_output[0]}
classifier_score = classifier_output_dict['positive'] - classifier_output_dict['negative']
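# classifier_score is a signed polarity in [-1, 1]: P(positive) - P(negative),
# leaving the neutral class out of the difference.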

if input_text:
    inputs = tokenizer(input_text, padding=True, return_tensors='pt')

    with torch.no_grad():
        score = model(**inputs).logits.squeeze().tolist()

    z = z_score(score)
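    # With num_labels=1, the regression head returns a single raw score;
    # z_score() centers it so that 0 lines up with "neutral" on the gauge.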
    p1 = indicator_plot(
        value=z,
        title="Item Desirability",
        value_range=[-4, 4],
        domain={'x': [0, .45], 'y': [0, 1]}
    )

    p2 = indicator_plot(
        value=classifier_score,
        title="Item Sentiment",
        value_range=[-1, 1],
        domain={'x': [.55, 1], 'y': [0, 1]}
    )

    fig = go.Figure()
    fig.add_trace(p1)
    fig.add_trace(p2)

    fig.update_layout(
        title=dict(text=f'"{input_text}"', font=dict(size=36), yref='paper'),
        paper_bgcolor="white",
        font={'color': "black", 'family': "Arial"}
    )

    st.plotly_chart(fig, theme=None, use_container_width=True)
notes = """
Item desirability: z-transformed values, 0 indicated "neutral".
Item sentiment: Absolute differences between positive and negative sentiment.
"""
st.markdown(notes) |