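# Streamlit entry point for the machine-based item desirability ratings app.
# To launch locally (assuming this file is the app's entry point, e.g. app.py):
#     streamlit run app.py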
import json
import os
import random

import dash
import firebase_admin
import pandas as pd
import plotly.graph_objects as go
import streamlit as st
import torch
from dotenv import load_dotenv
from firebase_admin import credentials, firestore
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline

import demo_section
import explore_data_section

# Load environment variables from a local .env file.
load_dotenv()

# Initialize per-session defaults on first load.
if 'collect_data' not in st.session_state:
    st.session_state.collect_data = True

# Random identifier for the current browser session.
if 'user_id' not in st.session_state:
    st.session_state.user_id = random.randint(1, 9999999)

st.markdown("""
# Machine-Based Item Desirability Ratings

This web application accompanies the paper "*Expanding the Methodological Toolbox: Machine-Based Item Desirability Ratings as an Alternative to Human-Based Ratings*".

*Hommel, B. E. (2023). Expanding the methodological toolbox: Machine-based item desirability ratings as an alternative to human-based ratings. Personality and Individual Differences, 213, 112307. https://doi.org/10.1016/j.paid.2023.112307*

## What is this research about?

Researchers use personality scales to measure people's traits and behaviors, but biases can affect the accuracy of these scales.
Socially desirable responding is a common bias that can skew results. To overcome this, researchers gather item desirability ratings, e.g., to ensure that questions are phrased neutrally.
Recently, advances in natural language processing have made it possible to use machines to estimate social desirability ratings,
which can provide a viable alternative to human ratings and help researchers, scale developers, and practitioners improve the accuracy of personality scales.
""")
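
# Illustrative sketch only (an assumption, not the paper's actual model or scoring
# procedure): a transformers text-classification pipeline can score how positively
# an item reads, which is the general idea behind machine-based desirability ratings.
# The checkpoint below is a generic sentiment model used purely as a placeholder,
# and this helper is never called by the app.
def _example_desirability_score(item_text: str):
    scorer = pipeline(
        "text-classification",
        model="distilbert-base-uncased-finetuned-sst-2-english",
    )
    # Returns e.g. [{'label': 'POSITIVE', 'score': 0.99}] for desirable-sounding items.
    return scorer(item_text)

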
# Render the interactive demo and the data exploration sections.
demo_section.show()
explore_data_section.show()