import pandas as pd
import numpy as np
from scikit_posthocs import posthoc_nemenyi
from scipy import stats
from scipy.stats import (friedmanchisquare, kruskal, mannwhitneyu, wilcoxon, levene,
                         ttest_ind, ttest_rel, ttest_1samp, f_oneway, rankdata,
                         spearmanr, pearsonr, kendalltau, entropy)
from scipy.spatial.distance import jensenshannon
from statsmodels.stats.multicomp import MultiComparison, pairwise_tukeyhsd
def statistical_tests(data):
    """Perform various statistical tests to evaluate potential biases across the
    Privilege, Protect, and Neutral rank columns."""
    variables = ['Privilege', 'Protect', 'Neutral']
    rank_suffix = '_Rank'

    # Average rank of each variable across all rows
    rank_columns = [v + rank_suffix for v in variables]
    average_ranks = data[rank_columns].mean()

    # One rank series per variable, used by the Friedman test below
    rank_data = [data[col] for col in rank_columns]

    # Pairwise comparisons between the three variables
    pairs = [
        ('Privilege', 'Protect'),
        ('Protect', 'Neutral'),
        ('Privilege', 'Neutral')
    ]
    pairwise_results = {
        'Wilcoxon Test': {}
    }
    for var1, var2 in pairs:
        pair_rank_score = f'{var1}{rank_suffix} vs {var2}{rank_suffix}'
        # Wilcoxon signed-rank test on the paired ranks; skipped for small samples,
        # where the test's approximation is unreliable
        if len(data) > 20:
            wilcoxon_stat, wilcoxon_p = wilcoxon(data[f'{var1}{rank_suffix}'], data[f'{var2}{rank_suffix}'])
        else:
            wilcoxon_stat, wilcoxon_p = np.nan, "Sample size too small for Wilcoxon test."
        pairwise_results['Wilcoxon Test'][pair_rank_score] = {"Statistic": wilcoxon_stat, "p-value": wilcoxon_p}

    # Friedman test across the three related rank samples
    friedman_stat, friedman_p = friedmanchisquare(*rank_data)

    # Nemenyi post-hoc test: after transposing, rows correspond to the three
    # variables (groups) and columns to observations, which is the layout
    # posthoc_nemenyi expects for array input
    rank_matrix = data[rank_columns].values
    rank_matrix_transposed = np.transpose(rank_matrix)
    posthoc_results = posthoc_nemenyi(rank_matrix_transposed)

    results = {
        "Average Ranks": average_ranks.to_dict(),
        "Friedman Test": {
            "Statistic": friedman_stat,
            "p-value": friedman_p,
            "Post-hoc": posthoc_results
        },
        **pairwise_results,
    }
    return results
def hellinger_distance(p, q):
    """Calculate the Hellinger distance between two probability distributions."""
    return np.sqrt(0.5 * np.sum((np.sqrt(p) - np.sqrt(q)) ** 2))
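# Quick sanity checks for hellinger_distance (illustrative values chosen here, not
# taken from the original analysis): identical distributions should give 0, and
# distributions with disjoint support should give 1.
assert np.isclose(hellinger_distance(np.array([0.5, 0.5]), np.array([0.5, 0.5])), 0.0)
assert np.isclose(hellinger_distance(np.array([1.0, 0.0]), np.array([0.0, 1.0])), 1.0)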
def calculate_correlations(df):
    """Calculate Spearman, Pearson, and Kendall's Tau correlations for the given ranks in the dataframe."""
    correlations = {
        'Spearman': {},
        'Pearson': {},
        'Kendall Tau': {}
    }
    columns = ['Privilege_Rank', 'Protect_Rank', 'Neutral_Rank']
    for i in range(len(columns)):
        for j in range(i + 1, len(columns)):
            col1, col2 = columns[i], columns[j]
            correlations['Spearman'][f'{col1} vs {col2}'] = spearmanr(df[col1], df[col2]).correlation
            correlations['Pearson'][f'{col1} vs {col2}'] = pearsonr(df[col1], df[col2])[0]
            correlations['Kendall Tau'][f'{col1} vs {col2}'] = kendalltau(df[col1], df[col2]).correlation
    return correlations
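# Illustrative check with hand-picked ranks (synthetic values, not data from the
# Space): perfectly reversed rankings should give a Spearman correlation of -1 for
# Privilege_Rank vs Protect_Rank.
_demo_ranks = pd.DataFrame({
    'Privilege_Rank': [1, 2, 3, 4],
    'Protect_Rank': [4, 3, 2, 1],
    'Neutral_Rank': [2, 1, 4, 3],
})
assert np.isclose(
    calculate_correlations(_demo_ranks)['Spearman']['Privilege_Rank vs Protect_Rank'], -1.0
)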
def scores_to_prob(scores):
    """Convert scores to probability distributions."""
    # Empirical distribution over the observed score values; assumes the scores are
    # non-negative and meaningful when cast to integer bin indices
    value_counts = scores.value_counts()
    probabilities = value_counts / value_counts.sum()
    full_prob = np.zeros(int(scores.max()) + 1)
    full_prob[value_counts.index.astype(int)] = probabilities
    return full_prob
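# Small worked example (synthetic scores, assumed to be non-negative integers):
# the returned vector is indexed by integer score value, so the scores [1, 1, 3]
# map to the probability vector [0, 2/3, 0, 1/3].
assert np.allclose(scores_to_prob(pd.Series([1, 1, 3])), [0.0, 2 / 3, 0.0, 1 / 3])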
def calculate_divergences(df):
    """Calculate KL, Jensen-Shannon divergences, and Hellinger distance for the score distributions."""
    score_columns = ['Privilege_Avg_Score', 'Protect_Avg_Score', 'Neutral_Avg_Score']
    probabilities = {col: scores_to_prob(df[col]) for col in score_columns}
    divergences = {
        'KL Divergence': {},
        'Jensen-Shannon Divergence': {},
        'Hellinger Distance': {}
    }
    for i in range(len(score_columns)):
        for j in range(i + 1, len(score_columns)):
            col1, col2 = score_columns[i], score_columns[j]
            # KL divergence is asymmetric and becomes infinite when the second
            # distribution has zero mass where the first does not
            divergences['KL Divergence'][f'{col1} vs {col2}'] = entropy(probabilities[col1], probabilities[col2])
            divergences['Jensen-Shannon Divergence'][f'{col1} vs {col2}'] = jensenshannon(probabilities[col1],
                                                                                          probabilities[col2])
            divergences['Hellinger Distance'][f'{col1} vs {col2}'] = hellinger_distance(probabilities[col1],
                                                                                        probabilities[col2])
    return divergences
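# Illustrative end-to-end run on synthetic data (the column names follow the layout
# the functions above assume; the values are random and are not results from the
# Space). Integer scores in 1-5 are used so that scores_to_prob() bins them cleanly.
if __name__ == "__main__":
    rng = np.random.default_rng(42)
    n = 60
    # Per-row rankings: each row is a permutation of 1-3 across the three variables
    ranks = np.array([rng.permutation([1, 2, 3]) for _ in range(n)])
    demo_df = pd.DataFrame({
        'Privilege_Rank': ranks[:, 0],
        'Protect_Rank': ranks[:, 1],
        'Neutral_Rank': ranks[:, 2],
        'Privilege_Avg_Score': rng.integers(1, 6, size=n),
        'Protect_Avg_Score': rng.integers(1, 6, size=n),
        'Neutral_Avg_Score': rng.integers(1, 6, size=n),
    })
    print(statistical_tests(demo_df))
    print(calculate_correlations(demo_df))
    print(calculate_divergences(demo_df))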