Loading and preparing data for analysis

data <- import(here("data2", "batch2_150324.csv")) 

data %<>% slice(-1:-2) # drop the first two non-data rows of the export (extra header rows)

# convert EndDate to Date class so we can filter by date
data$EndDate <- as.Date(data$EndDate, format = "%m/%d/%y")

# filter to the data-collection window only, removing pilot/test runs, survey previews, unfinished responses, and attention/comprehension-check failures

data_dc <- data%>%
  filter(EndDate > '2024-02-21')%>%
  filter(EndDate < '2024-03-11')%>%
  filter(Status != 'Survey Preview')%>%
  filter(Finished != 'FALSE')%>%
  filter(att_ch_noun == 'Library')%>% # drop attention-check failures
  filter(att_ch_fruit == 'Orange') %>%
  filter( cc_inf_qy %in% c('Meat Industry,Fossil Fuels Use (planes, cars, etc)', 'Meat Industry', 'Fossil Fuels Use (planes, cars, etc)') |  cc_inf_qn %in% c('Recycling', 'Recycling,None of the above'))%>%
  filter( f_inf_qy %in% c('Keep kitchen clean,Store raw meat separately',  'Keep kitchen clean', 'Store raw meat separately') |  f_inf_qn %in% c('Store all foods at room temperature,Don’t clean kitchen utensils', 'Store all foods at room temperature', 'Don’t clean kitchen utensils'))%>%
    filter( mh_inf_qy %in% c('Have a good sleep,Do exercise', 'Have a good sleep', 'Do exercise') |  mh_inf_qn %in% c('None of the above'))
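# (Sketch) optional before/after row counts, to document how many responses the
# exclusion filters above remove:
c(raw = nrow(data), after_exclusions = nrow(data_dc))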

Now let's prepare the variables for analysis.

## conditions: Hypocrisy/Control ; Left/Right 

data_a <- data_dc %>%
  mutate(condition_h = case_when(judgment_hlf_1  == "" & judgment_hlm_1 == "" & judgment_hlj_1 == "" & judgment_hrf_1  == "" & judgment_hrm_1 == "" & judgment_hrj_1 == "" ~ 'Control'))%>%
  mutate(condition_hyp = case_when(is.na(condition_h) ~ 'Hypocrisy'))%>%
  mutate(condition_hc = as.factor(case_when(condition_hyp== 'Hypocrisy' ~ 'Hypocrisy', condition_h == 'Control' ~ 'Control')))%>%
  mutate(condition_l = case_when(judgment_hrf_1  == "" & judgment_hrm_1 == "" & judgment_hrj_1 == "" & judgment_crf_1  == "" & judgment_crm_1 == "" & judgment_crj_1 == "" ~ 'Left'))%>%
  mutate(condition_r = case_when(is.na(condition_l) ~ 'Right'))%>%
  mutate(condition_lr = as.factor(case_when(condition_l== 'Left' ~ 'Left', condition_r == 'Right' ~ 'Right')))%>%
  dplyr::rename(id = 'Random ID')%>%
  dplyr::rename(id_w = 'workerId')

long_hc <- data_a%>%
  select(judgment_hrf_1, judgment_hrm_1, judgment_hrj_1, judgment_crf_1, judgment_crm_1, judgment_crj_1, judgment_hlf_1, judgment_hlm_1, judgment_hlj_1, judgment_clf_1, judgment_clm_1, judgment_clj_1, id, id_w, condition_hc, condition_lr, starts_with('pol_'), -starts_with('pol_orientation_6'), starts_with('ccp_'), political_party, starts_with('emo_'), starts_with('donation_'))%>%
  pivot_longer( cols = starts_with("judgment_"),
    names_to = "topic",
    names_prefix = "judgment_",
    values_to = "judgment")%>%
  mutate(topic_ok= case_when(topic == 'hrf_1' | topic == 'hlf_1' | topic == 'crf_1' | topic == 'clf_1' ~ 'Fuel', topic == 'hrm_1' | topic == 'hlm_1' | topic == 'crm_1' | topic == 'clm_1' ~ 'Meat', topic == 'hrj_1' | topic == 'hlj_1' | topic == 'crj_1' | topic == 'clj_1' ~ 'Jet'))%>%
  dplyr::rename(topic_old = topic)%>%
  dplyr::rename(topic = topic_ok)%>%
  select(-topic_old)%>%
  filter(judgment != "")%>% 
  mutate_at('topic', factor) %>%
  dplyr::rename(clean_air = 'donation_1', co2_coalition = 'donation_4', goodwill = 'donation_5' )%>%
  mutate(donation = as.numeric(clean_air) + (-1)*as.numeric(co2_coalition) + 0*as.numeric(goodwill)) %>% # composite donation score: clean_air weighted +1, co2_coalition -1, goodwill 0
  mutate(pol_group = case_when(political_party == 'Democratic party' ~ 'Left', political_party == 'Republican party'  ~ 'Right' , political_party == 'None' | political_party == 'Other:' | political_party == 'Prefer not to say'~ 'Other')) %>%
  mutate(orientation_group = case_when(pol_orientation == 'Very Liberal/Left wing' | pol_orientation == 'Liberal/Left wing' ~ 'Left', pol_orientation == 'Very Conservative/Right wing' | pol_orientation == 'Conservative/Right wing' ~ 'Right', pol_orientation == 'Moderate' | pol_orientation == 'Prefer not to say' | pol_orientation == 'These options don’t describe my beliefs' ~ 'Other' )) %>% 
  mutate(group = case_when(pol_group == 'Left' & orientation_group == 'Left' ~ 'Left', pol_group == 'Right' & orientation_group == 'Right' ~ 'Right',  pol_group == 'Other' | orientation_group == 'Other'  ~ 'Other', pol_group == 'Right' & orientation_group == 'Left'  ~ 'Inconsistent', pol_group == 'Left' & orientation_group == 'Right'  ~ 'Inconsistent'))%>%
  filter(group != 'Other') %>%
  filter(group !=  'Inconsistent') %>%
  mutate(ingroup = case_when(group == condition_lr ~ 1, group != condition_lr ~ 0))%>%
  mutate(ingroup_f = as.factor(case_when(ingroup== 1 ~ 'ingroup', ingroup == 0 ~ 'outgroup')))%>%
   mutate(donation_p = donation/100) %>%
  mutate(across(c(clean_air, co2_coalition, goodwill), as.numeric)) %>%
  mutate(clean_air_p = clean_air/100, co2_coalition_p = co2_coalition/100, goodwill_p = goodwill/100)%>%
  mutate(across(starts_with('ccp_'), as.numeric))%>%
  mutate(cc_policy = (ccp_gasoline_1 + ccp_electric_1 + ccp_meal_1 + ccp_flights_1)/4)



long_likely <- data_a%>%
  select(starts_with('likely_'), id, id_w, condition_hc, condition_lr, starts_with('pol_'), -starts_with('pol_orientation_6'), starts_with('ccp_'), political_party, starts_with('emo_'), starts_with('donation_'))%>%
  pivot_longer( cols = starts_with("likely_"),
    names_to = "topic",
    names_prefix = "likely_",
    values_to = "likely")%>%
  mutate(topic_ok= case_when(topic == 'hrf_1' | topic == 'hlf_1' | topic == 'crf_1' | topic == 'clf_1' ~ 'Fuel', topic == 'hrm_1' | topic == 'hlm_1' | topic == 'crm_1' | topic == 'clm_1' ~ 'Meat', topic == 'hrj_1' | topic == 'hlj_1' | topic == 'crj_1' | topic == 'clj_1' ~ 'Jet'))%>%
  dplyr::rename(topic_old = topic)%>%
  dplyr::rename(topic = topic_ok)%>%
  select(-topic_old)%>%
  filter(likely != "")%>% 
  mutate_at('topic', factor)%>%
  mutate_at('likely', as.numeric)

likely_id_long <- long_likely %>%
  select(likely, id_w) 

long_full2 <- merge(long_likely, long_hc, by = 'id')

likely_id_wide <- likely_id_long%>% distinct(id_w, .keep_all = TRUE)

# IMPORTANT: these filters can be changed. As written, they KEEP only participants who
# answered Left on BOTH party AND orientation, or Right on BOTH, and they drop moderates,
# "Other", "None", "Prefer not to say", and inconsistent respondents (e.g., Left on party
# but Right on orientation). The justification is that such responses do not match the
# filters we used on mTurk to obtain a sample balanced between left and right, so we
# cannot entirely trust that data, especially for testing the ingroup/outgroup effects.
# Where appropriate, we will rerun the analyses including moderates to double-check the
# consistency of the results.
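# (Sketch) optional cross-tab of the raw party and orientation answers in data_a,
# to see how many respondents each consistency rule above would drop:
data_a %>% count(political_party, pol_orientation)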

## Below we DON'T drop moderates, so we can run additional analyses

long_hc2 <- data_a%>%
  select(judgment_hrf_1, judgment_hrm_1, judgment_hrj_1, judgment_crf_1, judgment_crm_1, judgment_crj_1, judgment_hlf_1, judgment_hlm_1, judgment_hlj_1, judgment_clf_1, judgment_clm_1, judgment_clj_1, id, id_w, condition_hc, condition_lr, starts_with('pol_'), -starts_with('pol_orientation_6'), starts_with('ccp_'), political_party, starts_with('emo_'), starts_with('donation_'))%>%
  pivot_longer( cols = starts_with("judgment_"),
    names_to = "topic",
    names_prefix = "judgment_",
    values_to = "judgment")%>%
  mutate(topic_ok= case_when(topic == 'hrf_1' | topic == 'hlf_1' | topic == 'crf_1' | topic == 'clf_1' ~ 'Fuel', topic == 'hrm_1' | topic == 'hlm_1' | topic == 'crm_1' | topic == 'clm_1' ~ 'Meat', topic == 'hrj_1' | topic == 'hlj_1' | topic == 'crj_1' | topic == 'clj_1' ~ 'Jet'))%>%
  dplyr::rename(topic_old = topic)%>%
  dplyr::rename(topic = topic_ok)%>%
  select(-topic_old)%>%
  filter(judgment != "")%>% 
  mutate_at('topic', factor) %>%
  dplyr::rename(clean_air = 'donation_1', co2_coalition = 'donation_4', goodwill = 'donation_5' )%>%
  mutate(donation = as.numeric(clean_air) + (-1)*as.numeric(co2_coalition) + 0*as.numeric(goodwill)) %>%
  mutate(pol_group = case_when(political_party == 'Democratic party' ~ 'Left', political_party == 'Republican party'  ~ 'Right' , political_party == 'None' | political_party == 'Other:' | political_party == 'Prefer not to say'~ 'Other')) %>%
  mutate(orientation_group = case_when(pol_orientation == 'Very Liberal/Left wing'~ 'Very Left', pol_orientation == 'Liberal/Left wing' ~ 'Left', pol_orientation == 'Very Conservative/Right wing' ~ 'Very Right', pol_orientation == 'Conservative/Right wing' ~ 'Right', pol_orientation == 'Moderate'  ~ 'Moderate', pol_orientation == 'Prefer not to say' | pol_orientation == 'These options don’t describe my beliefs' ~ 'Other' )) %>% 
  mutate(group = case_when(pol_group == 'Left' & orientation_group == 'Left' ~ 'Left', pol_group == 'Right' & orientation_group == 'Right' ~ 'Right',  pol_group == 'Other' | orientation_group == 'Other'  ~ 'Other', pol_group == 'Right' & orientation_group == 'Left'  ~ 'Inconsistent', pol_group == 'Left' & orientation_group == 'Right'  ~ 'Inconsistent'))%>%
   mutate(donation_p = donation/100) %>%
  mutate(across(c(clean_air, co2_coalition, goodwill), as.numeric)) %>%
  mutate(clean_air_p = clean_air/100, co2_coalition_p = co2_coalition/100, goodwill_p = goodwill/100)%>%
  mutate(across(starts_with('ccp_'), as.numeric))%>%
  mutate(cc_policy = (ccp_gasoline_1 + ccp_electric_1 + ccp_meal_1 + ccp_flights_1)/4)

# Now wide format

wide_hc <- long_hc %>%
   pivot_wider(names_from = topic, values_from = judgment)%>%
  mutate(judgment_m = (as.numeric(Fuel) + as.numeric(Jet) + as.numeric(Meat))/3)

wide_hc_lik <- merge(likely_id_wide, wide_hc, by = 'id_w')


# Including moderates wide format 

wide_hc2 <- long_hc2 %>%
   pivot_wider(names_from = topic, values_from = judgment) %>%
   mutate(judgment_m = (as.numeric(Fuel) + as.numeric(Jet) + as.numeric(Meat))/3) %>%
   filter(orientation_group != "Other") %>%
   filter(!is.na(orientation_group)) %>%
   mutate(orientation_group = factor(orientation_group)) %>%
   mutate(orientation_num = case_when(
      orientation_group == 'Very Left' ~ 1,
      orientation_group == 'Left' ~ 2,
      orientation_group == 'Moderate' ~ 3,
      orientation_group == 'Right' ~ 4,
      orientation_group == 'Very Right' ~ 5
   )) %>%
   mutate(orientation_num = as.numeric(orientation_num))
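# (Sketch) quick check that the numeric recoding of orientation matches the labels:
with(wide_hc2, table(orientation_group, orientation_num))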



## Let's prep the emotion ratings in a data set for exploratory analysis

emo <- long_hc %>%
  select(starts_with("emo_"), topic, id)%>%
    pivot_longer( cols = starts_with("emo_"),
    names_to = "emo_type",
    names_prefix = "emo_",
    values_to = "emo_rating")%>%
  separate(emo_type, c("topic", "emotion"), sep = "_")%>%
   mutate(topic= case_when(topic == 'hrf' | topic == 'hlf' | topic == 'crf' | topic == 'clf' ~ 'Fuel', topic == 'hrm' | topic == 'hlm' | topic == 'crm' | topic == 'clm' ~ 'Meat', topic == 'hrj' | topic == 'hlj' | topic == 'crj' | topic == 'clj' ~ 'Jet'))%>%
  mutate(emotion = case_when(emotion == '1' ~ 'enthusiastic', emotion == '2' ~ "worried", emotion == '3' ~ "proud", emotion == '4' ~ "angry", emotion == '5' ~ "hopeful", emotion == '6' ~  "frustrated"))%>%
  filter(emo_rating != "")%>%
  mutate_at('emo_rating', as.numeric)%>%
  mutate_at('emotion', as.factor) %>%
  group_by(id, emotion, topic)%>%
  dplyr::summarise(emo_m = mean(emo_rating))%>%
  pivot_wider(names_from = emotion, values_from = emo_m)
  
# long data frame with the Likely (i.e., plausibility) variable (not including moderates)

long_full <- merge(long_hc, likely_id_wide, by= "id_w")

Quick checks on data

#let's check alpha for judgment across topics

judgment <- wide_hc%>%
  select(Fuel, Jet, Meat)%>%
  mutate(across(c(Fuel, Jet, Meat), as.numeric))

alpha(judgment) #alpha of 0.69, acceptable
## 
## Reliability analysis   
## Call: alpha(x = judgment)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N  ase mean  sd median_r
##       0.69      0.68    0.62      0.41 2.1 0.02  3.2 1.4     0.33
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.64  0.69  0.72
## Duhachek  0.65  0.69  0.72
## 
##  Reliability if an item is dropped:
##      raw_alpha std.alpha G6(smc) average_r  S/N alpha se var.r med.r
## Fuel      0.46      0.46    0.30      0.30 0.87    0.040    NA  0.30
## Jet       0.76      0.76    0.61      0.61 3.13    0.018    NA  0.61
## Meat      0.49      0.49    0.33      0.33 0.97    0.038    NA  0.33
## 
##  Item statistics 
##        n raw.r std.r r.cor r.drop mean  sd
## Fuel 706  0.85  0.83  0.72   0.59  2.8 1.9
## Jet  706  0.66  0.70  0.41   0.35  3.1 1.6
## Meat 706  0.83  0.82  0.71   0.58  3.5 1.8
## 
## Non missing response frequency for each item
##         1    2    3    4    5    6    7 miss
## Fuel 0.35 0.23 0.10 0.12 0.05 0.07 0.08    0
## Jet  0.18 0.19 0.20 0.28 0.06 0.05 0.04    0
## Meat 0.15 0.20 0.16 0.23 0.07 0.10 0.09    0
# let's check alpha for policy support across items

ccp_supp <- wide_hc %>%
  select(ccp_gasoline_1, ccp_electric_1, ccp_meal_1, ccp_flights_1)

alpha(ccp_supp) #alpha of 0.83, very good 
## 
## Reliability analysis   
## Call: alpha(x = ccp_supp)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean  sd median_r
##       0.83      0.83    0.79      0.55 4.9 0.011  3.3 1.7     0.53
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.81  0.83  0.85
## Duhachek  0.81  0.83  0.85
## 
##  Reliability if an item is dropped:
##                raw_alpha std.alpha G6(smc) average_r S/N alpha se   var.r med.r
## ccp_gasoline_1      0.75      0.75    0.66      0.50 3.0    0.016 0.00057  0.50
## ccp_electric_1      0.79      0.79    0.73      0.56 3.8    0.014 0.00954  0.55
## ccp_meal_1          0.81      0.81    0.75      0.59 4.4    0.012 0.00548  0.60
## ccp_flights_1       0.78      0.79    0.71      0.55 3.7    0.014 0.00247  0.55
## 
##  Item statistics 
##                  n raw.r std.r r.cor r.drop mean  sd
## ccp_gasoline_1 706  0.86  0.86  0.82   0.74  2.8 2.0
## ccp_electric_1 706  0.82  0.80  0.70   0.64  4.2 2.2
## ccp_meal_1     706  0.77  0.77  0.65   0.59  2.9 2.0
## ccp_flights_1  706  0.81  0.81  0.73   0.66  3.5 2.1
## 
## Non missing response frequency for each item
##                   1    2    3    4    5    6    7 miss
## ccp_gasoline_1 0.43 0.13 0.12 0.08 0.10 0.06 0.07    0
## ccp_electric_1 0.22 0.09 0.07 0.08 0.17 0.17 0.20    0
## ccp_meal_1     0.40 0.14 0.12 0.09 0.10 0.08 0.07    0
## ccp_flights_1  0.27 0.12 0.14 0.11 0.14 0.12 0.10    0

Checking the Likely variable (plausibility, i.e., how likely participants think the social media posts are to be true): graphs and summaries

sum_likely <- long_likely %>%
  group_by(topic, condition_hc)%>%
  dplyr::summarise(avg = mean(likely), sd = sd(likely))




# Define colorblind-friendly colors
cb_colors <- c("#E69F00", "#56B4E9")  # orange and sky blue from the Okabe-Ito colorblind-safe palette

# Create the histogram
ggplot(long_full, aes(x = likely, fill = condition_hc)) +
  geom_histogram(binwidth = 1, position = "dodge", color = "black", alpha = 0.8) +
  facet_wrap(~condition_hc) +
  scale_fill_manual(values = cb_colors) +
  theme_minimal(base_size = 14) +
  theme(
    strip.text = element_text(size = 16, face = "bold"),
    legend.position = "none",  # Remove legend since facets indicate groups
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    axis.text = element_text(size = 12),
    axis.title = element_text(size = 14, face = "bold")
  ) +
  labs(
    x = "Plausibility",
    y = "Count",
    title = "Distribution of Plausibility Scores by Condition"
  )

Obtaining demographics

data_wide_dem <- data_a %>%
  select(political_party, pol_orientation, cc_important, id, age, gender, race, religion, education, education_parents)%>%
  mutate(pol_group = case_when(political_party == 'Democratic party' ~ 'Left', political_party == 'Republican party'  ~ 'Right' , political_party == 'None' | political_party == 'Other:' | political_party == 'Prefer not to say'~ 'Other')) %>%
  mutate(orientation_group = case_when(pol_orientation == 'Very Liberal/Left wing' | pol_orientation == 'Liberal/Left wing' ~ 'Left', pol_orientation == 'Very Conservative/Right wing' | pol_orientation == 'Conservative/Right wing' ~ 'Right', pol_orientation == 'Moderate' | pol_orientation == 'Prefer not to say' | pol_orientation == 'These options don’t describe my beliefs' ~ 'Other' )) %>% 
  mutate(group = case_when(pol_group == 'Left' & orientation_group == 'Left' ~ 'Left', pol_group == 'Right' & orientation_group == 'Right' ~ 'Right',  pol_group == 'Other' | orientation_group == 'Other'  ~ 'Other', pol_group == 'Right' & orientation_group == 'Left'  ~ 'Inconsistent', pol_group == 'Left' & orientation_group == 'Right'  ~ 'Inconsistent'))%>%
  filter(group != 'Other') %>%
  filter(group !=  'Inconsistent')

Analyses

Hypothesis 1.2

## quick plots to observe the data 

hist(wide_hc$judgment_m)

hist(scale(wide_hc$judgment_m))

ggplot(wide_hc, aes(x = condition_hc, y = judgment_m)) +
  geom_violin(draw_quantiles =  0.5) +
  labs(title = "Violin Plot of Judgment Hypocrisy vs Control",
       x = "Condition",
       y = "Mean Jdgment") +
  stat_compare_means(method = "wilcox.test", label.x = 1.3, label.y = 4) 

# judgment is not normally distributed, so for a simple mean-comparison test we will use the Wilcoxon test (and check with a t-test as well)
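# (Sketch) a formal normality check to back up the visual impression; shapiro.test()
# from base R handles this sample size:
shapiro.test(wide_hc$judgment_m)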

judgment_m_sd <- wide_hc%>%
  group_by(condition_hc)%>%
  dplyr::summarise(mean = mean(judgment_m), sd = sd(judgment_m))

wilcox_hc <- wide_hc%>%
  pivot_wider(names_from = condition_hc, values_from = judgment_m)%>%
  select(Control, Hypocrisy)

control_j <- wilcox_hc%>%
  select(Control)%>%
  na.omit()

hypocrisy_j <- wilcox_hc%>%
  select(Hypocrisy)%>%
  na.omit()

wilcox.test(control_j$Control, hypocrisy_j$Hypocrisy)
## 
##  Wilcoxon rank sum test with continuity correction
## 
## data:  control_j$Control and hypocrisy_j$Hypocrisy
## W = 79777, p-value = 9.78e-11
## alternative hypothesis: true location shift is not equal to 0
t.test(control_j$Control, hypocrisy_j$Hypocrisy)
## 
##  Welch Two Sample t-test
## 
## data:  control_j$Control and hypocrisy_j$Hypocrisy
## t = 6.5038, df = 701.26, p-value = 1.489e-10
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  0.4669204 0.8707257
## sample estimates:
## mean of x mean of y 
##  3.489262  2.820439
### effect size

## prepping the needed info 

wide_hc %<>% mutate_at('condition_hc', as.factor)
test_result <- wilcox.test(judgment_m ~ condition_hc, data= wide_hc)

wilcox.test(judgment_m ~ condition_hc, data= wide_hc, conf.int = T, conf.level = .95)
## 
##  Wilcoxon rank sum test with continuity correction
## 
## data:  judgment_m by condition_hc
## W = 79777, p-value = 9.78e-11
## alternative hypothesis: true location shift is not equal to 0
## 95 percent confidence interval:
##  0.3333483 0.9999794
## sample estimates:
## difference in location 
##              0.6667164
# Extract W statistic from the test result
W <- test_result$statistic

# Get the sample sizes for each group (condition_hc is a factor with two levels)
n1 <- as.numeric(nrow(hypocrisy_j)) # Number of observations in the Hypocrisy group
n2 <- as.numeric(nrow(control_j)) # Number of observations in the Control group

# Calculate the mean (mu_W) and standard deviation (sigma_W) of W
mu_W <- (n1 * n2) / 2
sigma_W <- sqrt((n1 * n2 * (n1 + n2 + 1)) / 12)

# Calculate the Z statistic (standardized test statistic)
Z <- (W - mu_W) / sigma_W

# Calculate the rank-biserial correlation (effect size)
r <- Z / sqrt(n1 + n2)

# Print the Z statistic and the effect size
Z
##        W 
## 6.451905
r #effect size
##         W 
## 0.2428206
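As a cross-check on the hand-computed rank-biserial correlation, the effectsize package (used below for eta squared) provides a direct estimator; this is a sketch assuming its formula interface.

effectsize::rank_biserial(judgment_m ~ condition_hc, data = wide_hc)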

Hypothesis 2: Now the interaction

#quick data observation 

hist(wide_hc$cc_policy)

# let's run models (judgment averaged across topics)
m_int <- lm(judgment_m ~ condition_hc*cc_policy, data=wide_hc)

m_int_all <- lm(judgment_m ~ condition_hc*cc_policy, data=wide_hc2) #including moderates


# let's run models (topics as random effects)

m_int_2 <- lmer(as.numeric(judgment) ~ condition_hc*cc_policy+ (1|topic), data=long_hc)

m_int_2_all <- lmer(as.numeric(judgment) ~ condition_hc*cc_policy+ (1|topic), data=long_hc2) #including moderates
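# (Sketch) optional likelihood-ratio test of the interaction term: fit a
# main-effects-only model and compare it to m_int_2 (anova() refits both with ML).
m_int_2_main <- lmer(as.numeric(judgment) ~ condition_hc + cc_policy + (1|topic), data=long_hc)
anova(m_int_2_main, m_int_2)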

# results

summary(m_int_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.numeric(judgment) ~ condition_hc * cc_policy + (1 | topic)
##    Data: long_hc
## 
## REML criterion at convergence: 8154.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.2984 -0.7320 -0.1483  0.6329  3.1216 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  topic    (Intercept) 0.1171   0.3422  
##  Residual             2.7243   1.6505  
## Number of obs: 2118, groups:  topic, 3
## 
## Fixed effects:
##                                   Estimate Std. Error         df t value
## (Intercept)                        4.87716    0.22593    3.20370  21.587
## condition_hcHypocrisy             -1.03579    0.15842 2112.00000  -6.538
## cc_policy                         -0.42494    0.02978 2112.00000 -14.271
## condition_hcHypocrisy:cc_policy    0.12279    0.04249 2112.00000   2.890
##                                 Pr(>|t|)    
## (Intercept)                      0.00014 ***
## condition_hcHypocrisy           7.79e-11 ***
## cc_policy                        < 2e-16 ***
## condition_hcHypocrisy:cc_policy  0.00390 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndt_H cc_plc
## cndtn_hcHyp -0.335              
## cc_policy   -0.430  0.614       
## cndtn_hcH:_  0.302 -0.891 -0.701
summary(m_int_all)
## 
## Call:
## lm(formula = judgment_m ~ condition_hc * cc_policy, data = wide_hc2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -3.3936 -0.8671 -0.1528  0.7367  4.0614 
## 
## Coefficients:
##                                 Estimate Std. Error t value Pr(>|t|)    
## (Intercept)                      4.78460    0.13418  35.658  < 2e-16 ***
## condition_hcHypocrisy           -0.96508    0.19501  -4.949 9.16e-07 ***
## cc_policy                       -0.39104    0.03663 -10.676  < 2e-16 ***
## condition_hcHypocrisy:cc_policy  0.10533    0.05259   2.003   0.0455 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.229 on 774 degrees of freedom
## Multiple R-squared:  0.2266, Adjusted R-squared:  0.2236 
## F-statistic: 75.59 on 3 and 774 DF,  p-value: < 2.2e-16
summary(m_int_2_all)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.numeric(judgment) ~ condition_hc * cc_policy + (1 | topic)
##    Data: long_hc2
## 
## REML criterion at convergence: 9044.7
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.2639 -0.7389 -0.1421  0.6081  3.1445 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  topic    (Intercept) 0.1113   0.3337  
##  Residual             2.7405   1.6554  
## Number of obs: 2346, groups:  topic, 3
## 
## Fixed effects:
##                                   Estimate Std. Error         df t value
## (Intercept)                        4.79758    0.21876    3.12562  21.930
## condition_hcHypocrisy             -0.97711    0.15109 2340.00000  -6.467
## cc_policy                         -0.39362    0.02838 2340.00000 -13.869
## condition_hcHypocrisy:cc_policy    0.10774    0.04082 2340.00000   2.639
##                                 Pr(>|t|)    
## (Intercept)                     0.000158 ***
## condition_hcHypocrisy           1.21e-10 ***
## cc_policy                        < 2e-16 ***
## condition_hcHypocrisy:cc_policy 0.008361 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndt_H cc_plc
## cndtn_hcHyp -0.325              
## cc_policy   -0.420  0.608       
## cndtn_hcH:_  0.292 -0.892 -0.695
qqnorm(residuals(m_int_2))

qqnorm(residuals(m_int_2_all))

hist(residuals(m_int))

summary(m_int_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.numeric(judgment) ~ condition_hc * cc_policy + (1 | topic)
##    Data: long_hc
## 
## REML criterion at convergence: 8154.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.2984 -0.7320 -0.1483  0.6329  3.1216 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  topic    (Intercept) 0.1171   0.3422  
##  Residual             2.7243   1.6505  
## Number of obs: 2118, groups:  topic, 3
## 
## Fixed effects:
##                                   Estimate Std. Error         df t value
## (Intercept)                        4.87716    0.22593    3.20370  21.587
## condition_hcHypocrisy             -1.03579    0.15842 2112.00000  -6.538
## cc_policy                         -0.42494    0.02978 2112.00000 -14.271
## condition_hcHypocrisy:cc_policy    0.12279    0.04249 2112.00000   2.890
##                                 Pr(>|t|)    
## (Intercept)                      0.00014 ***
## condition_hcHypocrisy           7.79e-11 ***
## cc_policy                        < 2e-16 ***
## condition_hcHypocrisy:cc_policy  0.00390 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndt_H cc_plc
## cndtn_hcHyp -0.335              
## cc_policy   -0.430  0.614       
## cndtn_hcH:_  0.302 -0.891 -0.701
hist(residuals(m_int_2))

## effect size



int_eta <- eta_squared(m_int)

m_int_eta <- int_eta$Eta2_partial[3] # partial eta squared for the interaction term (third row)

d_int <- sqrt((4*m_int_eta)/(1 - m_int_eta))
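# Note on the conversion above: for a two-group contrast with roughly equal group sizes,
# partial eta^2 and Cohen's d are related by eta^2 = d^2 / (d^2 + 4), so solving for d
# gives d = sqrt(4 * eta^2 / (1 - eta^2)), which is the formula applied here and in the
# later effect-size calculations.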


export_summs(m_int_2, confint = TRUE, robust = "HC0")
Model 1
(Intercept)                        4.88 *** (0.23)
condition_hcHypocrisy             -1.04 *** (0.16)
cc_policy                         -0.42 *** (0.03)
condition_hcHypocrisy:cc_policy    0.12 **  (0.04)
N                                  2118
N (topic)                          3
AIC                                8166.75
BIC                                8200.70
R2 (fixed)                         0.15
R2 (total)                         0.19
Standard errors are heteroskedasticity robust. *** p < 0.001; ** p < 0.01; * p < 0.05.
# tables

tab_model(m_int_2_all,   pred.labels = c("Intercept", "Condition (Hypocrisy)", "Policy Support", "Hypocrisy*PolicySupp"),
  dv.labels = c("Judgment"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
  Judgment
Coefficient Est. Std. Beta CI (95%) Std. CI p std. p
Intercept 4.80 0.17 4.37 – 5.23 -0.05 – 0.38 <0.001 0.125
Condition (Hypocrisy) -0.98 -0.34 -1.27 – -0.68 -0.42 – -0.27 <0.001 <0.001
Policy Support -0.39 -0.37 -0.45 – -0.34 -0.42 – -0.31 <0.001 <0.001
Hypocrisy*PolicySupp 0.11 0.10 0.03 – 0.19 0.03 – 0.17 0.008 0.008
Random Effects
σ2 2.74
τ00 topic 0.11
ICC 0.04
N topic 3
Observations 2346
Marginal R2 / Conditional R2 0.135 / 0.169
tab_model(m_int,   pred.labels = c("Intercept", "Condition (Hypocrisy)", "Policy Support", "Hypocrisy*PolicySupp"),
  dv.labels = c("Judgment"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
  Judgment
Coefficient Est. Std. Beta CI (95%) Std. CI p std. p
Intercept 4.88 0.22 4.60 – 5.15 0.13 – 0.31 <0.001 <0.001
Condition (Hypocrisy) -1.04 -0.45 -1.43 – -0.64 -0.57 – -0.32 <0.001 <0.001
Policy Support -0.42 -0.51 -0.50 – -0.35 -0.60 – -0.42 <0.001 <0.001
Hypocrisy*PolicySupp 0.12 0.15 0.02 – 0.23 0.02 – 0.28 0.024 0.024
Observations 706
R2 / R2 adjusted 0.254 / 0.251
tab_model(m_int_2,   pred.labels = c("Intercept", "Condition (Hypocrisy)", "Policy Support", "Hypocrisy*PolicySupp"),
  dv.labels = c("Judgment"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
  Judgment
Coefficient Est. Std. Beta CI (95%) Std. CI p std. p
Intercept 4.88 0.17 4.43 – 5.32 -0.05 – 0.39 <0.001 0.132
Condition (Hypocrisy) -1.04 -0.35 -1.35 – -0.73 -0.42 – -0.27 <0.001 <0.001
Policy Support -0.42 -0.40 -0.48 – -0.37 -0.45 – -0.34 <0.001 <0.001
Hypocrisy*PolicySupp 0.12 0.11 0.04 – 0.21 0.04 – 0.19 0.004 0.004
Random Effects
σ2 2.72
τ00 topic 0.12
ICC 0.04
N topic 3
Observations 2118
Marginal R2 / Conditional R2 0.150 / 0.185

H2: running the analysis controlling for Likely (i.e., plausibility)

# let's run models (topics as random effects)

m_2_likely <- lmer(as.numeric(judgment) ~ condition_hc*cc_policy + likely + (1|topic), data=long_full)

summary(m_2_likely)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.numeric(judgment) ~ condition_hc * cc_policy + likely + (1 |  
##     topic)
##    Data: long_full
## 
## REML criterion at convergence: 8160.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.3075 -0.7323 -0.1494  0.6293  3.1260 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  topic    (Intercept) 0.1171   0.3422  
##  Residual             2.7253   1.6508  
## Number of obs: 2118, groups:  topic, 3
## 
## Fixed effects:
##                                   Estimate Std. Error         df t value
## (Intercept)                        4.92234    0.24542    4.45989  20.056
## condition_hcHypocrisy             -1.03743    0.15849 2111.00000  -6.546
## cc_policy                         -0.42390    0.02986 2111.00000 -14.195
## likely                            -0.01018    0.02159 2111.00000  -0.472
## condition_hcHypocrisy:cc_policy    0.12345    0.04253 2111.00000   2.903
##                                 Pr(>|t|)    
## (Intercept)                     1.53e-05 ***
## condition_hcHypocrisy           7.41e-11 ***
## cc_policy                        < 2e-16 ***
## likely                           0.63730    
## condition_hcHypocrisy:cc_policy  0.00373 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndt_H cc_plc likely
## cndtn_hcHyp -0.317                     
## cc_policy   -0.366  0.610              
## likely      -0.390  0.022 -0.074       
## cndtn_hcH:_  0.290 -0.892 -0.696 -0.033
tab_model(m_2_likely,   pred.labels = c("Intercept", "Condition (Hypocrisy)", "Policy Support", "Plausibility", "Hypocrisy*PolicySupp"),
  dv.labels = c("Judgment"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
  Judgment
Coefficient Est. Std. Beta CI (95%) Std. CI p std. p
Intercept 4.92 0.17 4.44 – 5.40 -0.05 – 0.39 <0.001 0.133
Condition (Hypocrisy) -1.04 -0.35 -1.35 – -0.73 -0.42 – -0.27 <0.001 <0.001
Policy Support -0.42 -0.39 -0.48 – -0.37 -0.45 – -0.34 <0.001 <0.001
Plausibility -0.01 -0.01 -0.05 – 0.03 -0.05 – 0.03 0.637 0.637
Hypocrisy*PolicySupp 0.12 0.11 0.04 – 0.21 0.04 – 0.19 0.004 0.004
Random Effects
σ2 2.73
τ00 topic 0.12
ICC 0.04
N topic 3
Observations 2118
Marginal R2 / Conditional R2 0.150 / 0.185

Hypotheses 3 & 4: ingroup-outgroup effect

# Hypothesis 3

m_inout <- lmer(as.numeric(judgment) ~ condition_hc + ingroup_f + (1|topic), data = long_hc)
summary(m_inout)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.numeric(judgment) ~ condition_hc + ingroup_f + (1 | topic)
##    Data: long_hc
## 
## REML criterion at convergence: 8375.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.7896 -0.8302 -0.1134  0.6422  2.7501 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  topic    (Intercept) 0.1167   0.3416  
##  Residual             3.0324   1.7414  
## Number of obs: 2118, groups:  topic, 3
## 
## Fixed effects:
##                         Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)              3.77625    0.20781    2.29326  18.171  0.00158 ** 
## condition_hcHypocrisy   -0.66551    0.07568 2113.00000  -8.793  < 2e-16 ***
## ingroup_foutgroup       -0.56919    0.07569 2113.00000  -7.520 8.02e-14 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndt_H
## cndtn_hcHyp -0.179       
## ingrp_ftgrp -0.184 -0.006
tab_model(m_inout,
  dv.labels = c("Judgment"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
  Judgment
Coefficient Est. Std. Beta CI (95%) Std. CI p
(Intercept) 3.78 0.34 3.37 – 4.18 0.12 – 0.56 <0.001
condition hc [Hypocrisy] -0.67 -0.37 -0.81 – -0.52 -0.45 – -0.28 <0.001
ingroup f [outgroup] -0.57 -0.31 -0.72 – -0.42 -0.40 – -0.23 <0.001
Random Effects
σ2 3.03
τ00 topic 0.12
ICC 0.04
N topic 3
Observations 2118
Marginal R2 / Conditional R2 0.058 / 0.093
qqnorm(residuals(m_inout))

hist(residuals(m_inout))

# effect size 

inout_eta <- effectsize::eta_squared(m_inout)

m_inout_eta <- inout_eta$Eta2_partial[2]

d_inout <- sqrt((4*m_inout_eta)/(1 - m_inout_eta))

d_inout
## [1] 0.3272092
# Hypothesis 4

m_inout2 <- lmer(as.numeric(judgment) ~ condition_hc + ingroup_f *cc_policy + (1|topic), data = long_hc)
summary(m_inout2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.numeric(judgment) ~ condition_hc + ingroup_f * cc_policy +  
##     (1 | topic)
##    Data: long_hc
## 
## REML criterion at convergence: 8092.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5151 -0.7219 -0.1295  0.6096  3.3617 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  topic    (Intercept) 0.1173   0.3424  
##  Residual             2.6416   1.6253  
## Number of obs: 2118, groups:  topic, 3
## 
## Fixed effects:
##                               Estimate Std. Error         df t value Pr(>|t|)
## (Intercept)                    5.16666    0.22939    3.40406  22.524 7.97e-05
## condition_hcHypocrisy         -0.62960    0.07071 2111.00000  -8.904  < 2e-16
## ingroup_foutgroup             -0.94270    0.15591 2111.00000  -6.046 1.75e-09
## cc_policy                     -0.42069    0.02959 2111.00000 -14.217  < 2e-16
## ingroup_foutgroup:cc_policy    0.10689    0.04184 2111.00000   2.555   0.0107
##                                
## (Intercept)                 ***
## condition_hcHypocrisy       ***
## ingroup_foutgroup           ***
## cc_policy                   ***
## ingroup_foutgroup:cc_policy *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndt_H ingrp_ cc_plc
## cndtn_hcHyp -0.151                     
## ingrp_ftgrp -0.349  0.025              
## cc_policy   -0.432 -0.002  0.635       
## ingrp_ftg:_  0.310 -0.031 -0.891 -0.707
tab_model(m_inout2,
  dv.labels = c("Judgment"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
  Judgment
Coefficient Est. Std. Beta CI (95%) Std. CI p std. p
(Intercept) 5.17 0.34 4.72 – 5.62 0.11 – 0.56 <0.001 0.003
condition hc [Hypocrisy] -0.63 -0.35 -0.77 – -0.49 -0.42 – -0.27 <0.001 <0.001
ingroup f [outgroup] -0.94 -0.32 -1.25 – -0.64 -0.40 – -0.25 <0.001 <0.001
cc policy -0.42 -0.39 -0.48 – -0.36 -0.45 – -0.34 <0.001 <0.001
ingroup f [outgroup] × cc policy 0.11 0.10 0.02 – 0.19 0.02 – 0.18 0.011 0.011
Random Effects
σ2 2.64
τ00 topic 0.12
ICC 0.04
N topic 3
Observations 2118
Marginal R2 / Conditional R2 0.175 / 0.210
qqnorm(residuals(m_inout2))

inout2_eta <- effectsize::eta_squared(m_inout2)

m_inout2_eta <- inout2_eta$Eta2_partial[4]

d_inout2 <- sqrt((4*m_inout2_eta)/(1 - m_inout2_eta))

H3 & H4: running the analysis controlling for Likely (i.e., plausibility)

# let's run models (topics as random effects)

m_3_likely <- lmer(as.numeric(judgment) ~ condition_hc + ingroup_f + likely +  (1|topic), data = long_full)

summary(m_3_likely)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.numeric(judgment) ~ condition_hc + ingroup_f + likely + (1 |  
##     topic)
##    Data: long_full
## 
## REML criterion at convergence: 8376.9
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -1.86140 -0.82167 -0.09338  0.56927  2.80656 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  topic    (Intercept) 0.1167   0.3416  
##  Residual             3.0279   1.7401  
## Number of obs: 2118, groups:  topic, 3
## 
## Fixed effects:
##                         Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)              3.99108    0.23292    3.61833  17.135 0.000135 ***
## condition_hcHypocrisy   -0.66237    0.07564 2112.00000  -8.757  < 2e-16 ***
## ingroup_foutgroup       -0.55876    0.07580 2112.00000  -7.371 2.41e-13 ***
## likely                  -0.04614    0.02259 2112.00000  -2.042 0.041270 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndt_H ingrp_
## cndtn_hcHyp -0.150              
## ingrp_ftgrp -0.133 -0.004       
## likely      -0.452 -0.020 -0.067
tab_model(m_3_likely,
  dv.labels = c("Judgment"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
  Judgment
Coefficient Est. Std. Beta CI (95%) Std. CI p
(Intercept) 3.99 0.34 3.53 – 4.45 0.11 – 0.56 <0.001
condition hc [Hypocrisy] -0.66 -0.36 -0.81 – -0.51 -0.45 – -0.28 <0.001
ingroup f [outgroup] -0.56 -0.31 -0.71 – -0.41 -0.39 – -0.23 <0.001
likely -0.05 -0.04 -0.09 – -0.00 -0.08 – -0.00 0.041
Random Effects
σ2 3.03
τ00 topic 0.12
ICC 0.04
N topic 3
Observations 2118
Marginal R2 / Conditional R2 0.059 / 0.094
m_4_likely <- lmer(as.numeric(judgment) ~ condition_hc + ingroup_f *cc_policy  + likely + (1|topic), data = long_full)

summary(m_4_likely)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.numeric(judgment) ~ condition_hc + ingroup_f * cc_policy +  
##     likely + (1 | topic)
##    Data: long_full
## 
## REML criterion at convergence: 8098
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5154 -0.7218 -0.1299  0.6087  3.3595 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  topic    (Intercept) 0.1172   0.3424  
##  Residual             2.6428   1.6257  
## Number of obs: 2118, groups:  topic, 3
## 
## Fixed effects:
##                               Estimate Std. Error         df t value Pr(>|t|)
## (Intercept)                  5.171e+00  2.491e-01  4.729e+00  20.762 7.91e-06
## condition_hcHypocrisy       -6.296e-01  7.073e-02  2.110e+03  -8.900  < 2e-16
## ingroup_foutgroup           -9.430e-01  1.562e-01  2.110e+03  -6.039 1.83e-09
## cc_policy                   -4.207e-01  2.961e-02  2.110e+03 -14.206  < 2e-16
## likely                      -9.046e-04  2.140e-02  2.110e+03  -0.042   0.9663
## ingroup_foutgroup:cc_policy  1.071e-01  4.204e-02  2.110e+03   2.547   0.0109
##                                
## (Intercept)                 ***
## condition_hcHypocrisy       ***
## ingroup_foutgroup           ***
## cc_policy                   ***
## likely                         
## ingroup_foutgroup:cc_policy *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndt_H ingrp_ cc_plc likely
## cndtn_hcHyp -0.134                            
## ingrp_ftgrp -0.342  0.024                     
## cc_policy   -0.386 -0.001  0.632              
## likely      -0.389 -0.013  0.053 -0.030       
## ingrp_ftg:_  0.321 -0.030 -0.891 -0.701 -0.095
tab_model(m_4_likely,
  dv.labels = c("Judgment"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
  Judgment
Coefficient Est. Std. Beta CI (95%) Std. CI p std. p
(Intercept) 5.17 0.34 4.68 – 5.66 0.11 – 0.56 <0.001 0.003
condition hc [Hypocrisy] -0.63 -0.35 -0.77 – -0.49 -0.42 – -0.27 <0.001 <0.001
ingroup f [outgroup] -0.94 -0.32 -1.25 – -0.64 -0.40 – -0.25 <0.001 <0.001
cc policy -0.42 -0.39 -0.48 – -0.36 -0.45 – -0.34 <0.001 <0.001
likely -0.00 -0.00 -0.04 – 0.04 -0.04 – 0.04 0.966 0.966
ingroup f [outgroup] × cc policy 0.11 0.10 0.02 – 0.19 0.02 – 0.18 0.011 0.011
Random Effects
σ2 2.64
τ00 topic 0.12
ICC 0.04
N topic 3
Observations 2118
Marginal R2 / Conditional R2 0.175 / 0.210

Plotting ingroup-outgroup comparison

# Custom labels for the x-axis
custom_labels <- c(
  "ingroup.Hypocrisy" = "Hypocrisy",
  "outgroup.Hypocrisy" = "Hypocrisy",
  "ingroup.Control" = "Control",
  "outgroup.Control" = "Control"
)

# Plot using ggplot
p <- ggplot(wide_hc, aes(x = interaction(ingroup_f, condition_hc), y = judgment_m, fill = ingroup_f)) +
  geom_violin() + # violin showing the full distribution of judgments per group
  geom_point(position = position_jitter(width = 0.2), alpha = 0.4)+
   stat_summary(fun = mean, fun.min = mean, fun.max = mean, 
               geom = "crossbar", width = 0.3, color = "darkgray") + # Mean line
  #stat_summary(fun = mean, geom = "point", shape = 20, size = 3, color = "blue") + # Median line
  #stat_boxplot(geom = "errorbar", width = 0.25) + # Error lines
  scale_fill_manual(values = c("#CC79A7", "#009E73")) + # Customize colors
  labs(title = "Leniency to the Political Ingroup", x = "Condition", y = "Judgment (Wrong to Right)", fill = "Group") +
    scale_x_discrete(labels = custom_labels) + # Use custom labels for the x-axis
  theme_minimal()



# Add significance annotation using ggsignif
p + geom_signif(comparisons = list(c("ingroup.Hypocrisy", "ingroup.Control")),
                map_signif_level = TRUE,
                y_position = max(wide_hc$judgment_m) + 0.5) +
    geom_signif(comparisons = list(c("outgroup.Hypocrisy", "outgroup.Control")),
                map_signif_level = TRUE,
                y_position = max(wide_hc$judgment_m) + 1) +
    geom_signif(comparisons = list(c("ingroup.Hypocrisy", "outgroup.Hypocrisy")),
                map_signif_level = TRUE,
                y_position = max(wide_hc$judgment_m) + 1.5) +
    geom_signif(comparisons = list(c("ingroup.Control", "outgroup.Control")), 
                map_signif_level = TRUE,
                y_position = max(wide_hc$judgment_m) + 2) 

Improved version of the plot:

# Define custom labels
custom_labels <- c(
  "ingroup.Hypocrisy" = "Hypocrisy",
  "outgroup.Hypocrisy" = "Hypocrisy",
  "ingroup.Control" = "Control",
  "outgroup.Control" = "Control"
)

# Compute group means
means_df <- wide_hc %>%
  group_by(group = interaction(ingroup_f, condition_hc)) %>%
  dplyr::summarise(mu_hat = mean(judgment_m), .groups = "drop")

# Base plot
p <- ggplot(wide_hc, aes(x = interaction(ingroup_f, condition_hc), y = judgment_m)) +
  geom_violin(color = "black", fill = NA) +  # No fill, just contour
  geom_jitter(aes(color = ingroup_f), width = 0.2, alpha = 0.6, size = 1.5) +  # Points colored by group
  geom_boxplot(width = 0.15, outlier.shape = NA, alpha = 0.3) +  # Boxplot for spread (pale)
  geom_point(data = means_df, aes(x = group, y = mu_hat), color = "black", size = 3) +  # Mean dot
  geom_text(data = means_df, aes(x = as.numeric(group) + 0.25, y = mu_hat + 0.15, 
                                 label = paste0("μ̂ = ", round(mu_hat, 2))),
            size = 3, vjust = 0, fontface = "italic") +  # Mean label
  scale_color_manual(values = c("ingroup" = "#CC79A7", "outgroup" = "#009E73")) +
  scale_x_discrete(labels = custom_labels) +
  labs(title = "Leniency to the Political Ingroup",
       x = "Condition", y = "Judgment (Wrong to Right)", color = "Group") +
  theme_minimal() +
  theme(legend.position = "top")

# Add significance bars
p_final <- p +
  geom_signif(comparisons = list(c("ingroup.Hypocrisy", "ingroup.Control")),
              map_signif_level = TRUE,
              y_position = max(wide_hc$judgment_m) + 0.5) +
  geom_signif(comparisons = list(c("outgroup.Hypocrisy", "outgroup.Control")),
              map_signif_level = TRUE,
              y_position = max(wide_hc$judgment_m) + 1) +
  geom_signif(comparisons = list(c("ingroup.Hypocrisy", "outgroup.Hypocrisy")),
              map_signif_level = TRUE,
              y_position = max(wide_hc$judgment_m) + 1.5) +
  geom_signif(comparisons = list(c("ingroup.Control", "outgroup.Control")),
              map_signif_level = TRUE,
              y_position = max(wide_hc$judgment_m) + 2)

# Show the final plot
print(p_final)

# Save the plot in high definition
ggsave("figure2_high_quality.png", p_final, width = 10, height = 6, dpi = 600, bg = "white")

Hypothesis 1.1 & Hypothesis 4

Donation analysis

# Quick mean comparison 
wilcox_d <- wide_hc%>%
  pivot_wider(names_from = condition_hc, values_from = donation_p)%>%
  select(Control, Hypocrisy)

control_d <- wilcox_d%>%
  select(Control)%>%
  na.omit()

hypocrisy_d <- wilcox_d%>%
  select(Hypocrisy)%>%
  na.omit()

wilcox.test(control_d$Control, hypocrisy_d$Hypocrisy)
## 
##  Wilcoxon rank sum test with continuity correction
## 
## data:  control_d$Control and hypocrisy_d$Hypocrisy
## W = 61494, p-value = 0.7627
## alternative hypothesis: true location shift is not equal to 0
#model

m_d <- lm(donation_p ~ condition_hc*cc_policy + ingroup_f*cc_policy + condition_hc*ingroup_f, data= wide_hc)

summary(m_d)
## 
## Call:
## lm(formula = donation_p ~ condition_hc * cc_policy + ingroup_f * 
##     cc_policy + condition_hc * ingroup_f, data = wide_hc)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.4948 -0.2411 -0.0400  0.3024  1.0130 
## 
## Coefficients:
##                                           Estimate Std. Error t value Pr(>|t|)
## (Intercept)                             -0.0348038  0.0669119  -0.520    0.603
## condition_hcHypocrisy                   -0.1152104  0.0828706  -1.390    0.165
## cc_policy                                0.0868108  0.0172591   5.030 6.25e-07
## ingroup_foutgroup                        0.0007393  0.0812061   0.009    0.993
## condition_hcHypocrisy:cc_policy          0.0227921  0.0201119   1.133    0.257
## cc_policy:ingroup_foutgroup             -0.0220627  0.0201090  -1.097    0.273
## condition_hcHypocrisy:ingroup_foutgroup  0.0750544  0.0679429   1.105    0.270
##                                            
## (Intercept)                                
## condition_hcHypocrisy                      
## cc_policy                               ***
## ingroup_foutgroup                          
## condition_hcHypocrisy:cc_policy            
## cc_policy:ingroup_foutgroup                
## condition_hcHypocrisy:ingroup_foutgroup    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.4507 on 699 degrees of freedom
## Multiple R-squared:  0.1032, Adjusted R-squared:  0.09547 
## F-statistic:  13.4 on 6 and 699 DF,  p-value: 2.048e-14
tab_model(m_d,
  dv.labels = c("Donation"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
  Donation
Coefficient Est. Std. Beta CI (95%) Std. CI p std. p
(Intercept) -0.03 0.08 -0.17 – 0.10 -0.06 – 0.22 0.603 0.282
condition hc [Hypocrisy] -0.12 -0.08 -0.28 – 0.05 -0.28 – 0.12 0.165 0.414
cc policy 0.09 0.31 0.05 – 0.12 0.19 – 0.43 <0.001 <0.001
ingroup f [outgroup] 0.00 -0.15 -0.16 – 0.16 -0.35 – 0.04 0.993 0.129
condition hc [Hypocrisy] × cc policy 0.02 0.08 -0.02 – 0.06 -0.06 – 0.22 0.257 0.257
cc policy × ingroup f [outgroup] -0.02 -0.08 -0.06 – 0.02 -0.22 – 0.06 0.273 0.273
condition hc [Hypocrisy] × ingroup f [outgroup] 0.08 0.16 -0.06 – 0.21 -0.12 – 0.44 0.270 0.270
Observations 706
R2 / R2 adjusted 0.103 / 0.095
qqnorm(residuals(m_d))

#quick bootstrap 

library(simpleboot) # lm.boot() is provided by the simpleboot package
bm_d <- lm.boot(m_d, 1000, rows = FALSE) # residual bootstrap of the donation model coefficients

summary(bm_d)
## BOOTSTRAP OF LINEAR MODEL  (method = residuals)
## 
## Original Model Fit
## ------------------
## Call:
## lm(formula = donation_p ~ condition_hc * cc_policy + ingroup_f * 
##     cc_policy + condition_hc * ingroup_f, data = wide_hc)
## 
## Coefficients:
##                             (Intercept)  
##                              -0.0348038  
##                   condition_hcHypocrisy  
##                              -0.1152104  
##                               cc_policy  
##                               0.0868108  
##                       ingroup_foutgroup  
##                               0.0007393  
##         condition_hcHypocrisy:cc_policy  
##                               0.0227921  
##             cc_policy:ingroup_foutgroup  
##                              -0.0220627  
## condition_hcHypocrisy:ingroup_foutgroup  
##                               0.0750544  
## 
## Bootstrap SD's:
##                             (Intercept)  
##                              0.06655895  
##                   condition_hcHypocrisy  
##                              0.08378141  
##                               cc_policy  
##                              0.01689672  
##                       ingroup_foutgroup  
##                              0.08000462  
##         condition_hcHypocrisy:cc_policy  
##                              0.02023413  
##             cc_policy:ingroup_foutgroup  
##                              0.01938476  
## condition_hcHypocrisy:ingroup_foutgroup  
##                              0.06784677
## checking the cc_policy effect on its own

m_d0 <- lm(donation_p ~ cc_policy, data= wide_hc)
summary(m_d0)
## 
## Call:
## lm(formula = donation_p ~ cc_policy, data = wide_hc)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.53870 -0.25453 -0.01408  0.32198  0.98592 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -0.07336    0.03741  -1.961   0.0503 .  
## cc_policy    0.08744    0.01004   8.709   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.4506 on 704 degrees of freedom
## Multiple R-squared:  0.09726,    Adjusted R-squared:  0.09598 
## F-statistic: 75.85 on 1 and 704 DF,  p-value: < 2.2e-16
tab_model(m_d0,
  dv.labels = c("Donation"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
  Donation
Coefficient Est. Std. Beta CI (95%) Std. CI p
(Intercept) -0.07 0.00 -0.15 – 0.00 -0.07 – 0.07 0.050
cc policy 0.09 0.31 0.07 – 0.11 0.24 – 0.38 <0.001
Observations 706
R2 / R2 adjusted 0.097 / 0.096
## effect size for cc policy 

d_eta <- effectsize::eta_squared(m_d)

m_d_eta <- d_eta$Eta2_partial[2]

d_d <- sqrt((4*m_d_eta)/(1 - m_d_eta))


bm_d0 <- lm.boot(m_d0, 1000, rows = FALSE)
summary(bm_d0)
## BOOTSTRAP OF LINEAR MODEL  (method = residuals)
## 
## Original Model Fit
## ------------------
## Call:
## lm(formula = donation_p ~ cc_policy, data = wide_hc)
## 
## Coefficients:
## (Intercept)    cc_policy  
##    -0.07336      0.08744  
## 
## Bootstrap SD's:
## (Intercept)    cc_policy  
## 0.037137639  0.009912579

Donation explained by plausibility

# aggregating plausibility per participant to see its overall effect on donation

agg <- aggregate(likely ~ id_w, data = long_full, FUN = mean) 

agg$donation_p <- tapply(long_full$donation_p, long_full$id_w, unique)
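# (Sketch) dplyr equivalent that avoids relying on aggregate() and tapply()
# returning participants in the same order:
agg_check <- long_full %>%
  group_by(id_w) %>%
  dplyr::summarise(likely = mean(likely), donation_p = dplyr::first(donation_p), .groups = "drop")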

# linear regression 

model_dp <- lm(donation_p ~ likely, data = agg)

summary(model_dp)
## 
## Call:
## lm(formula = donation_p ~ likely, data = agg)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.2615 -0.2413 -0.1604  0.3337  0.8598 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)  
## (Intercept)  0.11999    0.05400   2.222   0.0266 *
## likely       0.02021    0.01061   1.905   0.0572 .
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.473 on 704 degrees of freedom
## Multiple R-squared:  0.005126,   Adjusted R-squared:  0.003713 
## F-statistic: 3.627 on 1 and 704 DF,  p-value: 0.05725
# table 

tab_model(model_dp,
  dv.labels = c("Donation"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
  Donation
Coefficient Est. Std. Beta CI (95%) Std. CI p
(Intercept) 0.12 -0.00 0.01 – 0.23 -0.07 – 0.07 0.027
likely 0.02 0.07 -0.00 – 0.04 -0.00 – 0.15 0.057
Observations 706
R2 / R2 adjusted 0.005 / 0.004

Full bootstrap for donation analysis

# Bootstrap 95% CI for regression coefficients

# function to obtain regression weights

bs <- function(formula, data, indices)
{
  d <- data[indices,] # allows boot to select sample
  fit <- lm(formula, data=d)
  return(coef(fit))
}
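# (Optional) fixing the random seed before boot(), e.g. set.seed(2024), makes the
# resampled confidence intervals below reproducible.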
# bootstrapping with 1000 replications
results <- boot(data=wide_hc, statistic=bs,
   R=1000, formula=donation_p ~ condition_hc*cc_policy + ingroup_f*cc_policy + condition_hc*ingroup_f)



# view results
results
## 
## ORDINARY NONPARAMETRIC BOOTSTRAP
## 
## 
## Call:
## boot(data = wide_hc, statistic = bs, R = 1000, formula = donation_p ~ 
##     condition_hc * cc_policy + ingroup_f * cc_policy + condition_hc * 
##         ingroup_f)
## 
## 
## Bootstrap Statistics :
##          original        bias    std. error
## t1* -0.0348037874  6.269432e-04  0.06667804
## t2* -0.1152104289  1.169171e-04  0.08613200
## t3*  0.0868107653  5.145989e-05  0.01752804
## t4*  0.0007393371 -1.753616e-03  0.08124633
## t5*  0.0227920811 -1.923594e-04  0.02142423
## t6* -0.0220626572  8.144031e-04  0.02084577
## t7*  0.0750544316 -1.354334e-03  0.06572146
plot(results, index=1) # intercept

plot(results, index=2) # condition_hcHypocrisy

plot(results, index=3) # cc_policy

plot(results, index=4) # ingroup_foutgroup

plot(results, index=5) # condition_hcHypocrisy:cc_policy

plot(results, index=6) # cc_policy:ingroup_foutgroup

plot(results, index=7) # condition_hcHypocrisy:ingroup_foutgroup

# get 95% confidence intervals
boot.ci(results, type="bca", index=1) # intercept
## BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
## Based on 1000 bootstrap replicates
## 
## CALL : 
## boot.ci(boot.out = results, type = "bca", index = 1)
## 
## Intervals : 
## Level       BCa          
## 95%   (-0.1702,  0.0916 )  
## Calculations and Intervals on Original Scale
boot.ci(results, type="bca", index=2) # condition_hcHypocrisy
## BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
## Based on 1000 bootstrap replicates
## 
## CALL : 
## boot.ci(boot.out = results, type = "bca", index = 2)
## 
## Intervals : 
## Level       BCa          
## 95%   (-0.2907,  0.0514 )  
## Calculations and Intervals on Original Scale
boot.ci(results, type="bca", index=3) # cc_policy
## BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
## Based on 1000 bootstrap replicates
## 
## CALL : 
## boot.ci(boot.out = results, type = "bca", index = 3)
## 
## Intervals : 
## Level       BCa          
## 95%   ( 0.0542,  0.1218 )  
## Calculations and Intervals on Original Scale
boot.ci(results, type="bca", index=4) # ingroup_foutgroup
## BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
## Based on 1000 bootstrap replicates
## 
## CALL : 
## boot.ci(boot.out = results, type = "bca", index = 4)
## 
## Intervals : 
## Level       BCa          
## 95%   (-0.1522,  0.1601 )  
## Calculations and Intervals on Original Scale
boot.ci(results, type="bca", index=5) # condition_hcHypocrisy:cc_policy
## BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
## Based on 1000 bootstrap replicates
## 
## CALL : 
## boot.ci(boot.out = results, type = "bca", index = 5)
## 
## Intervals : 
## Level       BCa          
## 95%   (-0.0214,  0.0626 )  
## Calculations and Intervals on Original Scale
boot.ci(results, type="bca", index=6) # cc_policy:ingroup_foutgroup
## BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
## Based on 1000 bootstrap replicates
## 
## CALL : 
## boot.ci(boot.out = results, type = "bca", index = 6)
## 
## Intervals : 
## Level       BCa          
## 95%   (-0.0594,  0.0211 )  
## Calculations and Intervals on Original Scale
boot.ci(results, type="bca", index=7) # condition_hcHypocrisy:ingroup_foutgroup
## BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
## Based on 1000 bootstrap replicates
## 
## CALL : 
## boot.ci(boot.out = results, type = "bca", index = 7)
## 
## Intervals : 
## Level       BCa          
## 95%   (-0.0499,  0.1984 )  
## Calculations and Intervals on Original Scale

Exploratory analysis

Emotions analysis

## grouping the emotion ratings into positive and negative composites

negative<- as.data.frame(emo) %>%
  select(angry, frustrated, worried)

alpha(negative)
## 
## Reliability analysis   
## Call: alpha(x = negative)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean sd median_r
##       0.91      0.91    0.88      0.78  11 0.0032   36 33     0.77
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.91  0.91  0.92
## Duhachek  0.91  0.91  0.92
## 
##  Reliability if an item is dropped:
##            raw_alpha std.alpha G6(smc) average_r  S/N alpha se var.r med.r
## angry           0.87      0.87    0.77      0.77  6.6   0.0058    NA  0.77
## frustrated      0.85      0.85    0.74      0.74  5.6   0.0066    NA  0.74
## worried         0.91      0.91    0.84      0.84 10.6   0.0038    NA  0.84
## 
##  Item statistics 
##               n raw.r std.r r.cor r.drop mean sd
## angry      2115  0.93  0.93  0.88   0.84   34 36
## frustrated 2115  0.94  0.94  0.91   0.86   42 38
## worried    2115  0.90  0.90  0.81   0.78   33 35
positive <- as.data.frame(emo) %>%
  select(enthusiastic, hopeful, proud)

alpha(positive)
## 
## Reliability analysis   
## Call: alpha(x = positive)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean sd median_r
##       0.95      0.95    0.93      0.86  19 0.0019   16 26     0.86
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.94  0.95  0.95
## Duhachek  0.94  0.95  0.95
## 
##  Reliability if an item is dropped:
##              raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## enthusiastic      0.91      0.92    0.85      0.85  11   0.0037    NA  0.85
## hopeful           0.93      0.94    0.88      0.88  14   0.0028    NA  0.88
## proud             0.93      0.93    0.86      0.86  13   0.0032    NA  0.86
## 
##  Item statistics 
##                 n raw.r std.r r.cor r.drop mean sd
## enthusiastic 2115  0.96  0.96  0.93   0.91   16 27
## hopeful      2115  0.95  0.95  0.90   0.88   19 29
## proud        2115  0.95  0.95  0.92   0.89   15 26
emo_g <- as.data.frame(emo) %>%
  rowwise() %>%
  mutate(positive = mean(c(enthusiastic, hopeful, proud)),
         negative = mean(c(angry, frustrated, worried))) %>% # wrap items in c() so the row mean uses all three items
  ungroup()
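An equivalent vectorised way to build the composites, without rowwise(), is rowMeans() over the item columns; a sketch on the same emo data frame:

# Vectorised alternative to the rowwise() composites above
emo_g_alt <- as.data.frame(emo) %>%
  mutate(positive = rowMeans(across(c(enthusiastic, hopeful, proud))),
         negative = rowMeans(across(c(angry, frustrated, worried))))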

long_emo <- merge(long_hc, emo_g)

#negative 

neg_full <- lmer(negative~condition_hc + ingroup_f + (1|topic), data = long_emo)

summary(neg_full)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: negative ~ condition_hc + ingroup_f + (1 | topic)
##    Data: long_emo
## 
## REML criterion at convergence: 21025.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.4561 -0.8547 -0.3641  0.8512  2.3133 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  topic    (Intercept)   32.78   5.726  
##  Residual             1201.01  34.656  
## Number of obs: 2118, groups:  topic, 3
## 
## Fixed effects:
##                       Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)             24.394      3.553    2.413   6.865   0.0123 *  
## condition_hcHypocrisy   10.291      1.506 2113.000   6.833 1.09e-11 ***
## ingroup_foutgroup        9.551      1.506 2113.000   6.341 2.78e-10 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndt_H
## cndtn_hcHyp -0.208       
## ingrp_ftgrp -0.214 -0.006
tab_model(neg_full,
  dv.labels = c("Negative Emotions"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
Negative Emotions

Coefficient                Est.     Std. Beta   CI (95%)         Std. CI          p
(Intercept)                24.39    -0.28       17.43 – 31.36    -0.47 – -0.08    <0.001
condition hc [Hypocrisy]   10.29     0.29        7.34 – 13.24     0.21 – 0.37     <0.001
ingroup f [outgroup]        9.55     0.27        6.60 – 12.51     0.19 – 0.35     <0.001

Random Effects
σ²                         1201.01
τ00 topic                  32.78
ICC                        0.03
N topic                    3
Observations               2118
Marginal R² / Conditional R²   0.039 / 0.064
pos_full <- lmer(positive~condition_hc + ingroup_f + (1|topic), data = long_emo)

summary(pos_full)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: positive ~ condition_hc + ingroup_f + (1 | topic)
##    Data: long_emo
## 
## REML criterion at convergence: 19857.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -0.9044 -0.6477 -0.4225  0.1156  3.5251 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  topic    (Intercept)   9.868   3.141  
##  Residual             691.758  26.301  
## Number of obs: 2118, groups:  topic, 3
## 
## Fixed effects:
##                       Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)             21.126      2.066    2.786   10.23  0.00273 ** 
## condition_hcHypocrisy   -4.675      1.143 2113.000   -4.09 4.48e-05 ***
## ingroup_foutgroup       -5.921      1.143 2113.000   -5.18 2.43e-07 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndt_H
## cndtn_hcHyp -0.272       
## ingrp_ftgrp -0.279 -0.006
tab_model(pos_full,
  dv.labels = c("Positive Emotions"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
Positive Emotions

Coefficient                Est.     Std. Beta   CI (95%)          Std. CI          p
(Intercept)                21.13     0.20       17.07 – 25.18      0.05 – 0.35     <0.001
condition hc [Hypocrisy]   -4.67    -0.18       -6.92 – -2.43     -0.26 – -0.09    <0.001
ingroup f [outgroup]       -5.92    -0.22       -8.16 – -3.68     -0.31 – -0.14    <0.001

Random Effects
σ²                         691.76
τ00 topic                  9.87
ICC                        0.01
N topic                    3
Observations               2118
Marginal R² / Conditional R²   0.020 / 0.034
mean(long_emo$positive)
## [1] 15.81209
sd(long_emo$positive)
## [1] 26.68333
## Effect sizes

neg_full_eta <- effectsize::eta_squared(neg_full)

neg_full_eta_hc <- neg_full_eta$Eta2_partial[1]

d_neg_full_hc <- sqrt((4*neg_full_eta_hc)/(1 - neg_full_eta_hc))

neg_full_eta_out <- neg_full_eta$Eta2_partial[2]

d_neg_full_out <- sqrt((4*neg_full_eta_out)/(1 - neg_full_eta_out))


### Positive emotions

pos_full_eta <- effectsize::eta_squared(pos_full)

pos_full_eta_hc <- pos_full_eta$Eta2_partial[1]

d_pos_full_hc <- sqrt((4*pos_full_eta_hc)/(1 - pos_full_eta_hc))

pos_full_eta_out <- pos_full_eta$Eta2_partial[2]

d_pos_full_out <- sqrt((4*pos_full_eta_out)/(1 - pos_full_eta_out))
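The same eta-squared-to-d conversion is applied four times above; it can be wrapped in a small helper (eta_to_d is a hypothetical convenience function, not part of effectsize):

# Hypothetical helper: convert partial eta squared to Cohen's d via d = sqrt(4*eta / (1 - eta))
eta_to_d <- function(eta_p) sqrt((4 * eta_p) / (1 - eta_p))

d_neg_full_hc  <- eta_to_d(neg_full_eta$Eta2_partial[1])
d_neg_full_out <- eta_to_d(neg_full_eta$Eta2_partial[2])
d_pos_full_hc  <- eta_to_d(pos_full_eta$Eta2_partial[1])
d_pos_full_out <- eta_to_d(pos_full_eta$Eta2_partial[2])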

Plotting exploratory analysis

#negative emotions

p_neg <- ggplot(long_emo, aes(x = interaction(ingroup_f, condition_hc), y = negative, fill = ingroup_f)) +
  geom_violin() + # violin showing the full distribution
  geom_point(position = position_jitter(width = 0.2), alpha = 0.4) +
  stat_summary(fun = mean, fun.min = mean, fun.max = mean,
               geom = "crossbar", width = 0.3, color = "darkgray") + # mean crossbar
  scale_fill_manual(values = c("#CC57A7", "#001E73")) + # custom colors
  labs(title = "Effects on negative emotions", x = "Condition", y = "Negative emotions (low to high)", fill = "Group") +
  scale_x_discrete(labels = custom_labels) + # custom labels for the x-axis
  theme_minimal()



# Add significance annotation using ggsignif
p_neg + geom_signif(comparisons = list(c("ingroup.Hypocrisy", "ingroup.Control")),
                map_signif_level = TRUE,
                y_position = max(long_emo$negative) + 1) +
    geom_signif(comparisons = list(c("outgroup.Hypocrisy", "outgroup.Control")),
                map_signif_level = TRUE,
                y_position = max(long_emo$negative) + 4 ) +
    geom_signif(comparisons = list(c("ingroup.Hypocrisy", "outgroup.Hypocrisy")),
                map_signif_level = TRUE,
                y_position = max(long_emo$negative) + 7) +
    geom_signif(comparisons = list(c("ingroup.Control", "outgroup.Control")), 
                map_signif_level = TRUE,
                y_position = max(long_emo$negative) + 10) 

# positive emotions 

p_pos <- ggplot(long_emo, aes(x = interaction(ingroup_f, condition_hc), y = positive, fill = ingroup_f)) +
  geom_violin() + # violin showing the full distribution
  geom_point(position = position_jitter(width = 0.2), alpha = 0.4) +
  stat_summary(fun = mean, fun.min = mean, fun.max = mean,
               geom = "crossbar", width = 0.3, color = "darkgray") + # mean crossbar
  scale_fill_manual(values = c("#AA57B7", "#281E73")) + # custom colors
  labs(title = "Effects on positive emotions", x = "Condition", y = "Positive emotions (low to high)", fill = "Group") +
  scale_x_discrete(labels = custom_labels) + # custom labels for the x-axis
  theme_minimal()



# Add significance annotation using ggsignif
p_pos + geom_signif(comparisons = list(c("ingroup.Hypocrisy", "ingroup.Control")),
                map_signif_level = TRUE,
                y_position = max(long_emo$positive) + 1) +
    geom_signif(comparisons = list(c("outgroup.Hypocrisy", "outgroup.Control")),
                map_signif_level = TRUE,
                y_position = max(long_emo$positive) + 4 ) +
    geom_signif(comparisons = list(c("ingroup.Hypocrisy", "outgroup.Hypocrisy")),
                map_signif_level = TRUE,
                y_position = max(long_emo$positive) + 7) +
    geom_signif(comparisons = list(c("ingroup.Control", "outgroup.Control")), 
                map_signif_level = TRUE,
                y_position = max(long_emo$positive) + 10) 

Plausibility analysis (likely ~ policy support)

mlikely <- lmer(likely ~ cc_policy + (1|topic), data = long_full)

summary(mlikely)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: likely ~ cc_policy + (1 | topic)
##    Data: long_full
## 
## REML criterion at convergence: 8173.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5442 -0.5789  0.1839  0.7448  1.5076 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  topic    (Intercept) 0.000    0.000   
##  Residual             2.766    1.663   
## Number of obs: 2118, groups:  topic, 3
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 4.359e+00  7.972e-02 2.116e+03  54.671  < 2e-16 ***
## cc_policy   1.342e-01  2.139e-02 2.116e+03   6.275 4.22e-10 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr)
## cc_policy -0.891
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
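The topic variance is estimated at zero, so the random-intercept fit is singular; lme4's isSingular() confirms this, and the aggregated lm() further below serves as a fallback.

# Confirm the singular fit flagged above (topic variance estimated at 0)
isSingular(mlikely)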
ggplot(long_full, aes(x = cc_policy, y = likely)) +
  geom_smooth(method = "lm", se = TRUE, color = "blue", size = 1) +
  labs(x = "Climate Change Policy Support (cc_policy)",
       y = "Plausibility",
       title = "Plausibility by Climate Policy Support") +
  theme_minimal()

tab_model(mlikely,
  dv.labels = c("Plausibility"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
Plausibility

Coefficient     Est.    Std. Beta   CI (95%)       Std. CI         p
(Intercept)     4.36    -0.00       4.20 – 4.51    -0.04 – 0.04    <0.001
cc policy       0.13     0.14       0.09 – 0.18     0.09 – 0.18    <0.001

Random Effects
σ²              2.77
τ00 topic       0.00
N topic         3
Observations    2118
Marginal R² / Conditional R²   0.018 / NA
## Aggregating plausibility per participant (one mean per id_w) to test the overall effect of policy support on plausibility

agg2 <- aggregate(likely ~ id_w, data = long_full, FUN = mean) 

agg2$cc_policy <- tapply(long_full$cc_policy, long_full$id_w, unique)
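For reference, the same per-participant aggregation can be done in one dplyr pipeline (a sketch, assuming each id_w has a single cc_policy value, as the tapply() call above implies):

# dplyr equivalent of the aggregate() + tapply() steps above
agg2_alt <- long_full %>%
  group_by(id_w) %>%
  summarise(likely = mean(likely),
            cc_policy = first(cc_policy),
            .groups = "drop")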

mlikely2 <- lm(likely ~ cc_policy , data = agg2)

summary(mlikely2)
## 
## Call:
## lm(formula = likely ~ cc_policy, data = agg2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -4.2312 -0.9543  0.3058  1.2387  2.5072 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  4.35859    0.13822   31.54  < 2e-16 ***
## cc_policy    0.13424    0.03709    3.62 0.000316 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.665 on 704 degrees of freedom
## Multiple R-squared:  0.01827,    Adjusted R-squared:  0.01687 
## F-statistic:  13.1 on 1 and 704 DF,  p-value: 0.0003163
tab_model(mlikely2,
  dv.labels = c("Plausibility"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
Plausibility

Coefficient     Est.    Std. Beta   CI (95%)       Std. CI         p
(Intercept)     4.36    -0.00       4.09 – 4.63    -0.07 – 0.07    <0.001
cc policy       0.13     0.14       0.06 – 0.21     0.06 – 0.21    <0.001

Observations    706
R² / R² adjusted   0.018 / 0.017