Loading and preparing the data
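This section assumes the packages below are attached (typically in a setup chunk earlier in the document); a minimal setup sketch for reference:

# Packages assumed by this section (a sketch; the project's actual setup chunk may differ)
library(rio)        # import()
library(here)       # here()
library(dplyr)      # pipes, mutate(), filter(), summarise()
library(tidyr)      # gather(), pivot_wider()
library(magrittr)   # the %<>% assignment pipe
library(psych)      # alpha()
library(lmerTest)   # lmer() with Satterthwaite t-tests
library(effectsize) # eta_squared()
library(sjPlot)     # tab_model()
library(ggplot2)    # figure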

data <- import(here("data", "study1_complete_151222.xlsx")) 

data <- data %>% 
  # participants without a response to the control flight item were in the
  # hypocrisy condition; the 'Hipocrisy' spelling is kept as-is because it is
  # the recorded factor level that appears in all model output below
  mutate(condition = as.factor(ifelse(is.na(c_flight_1), 'Hipocrisy', 'Control'))) %>%
  mutate(attention_check = as.factor(attention_check))

data_long <- gather(data, topic, judgment, c_flight_1:h_attention_1, factor_key = TRUE) %>%
  filter(!is.na(judgment))

data_long2 <- gather(data_long, env_topic, env_values, ev_needs_1:ev_attention_1, factor_key = TRUE) %>%
  filter(!is.na(env_values))
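gather() is superseded in current tidyr; for reference, an equivalent reshape with pivot_longer() could look like the sketch below (note the factor level order may differ from factor_key = TRUE, which uses order of appearance):

# Sketch: modern equivalent of the gather() call above (not run)
data_long_alt <- data %>%
  pivot_longer(cols = c_flight_1:h_attention_1,
               names_to = "topic", values_to = "judgment",
               names_transform = list(topic = as.factor)) %>%
  filter(!is.na(judgment))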



data_ok1 <- data_long %>% 
  # strip the condition prefixes and item suffix from the topic names
  mutate(topic = gsub("c_", "", topic)) %>%
  mutate(topic = gsub("h_", "", topic)) %>% 
  mutate(topic = gsub("_1", "", topic)) %>%
  filter(!is.na(attention_check)) %>%
  filter(attention_check == 'Hamburguer') %>% # keep attention-check passers only
  filter(topic != 'attention') %>%
  # removing participants who did not complete the survey
  filter(!workerId %in% c('A1VAZ4O0J5QD7S', 'A1EDLMO8ACUTBP', 'A1DC5O5XEIMM1M')) %>%
  rowwise() %>%
  mutate(pol_mean = mean(c(pol_electricity_1, pol_gasoline_1, pol_school_1, pol_tax_1))) %>%
  mutate_at('judgment', as.numeric)
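A cheap structural check here can catch reshape or filtering surprises early; a hypothetical sanity check, not part of the original pipeline:

# Each retained participant should contribute one row per remaining topic;
# this lists any worker with an unexpected row count (ideally zero rows)
data_ok1 %>%
  ungroup() %>%
  dplyr::count(workerId) %>%
  dplyr::filter(n != 4)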


data_okwide <- data_ok1 %>%
  pivot_wider(names_from = topic, values_from = judgment) %>%
  rowwise() %>%
  mutate(judgment_m = (flight + recycle + beef + car) / 4) %>%
  mutate(pol_mean = mean(c(pol_electricity_1, pol_gasoline_1, pol_school_1, pol_tax_1))) %>%
  mutate_at('judgment_m', as.numeric) %>%
  mutate(pol_or_num = case_when(
    pol_orientation == "Other" ~ 0,
    pol_orientation == "Very Liberal/Left wing" ~ 1,
    pol_orientation == "Slightly Liberal/Left wing" ~ 2,
    pol_orientation == "Moderate" ~ 3,
    pol_orientation == "Slightly Conservative/Right wing" ~ 4,
    pol_orientation == "Very Conservative/Right wing" ~ 5
  )) %>%
  mutate_at("pol_or_num", as.numeric)



data_mean <- data %>% 
  rowwise() %>% 
  dplyr::mutate(pol_mean = mean(c(pol_electricity_1, pol_gasoline_1, pol_school_1, pol_tax_1))) %>%
  dplyr::mutate(control_mean = mean(c(c_flight_1, c_recycle_1, c_beef_1, c_car_1))) %>%
  dplyr::mutate(hipocrisy_mean = mean(c(h_flight_1, h_recycle_1, h_beef_1, h_car_1))) %>%
  dplyr::mutate(all_mean = mean(c(c_flight_1, c_recycle_1, c_beef_1, c_car_1,
                                  h_flight_1, h_recycle_1, h_beef_1, h_car_1), na.rm = TRUE))

Demographics

data_okwide %<>% 
  mutate_at('age', as.numeric) %>%
  filter(!is.na(age))

age_mean <- mean(data_okwide$age)

age_mean
## [1] 41.03906
age_sd <- sd(data_okwide$age)

age_sd
## [1] 11.0421
data_pol <- data_okwide %>%
  group_by(pol_orientation) %>%
  dplyr::summarise(pct = (n() / nrow(data_okwide)) * 100) # % of retained sample (N = 256)

data_pol
## # A tibble: 6 × 2
##   pol_orientation                    pct
##   <chr>                            <dbl>
## 1 Moderate                         17.6 
## 2 Other                             1.17
## 3 Slightly Conservative/Right wing 17.2 
## 4 Slightly Liberal/Left wing       21.5 
## 5 Very Conservative/Right wing     12.1 
## 6 Very Liberal/Left wing           30.5
data_gen <- data_okwide %>%
  group_by(gender) %>%
  dplyr::summarise(pct = (n() / nrow(data_okwide)) * 100)

data_gen
## # A tibble: 4 × 2
##   gender               pct
##   <chr>              <dbl>
## 1 Female            45.7  
## 2 Male              53.5  
## 3 Non-binary         0.391
## 4 Prefer not to say  0.391
data_edu <- data_okwide %>%
  group_by(education) %>%
  dplyr::summarise(pct = (n() / nrow(data_okwide)) * 100)

data_edu
## # A tibble: 9 × 2
##   education                      pct
##   <chr>                        <dbl>
## 1 2 year/Associate’s degree    8.98 
## 2 4 year/Bachelor’s degree    46.1  
## 3 Doctorate                    1.17 
## 4 High school graduate        13.3  
## 5 Less than high school        0.781
## 6 Master's degree              6.64 
## 7 Professional degree          1.56 
## 8 Some college                19.9  
## 9 Some graduate school         1.56
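The three demographic breakdowns above repeat the same pattern; a small helper (hypothetical, not part of the original script) would avoid duplicating it and the explicit denominator:

# Hypothetical helper: percentage breakdown of any categorical column
pct_table <- function(df, col) {
  df %>%
    ungroup() %>%
    dplyr::count({{ col }}) %>%
    dplyr::mutate(pct = 100 * n / sum(n))
}

# usage, e.g.: pct_table(data_okwide, gender)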
# political orientation distribution - quick look 
hist(data_okwide$pol_or_num)

Hypothesis 1: Testing the hypocrisy penalty

## First, let's check whether judgment can be averaged across topics

data_c <- data %>%
  filter(!is.na(attention_check)) %>%
  filter(attention_check == 'Hamburguer') %>%
  # removing participants who did not complete the survey
  filter(!workerId %in% c('A1VAZ4O0J5QD7S', 'A1EDLMO8ACUTBP', 'A1DC5O5XEIMM1M')) %>%
  select(starts_with('c_'), -starts_with('c_attention')) %>%
  na.omit()

alpha(data_c)
## 
## Reliability analysis   
## Call: alpha(x = data_c)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean  sd median_r
##       0.78      0.78    0.75      0.47 3.5 0.031  4.4 1.1     0.48
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.71  0.78  0.84
## Duhachek  0.72  0.78  0.84
## 
##  Reliability if an item is dropped:
##             raw_alpha std.alpha G6(smc) average_r S/N alpha se  var.r med.r
## c_flight_1       0.68      0.68    0.61      0.41 2.1    0.049 0.0290  0.35
## c_recycle_1      0.81      0.81    0.75      0.59 4.4    0.028 0.0019  0.60
## c_beef_1         0.70      0.70    0.65      0.44 2.3    0.045 0.0323  0.41
## c_car_1          0.70      0.70    0.62      0.43 2.3    0.046 0.0103  0.41
## 
##  Item statistics 
##               n raw.r std.r r.cor r.drop mean  sd
## c_flight_1  130  0.84  0.83  0.77   0.68  4.6 1.4
## c_recycle_1 130  0.65  0.66  0.46   0.40  3.4 1.4
## c_beef_1    130  0.81  0.81  0.72   0.63  4.7 1.5
## c_car_1     130  0.81  0.81  0.75   0.64  5.0 1.4
## 
## Non missing response frequency for each item
##                1    2    3    4    5    6    7 miss
## c_flight_1  0.03 0.02 0.08 0.53 0.08 0.10 0.16    0
## c_recycle_1 0.06 0.18 0.29 0.32 0.07 0.04 0.04    0
## c_beef_1    0.02 0.03 0.08 0.45 0.10 0.14 0.18    0
## c_car_1     0.00 0.01 0.05 0.48 0.08 0.12 0.25    0
data_h <- data %>%
  filter(!is.na(attention_check)) %>%
  filter(attention_check == 'Hamburguer') %>%
  # removing participants who did not complete the survey
  filter(!workerId %in% c('A1VAZ4O0J5QD7S', 'A1EDLMO8ACUTBP', 'A1DC5O5XEIMM1M')) %>%
  select(starts_with('h_'), -starts_with('h_attention')) %>%
  na.omit()

alpha(data_h)
## 
## Reliability analysis   
## Call: alpha(x = data_h)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean  sd median_r
##       0.86      0.86    0.82       0.6   6 0.021  2.5 1.2      0.6
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.81  0.86  0.89
## Duhachek  0.82  0.86  0.90
## 
##  Reliability if an item is dropped:
##             raw_alpha std.alpha G6(smc) average_r S/N alpha se   var.r med.r
## h_flight_1       0.81      0.81    0.74      0.58 4.2    0.030 0.00299  0.60
## h_recycle_1      0.84      0.84    0.77      0.63 5.1    0.025 0.00083  0.62
## h_beef_1         0.83      0.83    0.76      0.62 4.8    0.026 0.00137  0.60
## h_car_1          0.80      0.80    0.73      0.57 4.0    0.031 0.00199  0.59
## 
##  Item statistics 
##               n raw.r std.r r.cor r.drop mean  sd
## h_flight_1  127  0.85  0.85  0.79   0.73  2.3 1.4
## h_recycle_1 127  0.81  0.81  0.71   0.66  2.6 1.4
## h_beef_1    127  0.82  0.82  0.73   0.67  2.6 1.3
## h_car_1     127  0.86  0.86  0.80   0.74  2.6 1.4
## 
## Non missing response frequency for each item
##                1    2    3    4    5    6    7 miss
## h_flight_1  0.31 0.32 0.21 0.08 0.02 0.03 0.02    0
## h_recycle_1 0.22 0.31 0.27 0.09 0.05 0.03 0.02    0
## h_beef_1    0.20 0.35 0.24 0.13 0.06 0.02 0.02    0
## h_car_1     0.27 0.26 0.25 0.15 0.02 0.04 0.02    0
# alphas are .78 and .86 (about .82 on average), which is acceptable for averaging across topics/vignettes
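If the averaged alpha is reported in the manuscript, it can be pulled programmatically rather than transcribed by hand; a sketch relying on the $total element that psych::alpha() returns:

# extract raw alphas programmatically and average them
alpha_c <- alpha(data_c)$total$raw_alpha # ~ .78
alpha_h <- alpha(data_h)$total$raw_alpha # ~ .86
mean(c(alpha_c, alpha_h))                # ~ .82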

## Pre-registered analysis: 

mh1_ok <- lm(judgment_m ~ condition, data= data_okwide)

summary(mh1_ok)
## 
## Call:
## lm(formula = judgment_m ~ condition, data = data_okwide)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.9365 -0.6865 -0.2361  0.7143  4.4643 
## 
## Coefficients:
##                    Estimate Std. Error t value Pr(>|t|)    
## (Intercept)         4.43654    0.09877   44.92   <2e-16 ***
## conditionHipocrisy -1.90082    0.14079  -13.50   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.126 on 254 degrees of freedom
## Multiple R-squared:  0.4178, Adjusted R-squared:  0.4155 
## F-statistic: 182.3 on 1 and 254 DF,  p-value: < 2.2e-16
qqnorm(residuals(mh1_ok))

# the residuals depart from normality, so, since this is just a two-group mean comparison, we use a non-parametric test suited to our data:
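A formal test can back up the visual QQ check, with the usual caveat that Shapiro-Wilk is quite powerful at this sample size; a sketch, not part of the pre-registered pipeline:

# optional numeric check of the QQ-plot impression
shapiro.test(residuals(mh1_ok))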


# double-check there is no missing data before running the Wilcoxon test

data_okwide2 <- data_okwide %>% 
  select(condition, judgment_m)%>%
  filter(!is.na(judgment_m) & !is.na(condition))


## Wilcoxon rank-sum test (with 95% CI for the location shift)

test_result <- wilcox.test(judgment_m ~ condition, data = data_okwide2,
                           conf.int = TRUE, conf.level = .95)

test_result
## 
##  Wilcoxon rank sum test with continuity correction
## 
## data:  judgment_m by condition
## W = 14679, p-value < 2.2e-16
## alternative hypothesis: true location shift is not equal to 0
## 95 percent confidence interval:
##  1.749950 2.249922
## sample estimates:
## difference in location 
##               1.999958
# Extract W statistic from the test result
W <- test_result$statistic

# Get the sample sizes for each group (assuming 'condition' is a factor with two levels)
n1 <- sum(data_okwide2$condition == "Control")  # Number of observations in the Control group
n2 <- sum(data_okwide2$condition == "Hipocrisy")  # Number of observations in the Hipocrisy group

# Calculate the mean (mu_W) and standard deviation (sigma_W) of W
mu_W <- (n1 * n2) / 2
sigma_W <- sqrt((n1 * n2 * (n1 + n2 + 1)) / 12)

# Calculate the Z statistic (standardized test statistic)
Z <- (W - mu_W) / sigma_W

# Calculate the effect-size correlation r = Z / sqrt(N) (Rosenthal's r)
r <- Z / sqrt(n1 + n2)

# Print the Z statistic and the effect size
Z
##        W 
## 10.95582
r
##         W 
## 0.6847387
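Note that Z / sqrt(N) is Rosenthal's r; the rank-biserial correlation proper is a different, dominance-based effect size. As a cross-check, it can be computed directly with the effectsize package (already used for eta squared below); a sketch, assuming a reasonably recent effectsize version:

# cross-check with a dedicated effect-size function (rank-biserial correlation)
effectsize::rank_biserial(judgment_m ~ condition, data = data_okwide2)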
## let's get the means and SDs of mean judgment by condition

means_sd_hc <- data_okwide2 %>%
  group_by(condition) %>%
  dplyr::summarise(mean = mean(judgment_m), sd = sd(judgment_m))

Hypothesis 2: Interaction with CC mitigation policy support

# let's check alpha for policy support across items

ccp_supp <- data_okwide %>%
  select(pol_electricity_1, pol_gasoline_1, pol_school_1, pol_tax_1)

alpha(ccp_supp) # alpha of 0.85, very good
## 
## Reliability analysis   
## Call: alpha(x = ccp_supp)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean  sd median_r
##       0.85      0.85    0.82      0.59 5.7 0.015  3.6 1.7     0.58
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.82  0.85  0.88
## Duhachek  0.82  0.85  0.88
## 
##  Reliability if an item is dropped:
##                   raw_alpha std.alpha G6(smc) average_r S/N alpha se  var.r
## pol_electricity_1      0.83      0.83    0.79      0.62 4.9    0.018 0.0140
## pol_gasoline_1         0.77      0.77    0.70      0.53 3.4    0.025 0.0056
## pol_school_1           0.85      0.85    0.80      0.66 5.7    0.016 0.0077
## pol_tax_1              0.78      0.78    0.72      0.55 3.6    0.024 0.0075
##                   med.r
## pol_electricity_1  0.57
## pol_gasoline_1     0.54
## pol_school_1       0.62
## pol_tax_1          0.57
## 
##  Item statistics 
##                     n raw.r std.r r.cor r.drop mean  sd
## pol_electricity_1 256  0.80  0.80  0.69   0.64  4.4 2.1
## pol_gasoline_1    256  0.89  0.89  0.86   0.78  3.2 2.1
## pol_school_1      256  0.77  0.77  0.64   0.59  3.0 2.0
## pol_tax_1         256  0.87  0.87  0.83   0.76  3.7 2.1
## 
## Non missing response frequency for each item
##                      1    2    3    4    5    6    7 miss
## pol_electricity_1 0.14 0.09 0.08 0.12 0.20 0.17 0.20    0
## pol_gasoline_1    0.33 0.13 0.12 0.09 0.14 0.11 0.08    0
## pol_school_1      0.35 0.15 0.16 0.10 0.08 0.08 0.09    0
## pol_tax_1         0.22 0.13 0.12 0.10 0.22 0.09 0.12    0
# quick look at the distribution:

hist(data_okwide$pol_mean)

# model as pre-registered (no random effects)

mh2_ok <- lm(judgment_m ~ condition * pol_mean, data = data_okwide)

summary(mh2_ok)
## 
## Call:
## lm(formula = judgment_m ~ condition * pol_mean, data = data_okwide)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.0766 -0.7900 -0.1669  0.6378  4.6186 
## 
## Coefficients:
##                             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)                  4.93228    0.22353  22.065  < 2e-16 ***
## conditionHipocrisy          -2.86813    0.32381  -8.857  < 2e-16 ***
## pol_mean                    -0.14227    0.05778  -2.462  0.01449 *  
## conditionHipocrisy:pol_mean  0.26916    0.08130   3.311  0.00107 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.107 on 252 degrees of freedom
## Multiple R-squared:  0.4421, Adjusted R-squared:  0.4355 
## F-statistic: 66.57 on 3 and 252 DF,  p-value: < 2.2e-16
eta_2 <- effectsize::eta_squared(mh2_ok)

m_eta_2 <- eta_2$Eta2_partial[3] # partial eta^2 of the interaction term

# convert partial eta^2 to Cohen's d: d = 2 * sqrt(eta2 / (1 - eta2))
d_2 <- sqrt((4 * m_eta_2) / (1 - m_eta_2))
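Since this same conversion recurs for the three terms of the mixed model below, a tiny helper (hypothetical, not in the original) would keep the formula in one place:

# Hypothetical helper: partial eta^2 -> Cohen's d
# equivalent to d = 2 * sqrt(eta2 / (1 - eta2))
eta2_to_d <- function(eta2) sqrt((4 * eta2) / (1 - eta2))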

qqnorm(residuals(mh2_ok)) # normality of residuals does not look great

## Let's try the same model, but with random intercepts for the vignette topics

mh2_pol <- lmer(judgment ~ condition * pol_mean + (1 | topic), data = data_ok1)

summary(mh2_pol)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: judgment ~ condition * pol_mean + (1 | topic)
##    Data: data_ok1
## 
## REML criterion at convergence: 3648.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.6099 -0.6388 -0.2350  0.6394  3.5832 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  topic    (Intercept) 0.1127   0.3357  
##  Residual             1.9957   1.4127  
## Number of obs: 1028, groups:  topic, 4
## 
## Fixed effects:
##                               Estimate Std. Error         df t value Pr(>|t|)
## (Intercept)                    4.93228    0.22027    7.78176  22.392 2.40e-08
## conditionHipocrisy            -2.86822    0.20654 1021.00000 -13.887  < 2e-16
## pol_mean                      -0.14227    0.03688 1021.00000  -3.858 0.000122
## conditionHipocrisy:pol_mean    0.26916    0.05188 1021.00000   5.188 2.57e-07
##                                
## (Intercept)                 ***
## conditionHipocrisy          ***
## pol_mean                    ***
## conditionHipocrisy:pol_mean ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnH pol_mn
## cndtnHpcrsy -0.447              
## pol_mean    -0.583  0.622       
## cndtnHpcr:_  0.415 -0.904 -0.711
qqnorm(residuals(mh2_pol)) # normality of residuals improves in the mixed-effects model; we keep this model for reporting in the paper
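Beyond the QQ plot, a consolidated diagnostics panel is available from the performance package, if it is installed; a sketch, not part of the pre-registered analysis:

# optional: residual, normality, and influence diagnostics in one panel
performance::check_model(mh2_pol)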

H2: effect sizes and tables

m2_eta2 <- effectsize::eta_squared(mh2_pol)
m2_c_eta2 <- m2_eta2$Eta2_partial[1]   # condition
m2_p_eta2 <- m2_eta2$Eta2_partial[2]   # policy support
m2_int_eta2 <- m2_eta2$Eta2_partial[3] # condition x policy support

# convert each partial eta^2 to Cohen's d
d_c_m2 <- sqrt((4 * m2_c_eta2) / (1 - m2_c_eta2))
d_p_m2 <- sqrt((4 * m2_p_eta2) / (1 - m2_p_eta2))
d_int_m2 <- sqrt((4 * m2_int_eta2) / (1 - m2_int_eta2))

# interaction term effect size


d_int_m2 
## [1] 0.3247137
# tables

### random-effects model

tab_model(mh2_pol,
  pred.labels = c("Intercept", "Condition (Hypocrisy)", "Policy Support", "Hypocrisy*PolicySupp"),
  dv.labels = c("Judgment"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
                               Judgment
Coefficient                    Est.   Std. Beta   CI (95%)        Std. CI          p        std. p
Intercept                      4.93    0.53        4.50 – 5.36     0.33 – 0.73     <0.001   <0.001
Condition (Hypocrisy)         -2.87   -1.09       -3.27 – -2.46   -1.19 – -0.99    <0.001   <0.001
Policy Support                -0.14   -0.14       -0.21 – -0.07   -0.21 – -0.07    <0.001   <0.001
Hypocrisy*PolicySupp           0.27    0.26        0.17 – 0.37     0.16 – 0.36     <0.001   <0.001
Random Effects
σ2                             2.00
τ00 topic                      0.11
ICC                            0.05
N topic                        4
Observations                   1028
Marginal R2 / Conditional R2   0.312 / 0.349
### model without random effects

tab_model(mh2_ok,
  pred.labels = c("Intercept", "Condition (Hypocrisy)", "Policy Support", "Hypocrisy*PolicySupp"),
  dv.labels = c("Judgment"),
  string.pred = "Coefficient",
  string.ci = "CI (95%)",
  string.p = "p",
  show.std = TRUE, 
  string.std_ci = "Std. CI",
  string.std = "Std. Beta",
  string.est = "Est.")
                        Judgment
Coefficient             Est.   Std. Beta   CI (95%)        Std. CI          p       std. p
Intercept               4.93    0.62        4.49 – 5.37     0.49 – 0.75     <0.001  <0.001
Condition (Hypocrisy)  -2.87   -1.29       -3.51 – -2.23   -1.47 – -1.10    <0.001  <0.001
Policy Support         -0.14   -0.17       -0.26 – -0.03   -0.30 – -0.03    0.014   0.014
Hypocrisy*PolicySupp    0.27    0.31        0.11 – 0.43     0.13 – 0.50     0.001   0.001
Observations            256
R2 / R2 adjusted        0.442 / 0.435

Figure

# figure palettes (note: of these, only "#E69F00" belongs to the Okabe-Ito set)
okabe_ito_colors <- c("#E69F00", "maroon3")
okabe_ito_colors_1 <- c("springgreen4", "lightgreen")
okabe_ito_colors_2 <- c("maroon3", "pink2")


data_okwide %>%
  mutate_at('condition', as.factor) %>%
  ggplot(aes(x = pol_mean, y = judgment_m, color = condition)) +
  stat_summary_bin(fun = "mean", bins = 7, geom = "point", size = 3) + # binned means
  geom_smooth(method = "loess", span = 0.5, se = TRUE) + # loess smooth with CI band
  geom_jitter(alpha = 0.3) +
  scale_color_manual(values = okabe_ito_colors, labels = c("Control", "Hypocrisy")) +
  xlim(1, 7) +
  theme_minimal() +
  labs(title = "CC Hypocrisy Penalty",
       subtitle = "Judgment of climate hypocrisy vs. climate harm, by level of CC mitigation support",
       x = "CC Mitigation Support (Low to High)",
       y = "Moral Judgment (Wrong to Right)",
       color = 'Condition')