#
#
############ Moderated regression and simple slopes analysis (with graphs) ############
#
#
# This script assumes you have downloaded and installed all available packages.
# Packages needed for this script will be noted as "library(xxxxx)" where xxxxx
# refers to the required package/library to be loaded.
# Make sure you have the R-Commander library loaded.
# Load required packages:
#   'foreign' provides read.spss() for reading SPSS .sav files;
#   'Rcmdr' (R-Commander) supports the GUI-based graphing used later.
library(foreign)
library(Rcmdr)
# Use the 'foreign' library to load the "Car_Trimmed.sav" data file
# directly from the course web server into a data frame.
CarsTrimmed <- read.spss("http://www.unt.edu/rss/class/Jon/R_SC/Module9/Car_Trimmed.sav",
use.value.labels=TRUE, max.value.labels=Inf, to.data.frame=TRUE)
# From here on, simply use this script; submitted or pasted into the
# R Console.
# NOTE(review): attach() puts the data frame's columns on the search
# path so they can be referenced by bare name; it is error-prone in
# non-interactive code -- kept here to match the tutorial's style.
attach(CarsTrimmed)
names(CarsTrimmed)   # variable names in the data set
nrow(CarsTrimmed)    # number of cases (398; used for df and BIC below)
# Correlation matrix of all variables.
# NOTE(review): cor() errors on factor columns, and use.value.labels
# = TRUE can create factors -- confirm every column here is numeric.
cor(CarsTrimmed)
##########################################################################
# Analyze the moderating influence of accel on the relationship
# between mpg and weight.
#
# First, mean-center the predictor (weight) and the moderator (accel);
# centering is simply subtracting each variable's mean from the variable.
# The means are computed with mean() rather than typed in as rounded
# constants, so the centering is exact and robust to any change in the
# data (the original script hard-coded 2960.374 and 15.54296).
mean(weight)
mean(accel)
c.weight <- weight - mean(weight)
c.accel <- accel - mean(accel)
# Append the centered variables to the data frame, then remove the
# free-standing copies so only the data-frame columns remain.
cars1 <- cbind(CarsTrimmed, c.weight, c.accel)
rm(c.accel, c.weight)
names(cars1)
detach(CarsTrimmed)
attach(cars1)
# Create the interaction product term (c.weight * c.accel).
interact <- c.weight * c.accel
cars2 <- cbind(cars1, interact)
detach(cars1)
rm(cars1, interact)
attach(cars2)
head(cars2)
# Fit a pair of nested regressions and compare them to judge whether the
# interaction term contributes meaningfully to the model.
# The data frame is named explicitly via 'data =' rather than relying on
# the attached search path; the fits are numerically identical.
RegMod.1 <- lm(mpg ~ c.weight + c.accel, data = cars2)
summary(RegMod.1)
RegMod.2 <- lm(mpg ~ c.weight + c.accel + interact, data = cars2)
summary(RegMod.2)
# Nested-model F test: does adding 'interact' significantly improve fit?
anova(RegMod.1, RegMod.2)
# The Akaike Information Criterion (AIC); smaller values indicate a
# better trade-off between fit and complexity.
AIC(RegMod.1)
AIC(RegMod.2)
# The Bayesian Information Criterion (BIC): AIC with the stronger
# penalty k = log(N), where N is the number of cases.
AIC(RegMod.1, k = log(nrow(CarsTrimmed)))
AIC(RegMod.2, k = log(nrow(CarsTrimmed)))
##########################################################################
########## Simple Slopes Analysis (t-test) ##########
# Construct the resulting linear regression equation using the
# "Coefficients" table from the interaction model:
RegMod.2$coefficients
# mpg = 23.09228 + -.00738(c.weight) + .38335(c.accel) + -.00045(interact).
# Or, we could write the same equation like this:
# mpg = 23.09228 + -.00738(c.weight) + .38335(c.accel) + -.00045(c.weight * c.accel).
# First, calculate the simple slope for the 'low group' where low corresponds to
# 1 standard deviation below the mean of c.accel (-1 SD = -2.77640).
# You may be confused because we centered the predictor and moderator; but centering
# only changes the location of the mean, it does not change the standard deviation
# (the two sd() calls below should return the same value).
sd(c.accel)
sd(accel)
# So, we use the regression equation from above and substitute -2.77640 (one standard
# deviation below the mean) in place of 'c.accel', which results in the following equation:
# y = 23.091 + (-.00738*c.weight) + (.38335*-2.77640) + (-.00045*c.weight*-2.77640)
# which can be reduced down to give us the simple slope we want:
# y = 23.091 + (.38335*-2.77640) + (-.00738*c.weight) + (-.00045*c.weight*-2.77640)
# y = 23.091 + -1.064333 + [-.00738 + (-.00045*-2.77640)]*c.weight
# y = 22.02667 + [-.00738 + .00124938]*c.weight
# y = 22.02667 + -.00613062*c.weight
# So, the simple slope for the 'low group' is -.00613062.
# Second, calculate the simple slope for the 'high group' where high corresponds
# to 1 standard deviation above the mean of c.accel (+1 SD = 2.77640).
# y = 23.091 + (-.00738*c.weight) + (.38335*2.77640) + (-.00045*c.weight*2.77640)
# which can be reduced down to give us the simple slope we wanted:
# y = 24.15533 + -0.00862938*c.weight
# So, the simple slope for the 'high group' is -.00862938.
# Next, we have to calculate the standard error for each simple slope/group.
# To do this; we need the coefficient covariances.
summary(RegMod.2)$cov.unscaled
# NOTE(review): summary(lm)$cov.unscaled is the UNSCALED matrix
# (X'X)^-1; the true coefficient variance-covariance matrix is
# sigma^2 * (X'X)^-1, which vcov(RegMod.2) returns directly.  The
# hard-coded values below appear to be read straight off cov.unscaled
# -- confirm whether they should instead come from vcov(RegMod.2);
# if so, the standard errors (and therefore the t-values) computed
# below are understated by a factor of sigma.
# We will need 3 things from this table.
# (1) the variance of the coefficient for c.weight = 4.630701e-09
# (2) the variance of the coefficient for interact = 4.732109e-10
# (3) the COVARIANCE of the coefficients for c.weight and interact = 4.708424e-10
# Now we can calculate the Standard Error of each group specific simple slope using
# the formula:
# std.err. = sqrt((1) + S*S*(2) + 2*S*(3))
# which can also be written as:
# std.err. = sqrt of (4.630701e-09 + S*S*4.732109e-10 + 2*S*4.708424e-10)
# where S corresponds to the group specific value for standard deviation;
# for the low group: S = -2.77640 and
# for the high group: S = 2.77640.
# For the low group: S = -2.77640.
std.err.low = sqrt(4.630701e-09 + -2.77640*-2.77640*4.732109e-10 + 2*-2.77640*4.708424e-10)
std.err.low
# Which equates to just 0.000075259
# For the high group: S = 2.77640.
std.err.high = sqrt(4.630701e-09 + 2.77640*2.77640*4.732109e-10 + 2*2.77640*4.708424e-10)
std.err.high
# Which equates to just 0.000104369
# Finally, we can conduct the t-tests to determine if each group specific simple slope is
# significantly different from zero.
# We calculate our t-value by simply dividing the slope by the standard error (for each group).
# BUT, remember we are doing multiple t-tests here, so we risk inflation of type 1 error rates.
# To overcome this, we must apply a simple Bonferroni correction to our alpha
# level (.05 / 2 comparisons = .025).
# For the low group:
low.slope.t <- -.00613062 / 0.000075259
low.slope.t
# For the high group:
high.slope.t <- -.00862938 / 0.000104369
high.slope.t
# To find our critical t-value, we use degrees of freedom (df) = N - k - 1 where N is the
# number of cases, and k is the number of predictors (including the predictor, moderator,
# & interaction term).
# df = 398 - 3 - 1 = 394
# This df yields a two-tailed critical t-value of 3.347468181818182 with alpha = .001.
# We can see that both simple slopes are significantly different from zero.
##########################################################################
# Splitting 'accel' into two equal groups (low = 1, high = 2) based on
# ranking/sorting of accel.
# The group sizes are derived from the data rather than hard-coded (the
# original script assumed exactly 398 cases with rep(1,199), rep(2,199));
# with an odd N the low group receives the extra case.
n <- nrow(cars2)
a.group <- rep(1:2, c(ceiling(n / 2), floor(n / 2)))
head(cars2)
tail(cars2)
cars3 <- data.frame(cars2)
# Rank the cases by accel; ties are broken by original row order so
# every case gets a unique rank (1 ... n).
cars3$rank <- rank(cars3$accel, ties.method = "first")
rownames(cars3) <- cars3$rank
# Reorder the rows from lowest to highest accel (rank 1 first).
cars4 <- cars3[order(cars3$rank), ]
# Attach the group labels: first half of the sorted rows = group 1
# (low accel), second half = group 2 (high accel).
cars5 <- cbind(cars4, a.group)
rm(n, a.group, cars2, cars3, cars4)
head(cars5)
tail(cars5)
############################################################################
# Graphing the interaction.
# Read the notes below the next two commands before running them.
library(latticist)
latticist(cars5, use.playwith = FALSE)
# When R prompts you at the console to choose a graphics toolkit,
# type 2 and press Enter to select gWidgetstcltk.
# (The bare line "Selection: 2" that appeared here in the original
# script was pasted console output, not R code; sourcing it raises an
# "object 'Selection' not found" error, so it is now a comment.)
# Expect errors; but the errors do not prevent functioning.
# When the 'Latticist' GUI pops up, put 'weight' in the 'x=' box,
# 'mpg' in the 'y=' box, 'a.group' in the 'Groups/Color' box AND
# put 'a.group' in the 'Conditioning' box. This should produce three
# graphs, with each group of 'a.group' stacked and the total sample
# (both groups together) to the right. Each graph is also color
# coded so the points of our groups are differentiated.
# If you only want a single graph with both groups color coded
# similar to the third one produced above, then in the Latticist
# GUI:
# Click on the button for "superpose" which will remove
# 'a.group' from the 'Conditioning' box and produce the single
# desired graph with different colors for each group's points and
# their lines.
# There are several ways to display data in the Latticist GUI, so
# spend some time changing things around a bit.
# For instance, try putting 'weight' in the 'x=' box,
# 'mpg' in the 'y=' box, 'accel' in the 'Groups/Color' box AND
# put 'accel' in the 'Conditioning' box.
# As an example of the kinds of graphs one can produce using Latticist,
# copy and paste the following URL into your browser and hit the enter key:
# http://www.unt.edu/rss/class/Jon/R_SC/Module9/LatticistGraph001.pdf