rcreg is used to correct a regression object in which a continuous independent variable is measured with error, using the regression calibration method of Carroll et al. (1995).
```r
rcreg(
  reg = NULL,
  formula = NULL,
  data = NULL,
  weights = NULL,
  MEvariable = NULL,
  MEerror = NULL,
  variance = FALSE,
  nboot = 400
)

# S3 method for rcreg
coef(object, ...)

# S3 method for rcreg
vcov(object, ...)

# S3 method for rcreg
sigma(object, ...)

# S3 method for rcreg
formula(x, ...)

# S3 method for rcreg
family(object, ...)

# S3 method for rcreg
predict(object, ...)

# S3 method for rcreg
model.frame(formula, ...)

# S3 method for rcreg
print(x, ...)

# S3 method for rcreg
summary(object, ...)

# S3 method for summary.rcreg
print(x, digits = 4, ...)

# S3 method for rcreg
update(object, ..., evaluate = TRUE)
```
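As a quick orientation, the sketch below corrects a naive linear regression whose covariate is measured with a known error standard deviation of 0.3. The simulated data and variable names (x1, z_true, z_obs, y, dat) are hypothetical and only illustrate the typical call pattern.

```r
library(CMAverse)

# hypothetical simulated data: z_obs measures z_true with error SD 0.3
set.seed(1)
n <- 500
x1     <- rnorm(n)
z_true <- rnorm(n)
z_obs  <- z_true + rnorm(n, sd = 0.3)
y      <- 1 + 2 * x1 + 3 * z_true + rnorm(n)
dat    <- data.frame(x1 = x1, z_obs = z_obs, y = y)

# fit the naive regression on the error-prone covariate, then correct it
fit_naive <- lm(y ~ x1 + z_obs, data = dat)
fit_rc    <- rcreg(reg = fit_naive, data = dat, MEvariable = "z_obs", MEerror = 0.3)
coef(fit_rc)   # corrected coefficient estimates
```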
Argument | Description
---|---
reg | naive regression object. See Details for the supported regression types.
formula | regression formula.
data | new dataset for the regression.
weights | new weights for the regression.
MEvariable | variable measured with error.
MEerror | standard deviation of the measurement error.
variance | a logical value. If TRUE, the var-cov matrix of coefficients corrected by regression calibration is estimated by bootstrapping. Default is FALSE.
nboot | number of boots for correcting the var-cov matrix of coefficients. Default is 400.
object | an object of class rcreg (or summary.rcreg for its print method).
... | additional arguments.
x | an object of class rcreg or summary.rcreg.
digits | minimal number of significant digits. See print.default.
evaluate | a logical value. If TRUE, the updated call is evaluated. Default is TRUE.
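For instance, requesting the bootstrap-corrected var-cov matrix only changes the variance and nboot arguments; the snippet below continues the hypothetical fit_naive example above.

```r
# request a bootstrap-corrected var-cov matrix (20 boots here purely for speed;
# the default nboot = 400 is more appropriate in practice)
fit_rc_var <- rcreg(reg = fit_naive, data = dat, MEvariable = "z_obs",
                    MEerror = 0.3, variance = TRUE, nboot = 20)
vcov(fit_rc_var)

# data and weights can also be swapped in later through update()
fit_rc_w <- update(fit_rc_var, data = dat, weights = rep(1, n))
```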
If MEvariable is not in the regression formula, reg is returned. If MEvariable is a continuous independent variable in the regression formula, an object of class rcreg is returned containing:

- the function call,
- the naive regression object,
- a list of MEvariable, MEerror, variance and nboot,
- coefficient estimates corrected by regression calibration,
- the residual standard deviation of a linear regression object corrected by regression calibration,
- the var-cov matrix of coefficients corrected by regression calibration.
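The corrected pieces listed above are most conveniently retrieved through the S3 accessors documented below rather than by indexing the object directly (the internal component names are not shown here). Continuing the hypothetical example:

```r
# corrected coefficient estimates
coef(fit_rc)

# corrected residual standard deviation (linear regression only)
sigma(fit_rc)

# corrected var-cov matrix; only meaningful when variance = TRUE was used
vcov(fit_rc_var)

# the regression formula of the naive fit is still recoverable
formula(fit_rc)
```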
reg fitted by lm, glm (with family gaussian, binomial or poisson), multinom, polr, coxph or survreg is supported.
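As a sketch of one of the non-lm/glm cases stated above, the snippet below fits a naive Cox model on an error-prone covariate and passes it to rcreg. The simulated survival data and variable names are hypothetical, and only 2 boots are used for illustration.

```r
library(CMAverse)
library(survival)

# hypothetical survival data with an error-prone covariate
set.seed(2)
n <- 500
x1     <- rnorm(n)
z_true <- rnorm(n)
z_obs  <- z_true + rnorm(n, sd = 0.4)
time   <- rexp(n, rate = exp(-1 + 0.5 * x1 + 0.8 * z_true))
status <- rbinom(n, size = 1, prob = 0.9)
dat_surv <- data.frame(x1 = x1, z_obs = z_obs, time = time, status = status)

# naive Cox model on the mismeasured covariate, then regression calibration
cox_naive <- coxph(Surv(time, status) ~ x1 + z_obs, data = dat_surv)
cox_rc <- rcreg(reg = cox_naive, data = dat_surv, MEvariable = "z_obs",
                MEerror = 0.4, variance = TRUE, nboot = 2)
coef(cox_rc)
```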
- coef(rcreg): Extract coefficients corrected by rcreg.
- vcov(rcreg): Extract the var-cov matrix of coefficients corrected by rcreg.
- sigma(rcreg): Extract the residual standard deviation of a linear regression object corrected by rcreg.
- formula(rcreg): Extract the regression formula.
- family(rcreg): Extract the family of a regression of class lm or glm.
- predict(rcreg): Predict with new data.
- model.frame(rcreg): Extract the model frame.
- print(rcreg): Print results of rcreg nicely.
- summary(rcreg): Summarize results of rcreg nicely.
- update(rcreg): Update rcreg.
- print(summary.rcreg): Print the summary of rcreg nicely.
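A short illustration of the print, summary, predict and model.frame methods, again on the hypothetical fit_rc object from the sketch above:

```r
# print the corrected fit, and a fuller summary with a chosen number of digits
print(fit_rc)
print(summary(fit_rc), digits = 3)

# predictions with new data use the corrected coefficients
predict(fit_rc, newdata = dat[1:5, ])

# the model frame underlying the fit
head(model.frame(fit_rc))
```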
Carroll RJ, Ruppert D, Stefanski LA, Crainiceanu C (2006). Measurement Error in Nonlinear Models: A Modern Perspective, Second Edition. London: Chapman & Hall.
```r
if (FALSE) {
rm(list = ls())
library(CMAverse)

# 2 boots are used for illustration

# lm
n <- 1000
x1 <- rnorm(n, mean = 5, sd = 3)
x2_true <- rnorm(n, mean = 2, sd = 1)
error1 <- rnorm(n, mean = 0, sd = 0.5)
x2_error <- x2_true + error1
x3 <- rbinom(n, size = 1, prob = 0.4)
y <- 1 + 2 * x1 + 4 * x2_true + 2 * x3 + rnorm(n, mean = 0, sd = 2)
data <- data.frame(x1 = x1, x2_true = x2_true, x2_error = x2_error, x3 = x3, y = y)
reg_naive <- lm(y ~ x1 + x2_error + x3, data = data)
reg_true <- lm(y ~ x1 + x2_true + x3, data = data)
reg_rc <- rcreg(reg = reg_naive, data = data, MEvariable = "x2_error",
                MEerror = 0.5, variance = TRUE, nboot = 2)
coef(reg_rc)
vcov(reg_rc)
sigma(reg_rc)
formula(reg_rc)
family(reg_rc)
predict(reg_rc, newdata = data[1, ])
reg_rc_model <- model.frame(reg_rc)
reg_rc_update <- update(reg_rc, data = data, weights = rep(1, n))
reg_rc_summ <- summary(reg_rc)

# glm
n <- 1000
x1 <- rnorm(n, mean = 0, sd = 1)
x2_true <- rnorm(n, mean = 1, sd = 1)
error1 <- rnorm(n, mean = 0, sd = 0.5)
x2_error <- x2_true + error1
x3 <- rbinom(n, size = 1, prob = 0.4)
linearpred <- 1 + 0.3 * x1 - 0.5 * x2_true - 0.2 * x3
py <- exp(linearpred) / (1 + exp(linearpred))
y <- rbinom(n, size = 1, prob = py)
data <- data.frame(x1 = x1, x2_true = x2_true, x2_error = x2_error, x3 = x3, y = y)
reg_naive <- glm(y ~ x1 + x2_error + x3, data = data, family = binomial("logit"))
reg_true <- glm(y ~ x1 + x2_true + x3, data = data, family = binomial("logit"))
reg_rc <- rcreg(reg = reg_naive, data = data, MEvariable = "x2_error",
                MEerror = 0.5, variance = TRUE, nboot = 2)

# multinom
n <- 1000
x1 <- rnorm(n, mean = 0, sd = 1)
x2_true <- rnorm(n, mean = 1, sd = 1)
error1 <- rnorm(n, mean = 0, sd = 0.5)
x2_error <- x2_true + error1
x3 <- rbinom(n, size = 1, prob = 0.4)
linearpred1 <- 1 + 0.3 * x1 - 0.5 * x2_true - 0.2 * x3
linearpred2 <- 2 + 1 * x1 - 2 * x2_true - 1 * x3
py2 <- exp(linearpred1) / (1 + exp(linearpred1) + exp(linearpred2))
py3 <- exp(linearpred2) / (1 + exp(linearpred1) + exp(linearpred2))
py1 <- 1 - py2 - py3
y <- sapply(1:n, function(x) sample(size = 1, c(1:3), prob = c(py1[x], py2[x], py3[x])))
data <- data.frame(x1 = x1, x2_true = x2_true, x2_error = x2_error, x3 = x3, y = y)
reg_naive <- nnet::multinom(factor(y) ~ x1 + x2_error + x3, data = data)
reg_true <- nnet::multinom(factor(y) ~ x1 + x2_true + x3, data = data)
reg_rc <- rcreg(reg = reg_naive, data = data, MEvariable = "x2_error",
                MEerror = 0.5, variance = TRUE, nboot = 2)
}
```