The goal of SIHR is to provide inference procedures in the high-dimensional setting for (1) linear functionals (LF) (Cai, Cai, and Guo 2019) and quadratic functionals (QF) (Guo, Renaux, et al. 2019) in linear regression, (2) linear functionals in logistic regression (Guo, Rakshit, et al. 2019), (3) individual treatment effects (ITE) in linear and logistic regression, and (4) single regression coefficients in binary outcome regression (Cai, Guo, and Ma, n.d.).
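In the notation of the examples below (the loading, G, and A arguments), the target quantities can be summarized as follows; this shorthand is ours, and the precise estimands are defined in the cited papers:

$$
\text{LF}:\; x_{\text{new}}^{\top}\beta, \qquad
\text{QF}:\; \beta_G^{\top} A\,\beta_G \;\text{ or }\; \beta_G^{\top}\Sigma_{G,G}\,\beta_G, \qquad
\text{ITE}:\; x_{\text{new}}^{\top}\big(\beta^{(1)} - \beta^{(2)}\big),
$$

where $\beta$ is the high-dimensional regression vector, $x_{\text{new}}$ is the loading, $G \subseteq \{1,\dots,p\}$ indexes the coefficients of interest, and $\Sigma$ is the population covariance of the design.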
The following basic examples show how to solve common high-dimensional inference problems:
library(SIHR)

Inference for linear functional in high-dimensional linear regression model
library(MASS)
n = 100
p = 400
A1gen <- function(rho,p){
A1=matrix(0,p,p)
for(i in 1:p){
for(j in 1:p){
A1[i,j]<-rho^(abs(i-j))
}
}
A1
}
mu <- rep(0,p)
mu[1:5] <- c(1:5)/5
rho = 0.5
Cov <- (A1gen(rho,p))/2
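## note: A1 has entries rho^|i-j|, i.e. it is a Toeplitz matrix, so equivalently:
## Cov <- toeplitz(rho^(0:(p-1)))/2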
beta <- rep(0,p)
beta[1:10] <- c(1:10)/5
X <- MASS::mvrnorm(n,mu,Cov)
y = X%*%beta + rnorm(n)
loading <- MASS::mvrnorm(1,rep(0,p),Cov)
Est = SIHR::LF(X = X, y = y, loading = loading)
#> [1] "step is 3"
### Point estimator
Est$prop.est
#> [,1]
#> [1,] -2.54952
### Standard error
Est$se
#> [1] 2.493051
### Confidence interval
Est$CI
#> [1] -7.43581 2.33677
### test whether the linear functional is below zero or not (1 indicates that it is above zero)
Est$decision
#> [1] 0

Individualised Treatment Effect in high-dimensional linear regression model
n1 = 100
p = 400
n2 = 100
A1gen <- function(rho,p){
A1=matrix(0,p,p)
for(i in 1:p){
for(j in 1:p){
A1[i,j]<-rho^(abs(i-j))
}
}
A1
}
mu <- rep(0,p)
rho = 0.5
Cov <- (A1gen(rho,p))/2
beta1 <- rep(0,p)
beta1[1:10] <- c(1:10)/5
beta2 <- rep(0,p)
beta2[1:5] <- c(1:5)/10
X1 <- MASS::mvrnorm(n1,mu,Cov)
X2 <- MASS::mvrnorm(n2,mu,Cov)
y1 = X1%*%beta1 + rnorm(n1)
y2 = X2%*%beta2 + rnorm(n2)
loading <- MASS::mvrnorm(1,rep(0,p),Cov)
Est <- SIHR::ITE(X1 = X1, y1 = y1, X2 = X2, y2 = y2, loading = loading)
#> [1] "step is 3"
#> [1] "step is 3"
### Point estimator
Est$prop.est
#> [,1]
#> [1,] -1.192825
### Standard error
Est$se
#> [1] 1.915035
### Confidence interval
Est$CI
#> [1] -4.946225 2.560575
### test whether the linear ITE is below zero or not (1 indicates that it is above zero)
Est$decision
#> [1] 0

Inference for linear functional in high-dimensional logistic regression model
library(MASS)
A1gen <- function(rho,p){
A1=matrix(0,p,p)
for(i in 1:p){
for(j in 1:p){
A1[i,j]<-rho^(abs(i-j))
}
}
A1
}
n = 100
p = 400
mu <- rep(0,p)
rho = 0.5
Cov <- (A1gen(rho,p))/2
beta <- rep(0,p)
beta[1:10] <- 0.5*c(1:10)/10
X <- MASS::mvrnorm(n,mu,Cov)
exp_val <- X%*%beta
prob <- exp(exp_val)/(1+exp(exp_val))
y <- rbinom(n,1,prob)
loading <- MASS::mvrnorm(1,mu,Cov)
Est = SIHR::LF_logistic(X = X, y = y, loading = loading, weight = rep(1,n), trans = TRUE)
#> [1] "step is 3"
### trans = TRUE implies target quantity is the case probability
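### i.e., the target is P(y = 1 | x = loading) = exp(x'beta)/(1 + exp(x'beta));
### with trans = FALSE the target would instead be the linear functional x'beta itself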
### Point estimator
Est$prop.est
#> [1] 0.2503934
### Standard error
Est$se
#> [1] 0.5638263
### Confidence interval
Est$CI
#> [1] 0.0009256397 0.9917648061
### test whether the case probability is below 0.5 or not (1 indicates that it is above 0.5)
Est$decision
#> [1] 0

Individualised Treatment Effect in high-dimensional logistic regression model
A1gen <- function(rho,p){
A1=matrix(0,p,p)
for(i in 1:p){
for(j in 1:p){
A1[i,j]<-rho^(abs(i-j))
}
}
A1
}
n1 = 100
n2 = 100
p = 400
mu <- rep(0,p)
rho = 0.5
Cov <- (A1gen(rho,p))/2
beta1 <- rep(0,p)
beta1[1:10] <- c(1:10)/5
beta2 <- rep(0,p)
beta2[1:5] <- c(1:5)/10
X1 <- MASS::mvrnorm(n1,mu,Cov)
X2 <- MASS::mvrnorm(n2,mu,Cov)
exp_val1 <- X1%*%beta1
exp_val2 <- X2%*%beta2
prob1 <- exp(exp_val1)/(1+exp(exp_val1))
prob2 <- exp(exp_val2)/(1+exp(exp_val2))
y1 <- rbinom(n1,1,prob1)
y2 <- rbinom(n2,1,prob2)
loading <- MASS::mvrnorm(1,mu,Cov)
Est <- SIHR::ITE_Logistic(X1 = X1, y1 = y1, X2 = X2, y2 = y2, loading = loading, weight = NULL, trans = FALSE)
#> [1] "step is 4"
#> [1] "step is 3"
### trans = FALSE implies the target quantity is the difference between two linear combinations of the regression coefficients
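### conversely, trans = TRUE would target the difference between the two case
### probabilities evaluated at the loading (cf. the decision output below)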
### Point estimator
Est$prop.est
#> [1] -3.827802
### Standard error
Est$se
#> [1] 37.2344
### Confidence interval
Est$CI
#> [1] -76.80589 69.15029
### test whether the first case probability is smaller than the second (1 indicates that the first is larger)
Est$decision
#> [1] 0

Inference for single regression coefficient in high-dimensional binary GLM (probit model)
sp = 20
n = 500
p = 800
sig1 = toeplitz(seq(0.6, 0,length.out = p/10))
Sig = Matrix::bdiag(rep(list(sig1),10))+diag(rep(0.4,p))
X = MASS::mvrnorm(n, mu=rep(0,p), Sigma=Sig)
b = rep(0,p)
b[1:sp] = rep(c(0.4,-0.4), sp/2)
## Inference for single regression coefficient in high-dimensional binary probit model
## inverse probit link: the standard normal CDF
f = function(x){
pnorm(x)
}
prob = f(X %*% b)
y = rbinom(n, 1, prob)  ## rbinom is vectorized over prob, so no loop is needed
Est = SIHR::GLM_binary(X = X, y = y,index = 1, model = "probit", intercept = FALSE)
#> [1] "step is 3"
### Point estimator
Est$prop.est
#> [1] 0.5456477
### Standard error
Est$se
#> [1] 0.1193006
### Confidence interval
Est$CI
#> [1] 0.3118229 0.7794725
### test whether the first regression coefficient is equal to zero or not (1 indicates that it is significantly different from zero)
Est$decision
#> [1] 1

Inference for single regression coefficient in high-dimensional binary GLM (inverse t1 model)
sp = 10
n = 800
p = 400
sig1 = toeplitz(seq(0.6, 0,length.out = p/10))
Sig = Matrix::bdiag(rep(list(sig1),10))+diag(rep(0.4,p))
X = MASS::mvrnorm(n, mu=rep(0,p), Sigma=Sig)
b = rep(0,p)
b[1:sp] = rep(c(0.4,-0.4), sp/2)
## inverse "t1" link: the CDF of the t distribution with 1 degree of freedom
f = function(x){
pt(x,1)
}
prob = f(X %*% b)
y = rbinom(n, 1, prob)  ## rbinom is vectorized over prob, so no loop is needed
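## lambda is (presumably) the tuning parameter of the initial penalized fit;
## sqrt(log(p)/n) is the standard high-dimensional scaling, shrunk here by 0.1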
Est = SIHR::GLM_binary(X = X, y = y, index = 2, model = "inverse t1", lambda=0.1*sqrt(log(p)/n))
#> [1] "step is 4"
### Point estimator
Est$prop.est
#> [1] -0.4599343
### Standard error
Est$se
#> [1] 0.1083813
### Confidence interval
Est$CI
#> [1] -0.6723578 -0.2475107
### test whether the second regression coefficient is equal to zero or not (1 indicates that it is significantly different from zero)
Est$decision
#> [1] 0

Inference for quadratic functional in high-dimensional linear model
library(MASS)
A1gen <- function(rho,p){
A1=matrix(0,p,p)
for(i in 1:p){
for(j in 1:p){
A1[i,j]<-rho^(abs(i-j))
}
}
A1
}
rho = 0.6
Cov <- (A1gen(rho,400))
mu <- rep(0,400)
mu[1:5] <- c(1:5)/5
beta <- rep(0,400)
beta[25:50] <- 0.08
X <- MASS::mvrnorm(100,mu,Cov)
y <- X%*%beta + rnorm(100)
test.set <- c(30:100)
## Inference for Quadratic Functional with Population Covariance Matrix in middle
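## i.e., the target is beta_G' Sigma_{G,G} beta_G, where Sigma_{G,G} is the submatrix
## of the population design covariance indexed by G; the two calls further below
## replace this weighting matrix by a user-supplied A and by the identity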
Est = SIHR::QF(X = X, y = y, G=test.set)
#> [1] "step is 5"
### Point estimator
Est$prop.est
#> [,1]
#> [1,] 0.4856643
### Standard error
Est$se
#> [1] 0.1339019
### Confidence interval
Est$CI
#> [,1] [,2]
#> [1,] 0.2232215 0.7481071
### test whether the quadratic form is equal to zero or not (1 indicates that it is above zero)
Est$decision
#> [1] 1
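The decision output is consistent with a one-sided z-test at the 5% level. The following sketch shows how such a rule could be reproduced from the returned point estimate and standard error; it is an illustration under that assumption, not necessarily the package's exact internals.

## hypothetical one-sided rule at level alpha = 0.05: report 1 when the
## lower confidence bound of the estimate exceeds zero
alpha <- 0.05
as.numeric(Est$prop.est - qnorm(1 - alpha) * Est$se > 0)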
## Inference for Quadratic Functional with known matrix A in middle
Est = SIHR::QF(X = X, y = y, G=test.set, Cov.weight = FALSE,A = diag(1:length(test.set),length(test.set)))
#> [1] "step is 3"
### Point estimator
Est$prop.est
#> [,1]
#> [1,] 7.818209
### Standard error
Est$se
#> [1] 1.874088
### Confidence interval
Est$CI
#> [,1] [,2]
#> [1,] 4.145065 11.49135
### test whether the quadratic form is equal to zero or not (1 indicates that it is above zero)
Est$decision
#> [1] 1
## Inference for square norm of regression vector
Est = SIHR::QF(X = X, y = y, G=test.set, Cov.weight = FALSE, A = diag(length(test.set)))
#> [1] "step is 3"
### Point estimator
Est$prop.est
#> [,1]
#> [1,] 0.2305753
### Standard error
Est$se
#> [1] 0.1078509
### Confidence interval
Est$CI
#> [,1] [,2]
#> [1,] 0.0191914 0.4419591
### test whether the quadratic form is equal to zero or not (1 indicates that it is above zero)
Est$decision
#> [1] 1

Finding projection direction in high-dimensional linear regression
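Both Direction_fixedtuning and Direction_searchtuning compute the projection direction used to debias an initial Lasso estimator. As a rough sketch only (the exact constraint set is given in the cited papers), the direction solves a quadratic program of the form

$$
\widehat{u} \;=\; \arg\min_{u}\; u^{\top}\widehat{\Sigma}\,u
\quad\text{subject to}\quad
\|\widehat{\Sigma}u - x_{\text{new}}\|_{\infty} \le \mu\,\|x_{\text{new}}\|_{2},
$$

with sample covariance $\widehat{\Sigma} = X^{\top}X/n$, loading $x_{\text{new}}$, and tuning parameter $\mu$ (the mu argument of Direction_fixedtuning).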
n = 100
p = 400
X = matrix(sample(-2:2,n*p,replace = TRUE),nrow = n,ncol = p)
resol = 1.5
step = 3
## Finding Projection Direction using fixed tuning parameter
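## mu is scaled at the sqrt(2.01*log(p)/n) rate and shrunk by the factor resol^(step-1);
## Direction_searchtuning below selects the step size (and hence mu) automatically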
Direction.est <- SIHR::Direction_fixedtuning(X,loading=c(1,rep(0,(p-1))),mu=sqrt(2.01*log(p)/n)*resol^{-(step-1)})
### First 20 entries of the projection vector
Direction.est$proj[1:20]
#> [1] 1.027419e+00 1.137467e-21 -3.249525e-21 -5.040831e-21 -4.798542e-21
#> [6] 3.652147e-21 -1.541715e-21 3.920373e-03 1.604445e-21 4.330363e-21
#> [11] 5.887975e-21 3.153287e-21 -7.296738e-22 -7.101951e-22 -1.363728e-21
#> [16] -5.406470e-21 1.762665e-21 -1.155885e-20 4.894473e-21 -4.830918e-21
## Finding Projection Direction using best step size
Direction.est <- SIHR::Direction_searchtuning(X,loading=c(1,rep(0,(p-1))))
### First 20 entries of the projection vector
Direction.est$proj[1:20]
#> [1] 1.027419e+00 1.100801e-21 -3.254499e-21 -4.997330e-21 -4.751100e-21
#> [6] 3.650647e-21 -1.485238e-21 3.920373e-03 1.548722e-21 4.339941e-21
#> [11] 5.863320e-21 3.217438e-21 -6.788858e-22 -6.577332e-22 -1.311394e-21
#> [16] -5.395191e-21 1.769135e-21 -1.150573e-20 4.907870e-21 -4.843679e-21

Finding projection direction in high-dimensional logistic regression
n = 50
p = 400
X = matrix(sample(-2:2,n*p,replace = TRUE),nrow=n,ncol=p)
y = rbinom(n,1,0.5)
## initial Lasso-type logistic fit on the column-normalized design
col.norm <- 1/sqrt((1/n)*diag(t(X)%*%X)+0.0001)
Xnor <- X %*% diag(col.norm)
fit = glmnet::cv.glmnet(Xnor, y, alpha=1, family = "binomial")
htheta <- as.vector(coef(fit, s = "lambda.min"))
support <- (abs(htheta)>0.001)
## prepend an intercept column and undo the column normalization
Xb <- cbind(rep(1,n),Xnor)
Xc <- cbind(rep(1,n),X)
col.norm <- c(1,col.norm)
pp <- (p+1)
## loading vector: intercept entry followed by the observation of interest
xnew = c(1,rep(0,(p-1)))
loading = rep(0,pp)
loading[1] = 1
loading[-1] = xnew
htheta <- htheta*col.norm
htheta <- as.vector(htheta)
## derivative of the logistic link at the fitted values: supplies deriv.vec,
## with 1/f_prime used as the weights in the calls below
f_prime <- exp(Xc%*%htheta)/(1+exp(Xc%*%htheta))^2
step <- 2
resol <- 1.5 ## resol is needed for mu below; defined here so this block is self-contained
## Finding Projection Direction using fixed tuning parameter
Direction.est <- SIHR::Direction_fixedtuning(X,loading=c(1,rep(0,(p-1))),mu=sqrt(2.01*log(p)/n)*resol^{-(step-1)},model = "logistic",weight = 1/f_prime, deriv.vec = f_prime)
### First 20 entries of the projection vector
Direction.est$proj[1:20]
#> [1] 3.234706e-01 5.235360e-23 -1.156472e-23 4.456602e-23 -4.399074e-23
#> [6] -7.384066e-24 1.794255e-24 1.385838e-23 -2.126070e-25 2.911442e-23
#> [11] -8.377458e-24 -2.552474e-23 3.018341e-23 -1.800046e-23 2.677253e-23
#> [16] -5.286996e-23 1.208285e-23 -1.330125e-22 8.063287e-24 1.428302e-24
## Finding Projection Direction using best step size
Direction.est <- SIHR::Direction_searchtuning(Xc,loading,model = "logistic",weight = 1/f_prime, deriv.vec = f_prime)
### First 20 entries of the projection vector
Direction.est$proj[1:20]
#> [1] 3.701162e-01 2.686621e-01 -1.862471e-22 1.715356e-23 1.006911e-22
#> [6] -9.708787e-24 1.974327e-22 2.745049e-22 -7.526144e-23 1.130423e-22
#> [11] 1.101680e-22 -1.391776e-22 -3.206116e-22 -2.677714e-23 -3.981109e-22
#> [16] -1.707152e-22 -1.623773e-22 3.197569e-22 -4.557997e-22 -1.446905e-22

References

Cai, Tianxi, T. Tony Cai, and Zijian Guo. 2019. “Optimal Statistical Inference for Individualized Treatment Effects in High-Dimensional Models.” Journal of the Royal Statistical Society: Series B. https://arxiv.org/pdf/1904.12891.pdf.
Cai, T. Tony, Zijian Guo, and Rong Ma. n.d. “Statistical Inference for High-Dimensional Generalized Linear Models with Binary Outcomes.” Journal of the American Statistical Association.
Guo, Zijian, Prabrisha Rakshit, Daniel S. Herman, and Jinbo Chen. 2019. “Inference for Case Probability in High-Dimensional Logistic Regression.” Preprint, arXiv:2012.07133. https://arxiv.org/abs/2012.07133.
Guo, Zijian, Claude Renaux, Peter Bühlmann, and T. Tony Cai. 2019. “Group Inference in High Dimensions with Applications to Hierarchical Testing.” Preprint, arXiv:1909.01503. https://arxiv.org/pdf/1909.01503.pdf.