# Squared Euclidean column norms of Z and X (used by the SAVS sparsification
# steps below). colSums(.^2) computes sum_j x_j^2 per column directly,
# avoiding the redundant sqrt()^2 round-trip of the previous
# apply(., 2, function(x) sqrt(sum(x^2)))^2 formulation.
norm.Z.i <- colSums(Z^2)
norm.X.i <- colSums(X^2)
# Total number of MCMC iterations and the indices of post-burn-in draws kept
ntot <- nburn + thin*nsave
save.set <- seq(nburn+thin, ntot, thin)
###------------------------ Storage matrices --------------------------------###
# One slice per retained draw; T = number of time periods, K = number of
# coefficients (dimensions inferred from usage throughout the sampler).
beta.diff.store <- beta.store <- beta.sps.store <- array(NA,c(length(save.set),T,K))
A.store <- A.sps.store <- array(NA,c(length(save.set),K))
sigma2.store <- array(NA,c(length(save.set), T))
fit.store <- array(NA,c(length(save.set), T))
log.lambda.store <- array(NA,c(length(save.set),T))
v0.diag.store <- ind.store <- array(NA, c(length(save.set), T, K))
prob.store <- array(NA, c(length(save.set),1))
save.int <- 0
pb <- txtProgressBar(min = 0, max = ntot, style = 3) # Start progress bar
irep <- 1
###------------------------------ Step 1: -----------------------------------###
###-------------- Sample time-invariant part of the model -------------------###
###--------------------------------------------------------------------------###
# Step 1.1: Non-sparsified coefficients
# Net out the TVP part and scale by the error volatility so the constant
# coefficients A can be drawn from a standard Gaussian linear regression.
X.cons <- X/sigma
Y.cons <- (y - Z%*%b.tilde.hat)/sigma
if (K > 1){
V.post <- solve(crossprod(X.cons) + V0.cons.inv)
}else{
V.post <- try(solve(crossprod(X.cons) + V0.cons.inv), silent = TRUE)
# BUG FIX: the generalized-inverse fallback was assigned to an unused
# variable V1, leaving V.post as a "try-error" object and crashing the
# matrix product below. It must overwrite V.post itself.
if (is(V.post,"try-error")) V.post <- ginv(crossprod(X.cons) + V0.cons.inv)
}
A.post <- V.post%*%crossprod(X.cons,Y.cons)
# Draw A | rest ~ N(A.post, V.post); fall back to mvrnorm if chol() fails
# (e.g. V.post numerically not positive definite).
A.draw <- try(A.post + t(chol(V.post))%*%rnorm(K), silent = TRUE)
if (is(A.draw, "try-error")) A.draw <- mvrnorm(1, A.post, V.post)
# Step 1.2: SAVS step: zeta = 1, nu = nu
# Zero out coefficients whose scaled magnitude falls below the adaptive
# penalty mu.jj = 1/|A|^nu, and shrink the survivors toward zero.
A.sparse <- matrix(0, K,1)
mu.jj <- 1/(abs(A.draw))^nu
A.sl.null <- (abs(A.draw)*norm.X.i) < mu.jj
A.sparse[!A.sl.null] <- (sign(A.draw)* 1/norm.X.i * ((abs(A.draw)*norm.X.i)-mu.jj))[!A.sl.null]
###------------------------------ Step 2: -----------------------------------###
###---- Sample HS hyperparameters for prior on the constant coefficients ----###
###--------------------------------------------------------------------------###
if(prior.type == "HS"){
# Horseshoe update conditional on the current draw of A: refreshes the local
# scales (lambda), global scale (tau) and their auxiliary variables (nu, zeta).
hs_draw <- get.hs(bdraw=as.numeric(A.draw),lambda.hs=lambda.A.cons,nu.hs=nu.A.cons,tau.hs=tau.A.cons,zeta.hs=zeta.A.cons)
lambda.A.cons <- hs_draw$lambda
nu.A.cons <- hs_draw$nu
tau.A.cons <- hs_draw$tau
zeta.A.cons <- hs_draw$zeta
# Resulting prior variances; tiny offset keeps them strictly positive
psi.cons <- hs_draw$psi+1e-8
}
# Diagonal prior variance-covariance of the constant coefficients + its inverse
V0.cons <- diag(psi.cons)
V0.cons.inv <- diag(1/psi.cons)
###------------------------------ Step 3: -----------------------------------###
###--------- Sample TVPs using the Bhattacharya et al algorithm -------------###
###--------------------------------------------------------------------------###
# Draws all time-varying parameters (TVPs) in one block via the fast sampler
# of Bhattacharya et al.; if tvp == FALSE all states are fixed at zero.
if(tvp){
# Normalizing step (subtract constant part, divide through error volatility)
y_ <- (y - X%*%A.draw)/sigma
X_ <- Matrix::bdiag(Z)/sigma
# Sampling from (manipulated) prior variance-covariance matrix:
# inactive states (ind == FALSE) get the tiny offset variance off.set.V0
v0.diag.restr <- v0.diag
v0.diag.restr[!ind] <- off.set.V0
u <- rnorm(k, 0, sqrt(v0.diag.restr)) # u ~ N(0, V0)
XV0.full <- t(X_)*v0.diag.restr # V0 is diagonal!
delta <- rnorm(T, 0, 1) # delta ~ N(0, I)
v <- X_%*%u + delta   # v = X_*u + delta
if (sum(ind)==0){#Speed further up computations by noting that Mz = I_T
w2 <- (y_ - v)
}else{# Restricted X_ for approximating inverse, dropped columns correspond to zero coefficients
X__ <- Matrix::Matrix(X_[,ind, drop = F], sparse = TRUE)
XV0.restr <- t(X__)*v0.diag.restr[ind]   # Create full and restricted X%*%V0
Mz <- X__%*%XV0.restr+I_T
# NOTE(review): forwardsolve() treats Mz as lower-triangular; confirm Mz
# really is triangular in the non-random-walk branch, otherwise a full
# solve() would be required here as well.
if(rw.st) w2 <- Matrix::solve(Mz, (y_ - v)) else w2 <- forwardsolve(Mz, (y_ - v))
}
# Posterior draw of the stacked states: b = u + V0 X' Mz^{-1} (y_ - v)
b.tilde.hat <- as.vector(u + XV0.full%*%w2)
b.tilde.mat <- t(matrix(b.tilde.hat,K,T)) # reshape stacked vector to T x K
###------------------------------ Step 4: -----------------------------------###
###------ Sample HS hyperparameters for prior on the TVP coefficients -------###
###--------------------------------------------------------------------------###
if(prior.type == "HS"){
# Standardize states by the time-specific scales lambda.t before the
# column-wise (per-coefficient) horseshoe updates.
vecht <- rep(lambda.t, each = K)
b.tilde.hs <- t(matrix(b.tilde.hat/sqrt(vecht),K,T))
for(pp in 1:K){
hs_draw <- get.hs(bdraw=b.tilde.hs[,pp],lambda.hs=lambda.A[,pp],nu.hs=nu.A[,pp],tau.hs=tau.A[1,pp],zeta.hs=zeta.A[1,pp])
lambda.A[,pp] <- hs_draw$lambda
nu.A[,pp] <- hs_draw$nu
tau.A[,pp] <- hs_draw$tau
zeta.A[,pp] <- hs_draw$zeta
psi.tvp.mat[,pp] <- hs_draw$psi
}
# Tiny offset keeps the prior variances strictly positive
psi.tvp <- as.vector(t(psi.tvp.mat)) + 1e-8
# create full prior variance matrix v0_t = lambda_t*psi.tvp
v0.diag <- rep(lambda.t, each = K)*psi.tvp
# Cap prior variances at max.chg (presumably a length-T*K vector of caps,
# given the vector subscript on the right-hand side -- confirm)
v0.diag[v0.diag > max.chg] <- max.chg[v0.diag > max.chg]
}else{
psi.tvp <- max.chg
v0.diag <- rep(lambda.t, each = K)*psi.tvp
}
###------------------------------ Step 5: -----------------------------------###
###----------------------- SAVS step for b.tilde.hat ------------------------###
###--------------------------------------------------------------------------###
# Signal-adaptive variable selector on the states: zero out those whose
# scaled magnitude falls below the penalty mu.jj = zeta / |b|^nu
# (computed on the log scale for numerical stability).
b.sparse <- matrix(0, TK,1)
mu.jj <- log(zeta) - nu*log(abs(b.tilde.hat))
mu.jj <- exp(mu.jj)
sl.null <- (abs(b.tilde.hat)*norm.Z.i) < mu.jj
# If fewer than a share tvp.perc of states would be zeroed, enforce that
# share by zeroing everything above the (1 - tvp.perc) quantile instead.
if(sum(sl.null) < ceiling(length(sl.null)*tvp.perc)){
sl.temp <- (abs(b.tilde.hat)*norm.Z.i) - mu.jj
sl.null <- (sl.temp > quantile(sl.temp, (1-tvp.perc)))
}
b.sparse[!sl.null] <- (sign(b.tilde.hat)* 1/norm.Z.i * ((abs(b.tilde.hat)*norm.Z.i)-mu.jj))[!sl.null]
###------------------------------ Step 6: -----------------------------------###
###-------------- Create indicator to restrict Z and V0   -------------------###
###--------------------------------------------------------------------------###
# ind flags the active states for the next sweep. During the training phase
# of the burn-in (or with approx.type "none") everything stays active.
if(approx.type == "none" || irep < train.ind*nburn)
{
ind <- rep(TRUE, (T*K))
# NOTE(review): this rescue branch evaluates the PREVIOUS iteration's ind and
# takes precedence over the approx.type branches below -- confirm intended.
}else if(sum(ind) <= length(ind)*tvp.perc){
ind <- (v0.diag > quantile(v0.diag, (1-tvp.perc)))
}else if(approx.type == "savs"){
ind <- !sl.null
}else if(approx.type == "thrsh"){
ind <- (v0.diag>thrs)
}
###------------------------------ Step 7: -----------------------------------###
###------------ Sample the global TVP shrinkage parameter -------------------###
###--------------------------------------------------------------------------###
if (evol_error %in% c("svol-z", "svol-n")){
# Row norms of the standardized state matrix feed a (dynamic) SV law of motion
scale.mat <- t(matrix(sqrt(psi.tvp),K,T))
omega <- sqrt(apply((b.tilde.mat/scale.mat)^2,1,sum))
# Global shrinkage parameter (across all t's and i's)
if(evol_error == "svol-z"){
evolParams <- sampleEvolParams(omega = omega, evolParams = evolParams ,  sigma_e = 1, evol_error = "DHS", error_type = "logXiK", m_st = log(K) - 1/K, v_st2 = 2/K)
}else if(evol_error == "svol-n"){
evolParams <- sampleEvolParams(omega, evolParams,  sigma_e = 1, evol_error = "SV", error_type = "logXiK", m_st = log(K) - 1/K, v_st2 = 2/K)
}
# Split log-volatilities into time variation (lambda.t) and level (tau);
# cap the level at off.set.max before recombining.
lambda.t <- exp(evolParams$ht-evolParams$dhs_mean)
tau <- exp(evolParams$dhs_mean)
tau[tau > off.set.max] <- off.set.max
lambda.t <- lambda.t*tau
}else if (evol_error == "Mix"){
# Two-point mixture: each period t gets either the spike (tau0sq) or the
# slab (tau1sq) variance via a Bernoulli indicator st[t].
scale.mat <- t(matrix(sqrt(psi.tvp),K,T))
omega <- b.tilde.mat/scale.mat
st <- matrix(0, T,1)
for (t in seq_len(T)){
pt0 <- prod(dnorm(omega[t,], 0, sqrt(tau0sq)))*(1-p.prior) + 1e-50
pt1 <- prod(dnorm(omega[t,], 0, sqrt(tau1sq)))*(p.prior) + 1e-50
pt <- pt1/(pt0+pt1)
# NOTE(review): pt is the posterior probability of the slab regime, yet
# st[t] is set to 1 with probability 1 - pt; the comparison looks inverted
# (runif(1) < pt would select the slab with probability pt) -- confirm.
if (runif(1) > pt){
st[t] <- 1
sig.t <- tau1sq
}else{
sig.t <- tau0sq
}
lambda.t[t] <- sig.t
}
# Beta update of the slab inclusion probability
p.prior <- rbeta(1, sum(st)+e.0, (T-sum(st))+e.1)
}else if (evol_error =="MS"){
# Markov-switching variances: forward-filter backward-sample the regime path
scale.mat <- t(matrix(sqrt(psi.tvp),K,T))
omega <- b.tilde.mat/scale.mat
st_filt <- hamiltonfilter(sqrt(tau0sq),sqrt(tau1sq),ppnew,qqnew,omega)
fprob <- st_filt$fprob;liki <- st_filt$lik
s_sample <- getS(fprob,ppnew,qqnew,ncrit,MaxTrys)
problems <- s_sample$prob;st <- s_sample$ST
# Fall back to the previous regime path s00 if the state sampler failed
if (problems==1) st <- s00 else s00 <- st
# Count regime transitions (00, 01, 10, 11), then draw the transition
# probabilities from their conjugate Beta posteriors
ms.tr <- table(factor(paste0(head(st,-1),tail(st,-1)), levels = c("00", "01", "10", "11")))
p.draw <- rbeta(1, e.1.1 + ms.tr["11"], e.1.0 + ms.tr["10"])
q.draw <- rbeta(1, e.0.0 + ms.tr["00"], e.0.1 + ms.tr["01"])
ppnew <- rep(p.draw, T)
qqnew <- rep(q.draw, T)
lambda.t <- st*tau1sq + (1-st)*tau0sq
}else if(evol_error == "static"){
lambda.t <- rep(1, T)
}
# During the training phase of the burn-in keep all time scales fixed at one
if(irep < train.ind*nburn) lambda.t[] <- 1
}else{
# No TVPs: states (and their sparsified version) are identically zero
b.tilde.hat  <- b.sparse <- rep(0, TK)
ind <- rep(FALSE, TK)
b.tilde.mat <- t(matrix(b.tilde.hat,K,T))
}
# NOTE(review): from here on the file looks like a console-history transcript
# (.Rhistory); the lines below replay Steps 3-6 of the if(tvp) block above,
# pasted at the prompt without the surrounding if(). Logic is identical.
# Normalizing step (subtract constant part, divide through error volatility)
y_ <- (y - X%*%A.draw)/sigma
X_ <- Matrix::bdiag(Z)/sigma
# Sampling from (manipulated) prior variance-covariance matrix
v0.diag.restr <- v0.diag
v0.diag.restr[!ind] <- off.set.V0
u <- rnorm(k, 0, sqrt(v0.diag.restr)) # u ~ N(0, V0)
XV0.full <- t(X_)*v0.diag.restr # V0 is diagonal!
delta <- rnorm(T, 0, 1) # delta ~ N(0, I)
v <- X_%*%u + delta   # v = X_*u + delta
if (sum(ind)==0){#Speed further up computations by noting that Mz = I_T
w2 <- (y_ - v)
}else{# Restricted X_ for approximating inverse, dropped columns correspond to zero coefficients
X__ <- Matrix::Matrix(X_[,ind, drop = F], sparse = TRUE)
XV0.restr <- t(X__)*v0.diag.restr[ind]   # Create full and restricted X%*%V0
Mz <- X__%*%XV0.restr+I_T
if(rw.st) w2 <- Matrix::solve(Mz, (y_ - v)) else w2 <- forwardsolve(Mz, (y_ - v))
}
b.tilde.hat <- as.vector(u + XV0.full%*%w2)
b.tilde.mat <- t(matrix(b.tilde.hat,K,T))
###------------------------------ Step 4: -----------------------------------###
###------ Sample HS hyperparameters for prior on the TVP coefficients -------###
###--------------------------------------------------------------------------###
if(prior.type == "HS"){
vecht <- rep(lambda.t, each = K)
b.tilde.hs <- t(matrix(b.tilde.hat/sqrt(vecht),K,T))
for(pp in 1:K){
hs_draw <- get.hs(bdraw=b.tilde.hs[,pp],lambda.hs=lambda.A[,pp],nu.hs=nu.A[,pp],tau.hs=tau.A[1,pp],zeta.hs=zeta.A[1,pp])
lambda.A[,pp] <- hs_draw$lambda
nu.A[,pp] <- hs_draw$nu
tau.A[,pp] <- hs_draw$tau
zeta.A[,pp] <- hs_draw$zeta
psi.tvp.mat[,pp] <- hs_draw$psi
}
psi.tvp <- as.vector(t(psi.tvp.mat)) + 1e-8
# create full prior variance matrix v0_t = lambda_t*psi.tvp
v0.diag <- rep(lambda.t, each = K)*psi.tvp
v0.diag[v0.diag > max.chg] <- max.chg[v0.diag > max.chg]
}else{
psi.tvp <- max.chg
v0.diag <- rep(lambda.t, each = K)*psi.tvp
}
###------------------------------ Step 5: -----------------------------------###
###----------------------- SAVS step for b.tilde.hat ------------------------###
###--------------------------------------------------------------------------###
b.sparse <- matrix(0, TK,1)
mu.jj <- log(zeta) - nu*log(abs(b.tilde.hat))
mu.jj <- exp(mu.jj)
sl.null <- (abs(b.tilde.hat)*norm.Z.i) < mu.jj
if(sum(sl.null) < ceiling(length(sl.null)*tvp.perc)){
sl.temp <- (abs(b.tilde.hat)*norm.Z.i) - mu.jj
sl.null <- (sl.temp > quantile(sl.temp, (1-tvp.perc)))
}
b.sparse[!sl.null] <- (sign(b.tilde.hat)* 1/norm.Z.i * ((abs(b.tilde.hat)*norm.Z.i)-mu.jj))[!sl.null]
###------------------------------ Step 6: -----------------------------------###
###-------------- Create indicator to restrict Z and V0   -------------------###
###--------------------------------------------------------------------------###
if(approx.type == "none" || irep < train.ind*nburn)
{
ind <- rep(TRUE, (T*K))
}else if(sum(ind) <= length(ind)*tvp.perc){
ind <- (v0.diag > quantile(v0.diag, (1-tvp.perc)))
}else if(approx.type == "savs"){
ind <- !sl.null
}else if(approx.type == "thrsh"){
ind <- (v0.diag>thrs)
}
# Console echo: prints the current evol_error setting (interactive inspection)
evol_error
###------------------------------ Step 7: -----------------------------------###
###------------ Sample the global TVP shrinkage parameter -------------------###
###--------------------------------------------------------------------------###
# Console-history replay of Step 7 from the if(tvp) block above; logic identical.
if (evol_error %in% c("svol-z", "svol-n")){
scale.mat <- t(matrix(sqrt(psi.tvp),K,T))
omega <- sqrt(apply((b.tilde.mat/scale.mat)^2,1,sum))
# Global shrinkage parameter (across all t's and i's)
if(evol_error == "svol-z"){
evolParams <- sampleEvolParams(omega = omega, evolParams = evolParams ,  sigma_e = 1, evol_error = "DHS", error_type = "logXiK", m_st = log(K) - 1/K, v_st2 = 2/K)
}else if(evol_error == "svol-n"){
evolParams <- sampleEvolParams(omega, evolParams,  sigma_e = 1, evol_error = "SV", error_type = "logXiK", m_st = log(K) - 1/K, v_st2 = 2/K)
}
lambda.t <- exp(evolParams$ht-evolParams$dhs_mean)
tau <- exp(evolParams$dhs_mean)
tau[tau > off.set.max] <- off.set.max
lambda.t <- lambda.t*tau
}else if (evol_error == "Mix"){
scale.mat <- t(matrix(sqrt(psi.tvp),K,T))
omega <- b.tilde.mat/scale.mat
st <- matrix(0, T,1)
for (t in seq_len(T)){
pt0 <- prod(dnorm(omega[t,], 0, sqrt(tau0sq)))*(1-p.prior) + 1e-50
pt1 <- prod(dnorm(omega[t,], 0, sqrt(tau1sq)))*(p.prior) + 1e-50
pt <- pt1/(pt0+pt1)
# NOTE(review): st[t] = 1 occurs with probability 1 - pt although pt is the
# slab posterior probability -- the comparison looks inverted; confirm.
if (runif(1) > pt){
st[t] <- 1
sig.t <- tau1sq
}else{
sig.t <- tau0sq
}
lambda.t[t] <- sig.t
}
p.prior <- rbeta(1, sum(st)+e.0, (T-sum(st))+e.1)
}else if (evol_error =="MS"){
scale.mat <- t(matrix(sqrt(psi.tvp),K,T))
omega <- b.tilde.mat/scale.mat
st_filt <- hamiltonfilter(sqrt(tau0sq),sqrt(tau1sq),ppnew,qqnew,omega)
fprob <- st_filt$fprob;liki <- st_filt$lik
s_sample <- getS(fprob,ppnew,qqnew,ncrit,MaxTrys)
problems <- s_sample$prob;st <- s_sample$ST
if (problems==1) st <- s00 else s00 <- st
ms.tr <- table(factor(paste0(head(st,-1),tail(st,-1)), levels = c("00", "01", "10", "11")))
p.draw <- rbeta(1, e.1.1 + ms.tr["11"], e.1.0 + ms.tr["10"])
q.draw <- rbeta(1, e.0.0 + ms.tr["00"], e.0.1 + ms.tr["01"])
ppnew <- rep(p.draw, T)
qqnew <- rep(q.draw, T)
lambda.t <- st*tau1sq + (1-st)*tau0sq
}else if(evol_error == "static"){
lambda.t <- rep(1, T)
}
# Console echoes inspecting the freshly drawn scales and transition counts
lambda.t
ms.tr
###------------------------------ Step 7: -----------------------------------###
###------------ Sample the global TVP shrinkage parameter -------------------###
###--------------------------------------------------------------------------###
# Another console-history replay of Step 7, followed by line-by-line
# interactive debugging of the Markov-switching branch (bare names below are
# console echoes, not assignments).
if (evol_error %in% c("svol-z", "svol-n")){
scale.mat <- t(matrix(sqrt(psi.tvp),K,T))
omega <- sqrt(apply((b.tilde.mat/scale.mat)^2,1,sum))
# Global shrinkage parameter (across all t's and i's)
if(evol_error == "svol-z"){
evolParams <- sampleEvolParams(omega = omega, evolParams = evolParams ,  sigma_e = 1, evol_error = "DHS", error_type = "logXiK", m_st = log(K) - 1/K, v_st2 = 2/K)
}else if(evol_error == "svol-n"){
evolParams <- sampleEvolParams(omega, evolParams,  sigma_e = 1, evol_error = "SV", error_type = "logXiK", m_st = log(K) - 1/K, v_st2 = 2/K)
}
lambda.t <- exp(evolParams$ht-evolParams$dhs_mean)
tau <- exp(evolParams$dhs_mean)
tau[tau > off.set.max] <- off.set.max
lambda.t <- lambda.t*tau
}else if (evol_error == "Mix"){
scale.mat <- t(matrix(sqrt(psi.tvp),K,T))
omega <- b.tilde.mat/scale.mat
st <- matrix(0, T,1)
for (t in seq_len(T)){
pt0 <- prod(dnorm(omega[t,], 0, sqrt(tau0sq)))*(1-p.prior) + 1e-50
pt1 <- prod(dnorm(omega[t,], 0, sqrt(tau1sq)))*(p.prior) + 1e-50
pt <- pt1/(pt0+pt1)
if (runif(1) > pt){
st[t] <- 1
sig.t <- tau1sq
}else{
sig.t <- tau0sq
}
lambda.t[t] <- sig.t
}
p.prior <- rbeta(1, sum(st)+e.0, (T-sum(st))+e.1)
}else if (evol_error =="MS"){
scale.mat <- t(matrix(sqrt(psi.tvp),K,T))
omega <- b.tilde.mat/scale.mat
st_filt <- hamiltonfilter(sqrt(tau0sq),sqrt(tau1sq),ppnew,qqnew,omega)
fprob <- st_filt$fprob;liki <- st_filt$lik
s_sample <- getS(fprob,ppnew,qqnew,ncrit,MaxTrys)
problems <- s_sample$prob;st <- s_sample$ST
if (problems==1) st <- s00 else s00 <- st
ms.tr <- table(factor(paste0(head(st,-1),tail(st,-1)), levels = c("00", "01", "10", "11")))
p.draw <- rbeta(1, e.1.1 + ms.tr["11"], e.1.0 + ms.tr["10"])
q.draw <- rbeta(1, e.0.0 + ms.tr["00"], e.0.1 + ms.tr["01"])
ppnew <- rep(p.draw, T)
qqnew <- rep(q.draw, T)
lambda.t <- st*tau1sq + (1-st)*tau0sq
}else if(evol_error == "static"){
lambda.t <- rep(1, T)
}
# --- Interactive MS-step debugging session (console history) ---
lambda.t
ms.tr <- table(factor(paste0(head(st,-1),tail(st,-1)), levels = c("00", "01", "10", "11")))
ms.tr
p.draw <- rbeta(1, e.1.1 + ms.tr["11"], e.1.0 + ms.tr["10"])
p.draw
q.draw <- rbeta(1, e.0.0 + ms.tr["00"], e.0.1 + ms.tr["01"])
ppnew <- rep(p.draw, T)
qqnew <- rep(q.draw, T)
lambda.t <- st*tau1sq + (1-st)*tau0sq
lambda.t
scale.mat <- t(matrix(sqrt(psi.tvp),K,T))
omega <- b.tilde.mat/scale.mat
st_filt <- hamiltonfilter(sqrt(tau0sq),sqrt(tau1sq),ppnew,qqnew,omega)
fprob <- st_filt$fprob;liki <- st_filt$lik
s_sample <- getS(fprob,ppnew,qqnew,ncrit,MaxTrys)
s_sample
problems <- s_sample$prob;st <- s_sample$ST
problems
if (problems==1) st <- s00 else s00 <- st
s00
st
# Echoes of the Beta prior hyperparameters for the transition probabilities
e.1.0
e.0.1
ms.tr
ms.tr
e.0.0
e.1.1
q.draw
p.draw <- rbeta(1, e.1.1 + ms.tr["11"], e.1.0 + ms.tr["10"])
p.draw
q.draw <- rbeta(1, e.0.0 + ms.tr["00"], e.0.1 + ms.tr["01"])
q.draw
###------------------------------ Step 3: -----------------------------------###
###--------- Sample TVPs using the Bhattacharya et al algorithm -------------###
###--------------------------------------------------------------------------###
# Console-history replay of the complete Steps 3-7 if(tvp) block from the top
# of the file; the code is identical to that earlier block.
if(tvp){
# Normalizing step (subtract constant part, divide through error volatility)
y_ <- (y - X%*%A.draw)/sigma
X_ <- Matrix::bdiag(Z)/sigma
# Sampling from (manipulated) prior variance-covariance matrix
v0.diag.restr <- v0.diag
v0.diag.restr[!ind] <- off.set.V0
u <- rnorm(k, 0, sqrt(v0.diag.restr)) # u ~ N(0, V0)
XV0.full <- t(X_)*v0.diag.restr # V0 is diagonal!
delta <- rnorm(T, 0, 1) # delta ~ N(0, I)
v <- X_%*%u + delta   # v = X_*u + delta
if (sum(ind)==0){#Speed further up computations by noting that Mz = I_T
w2 <- (y_ - v)
}else{# Restricted X_ for approximating inverse, dropped columns correspond to zero coefficients
X__ <- Matrix::Matrix(X_[,ind, drop = F], sparse = TRUE)
XV0.restr <- t(X__)*v0.diag.restr[ind]   # Create full and restricted X%*%V0
Mz <- X__%*%XV0.restr+I_T
if(rw.st) w2 <- Matrix::solve(Mz, (y_ - v)) else w2 <- forwardsolve(Mz, (y_ - v))
}
b.tilde.hat <- as.vector(u + XV0.full%*%w2)
b.tilde.mat <- t(matrix(b.tilde.hat,K,T))
###------------------------------ Step 4: -----------------------------------###
###------ Sample HS hyperparameters for prior on the TVP coefficients -------###
###--------------------------------------------------------------------------###
if(prior.type == "HS"){
vecht <- rep(lambda.t, each = K)
b.tilde.hs <- t(matrix(b.tilde.hat/sqrt(vecht),K,T))
for(pp in 1:K){
hs_draw <- get.hs(bdraw=b.tilde.hs[,pp],lambda.hs=lambda.A[,pp],nu.hs=nu.A[,pp],tau.hs=tau.A[1,pp],zeta.hs=zeta.A[1,pp])
lambda.A[,pp] <- hs_draw$lambda
nu.A[,pp] <- hs_draw$nu
tau.A[,pp] <- hs_draw$tau
zeta.A[,pp] <- hs_draw$zeta
psi.tvp.mat[,pp] <- hs_draw$psi
}
psi.tvp <- as.vector(t(psi.tvp.mat)) + 1e-8
# create full prior variance matrix v0_t = lambda_t*psi.tvp
v0.diag <- rep(lambda.t, each = K)*psi.tvp
v0.diag[v0.diag > max.chg] <- max.chg[v0.diag > max.chg]
}else{
psi.tvp <- max.chg
v0.diag <- rep(lambda.t, each = K)*psi.tvp
}
###------------------------------ Step 5: -----------------------------------###
###----------------------- SAVS step for b.tilde.hat ------------------------###
###--------------------------------------------------------------------------###
b.sparse <- matrix(0, TK,1)
mu.jj <- log(zeta) - nu*log(abs(b.tilde.hat))
mu.jj <- exp(mu.jj)
sl.null <- (abs(b.tilde.hat)*norm.Z.i) < mu.jj
if(sum(sl.null) < ceiling(length(sl.null)*tvp.perc)){
sl.temp <- (abs(b.tilde.hat)*norm.Z.i) - mu.jj
sl.null <- (sl.temp > quantile(sl.temp, (1-tvp.perc)))
}
b.sparse[!sl.null] <- (sign(b.tilde.hat)* 1/norm.Z.i * ((abs(b.tilde.hat)*norm.Z.i)-mu.jj))[!sl.null]
###------------------------------ Step 6: -----------------------------------###
###-------------- Create indicator to restrict Z and V0   -------------------###
###--------------------------------------------------------------------------###
if(approx.type == "none" || irep < train.ind*nburn)
{
ind <- rep(TRUE, (T*K))
}else if(sum(ind) <= length(ind)*tvp.perc){
ind <- (v0.diag > quantile(v0.diag, (1-tvp.perc)))
}else if(approx.type == "savs"){
ind <- !sl.null
}else if(approx.type == "thrsh"){
ind <- (v0.diag>thrs)
}
###------------------------------ Step 7: -----------------------------------###
###------------ Sample the global TVP shrinkage parameter -------------------###
###--------------------------------------------------------------------------###
if (evol_error %in% c("svol-z", "svol-n")){
scale.mat <- t(matrix(sqrt(psi.tvp),K,T))
omega <- sqrt(apply((b.tilde.mat/scale.mat)^2,1,sum))
# Global shrinkage parameter (across all t's and i's)
if(evol_error == "svol-z"){
evolParams <- sampleEvolParams(omega = omega, evolParams = evolParams ,  sigma_e = 1, evol_error = "DHS", error_type = "logXiK", m_st = log(K) - 1/K, v_st2 = 2/K)
}else if(evol_error == "svol-n"){
evolParams <- sampleEvolParams(omega, evolParams,  sigma_e = 1, evol_error = "SV", error_type = "logXiK", m_st = log(K) - 1/K, v_st2 = 2/K)
}
lambda.t <- exp(evolParams$ht-evolParams$dhs_mean)
tau <- exp(evolParams$dhs_mean)
tau[tau > off.set.max] <- off.set.max
lambda.t <- lambda.t*tau
}else if (evol_error == "Mix"){
scale.mat <- t(matrix(sqrt(psi.tvp),K,T))
omega <- b.tilde.mat/scale.mat
st <- matrix(0, T,1)
for (t in seq_len(T)){
pt0 <- prod(dnorm(omega[t,], 0, sqrt(tau0sq)))*(1-p.prior) + 1e-50
pt1 <- prod(dnorm(omega[t,], 0, sqrt(tau1sq)))*(p.prior) + 1e-50
pt <- pt1/(pt0+pt1)
if (runif(1) > pt){
st[t] <- 1
sig.t <- tau1sq
}else{
sig.t <- tau0sq
}
lambda.t[t] <- sig.t
}
p.prior <- rbeta(1, sum(st)+e.0, (T-sum(st))+e.1)
}else if (evol_error =="MS"){
scale.mat <- t(matrix(sqrt(psi.tvp),K,T))
omega <- b.tilde.mat/scale.mat
st_filt <- hamiltonfilter(sqrt(tau0sq),sqrt(tau1sq),ppnew,qqnew,omega)
fprob <- st_filt$fprob;liki <- st_filt$lik
s_sample <- getS(fprob,ppnew,qqnew,ncrit,MaxTrys)
problems <- s_sample$prob;st <- s_sample$ST
if (problems==1) st <- s00 else s00 <- st
ms.tr <- table(factor(paste0(head(st,-1),tail(st,-1)), levels = c("00", "01", "10", "11")))
p.draw <- rbeta(1, e.1.1 + ms.tr["11"], e.1.0 + ms.tr["10"])
q.draw <- rbeta(1, e.0.0 + ms.tr["00"], e.0.1 + ms.tr["01"])
ppnew <- rep(p.draw, T)
qqnew <- rep(q.draw, T)
lambda.t <- st*tau1sq + (1-st)*tau0sq
}else if(evol_error == "static"){
lambda.t <- rep(1, T)
}
if(irep < train.ind*nburn) lambda.t[] <- 1
}else{
b.tilde.hat  <- b.sparse <- rep(0, TK)
ind <- rep(FALSE, TK)
b.tilde.mat <- t(matrix(b.tilde.hat,K,T))
}
# Keep the raw sampled states before any cumulation
b.tilde.smp <- b.tilde.mat
# Under a random-walk state equation the draws are increments; cumulate each
# coefficient's path over time to obtain levels
if(rw.st) b.tilde.mat <- apply(b.tilde.mat, 2,cumsum)
# Console-history artifacts: repeated interactive source() calls of the main
# estimation/collection scripts. NOTE(review): these are user-specific
# absolute Dropbox paths and are not portable -- presumably left over from an
# interactive session; verify before reusing this file as a script.
source("~/Dropbox/!Working papers/Fast sampling/!!Tex/!SNDE/Accept/SNDE-Replication/estim-eqbyeq-main.R")
source("~/Dropbox/!Working papers/Fast sampling/!!Tex/!SNDE/Accept/SNDE-Replication/estim-eqbyeq-main.R")
source("~/Dropbox/!Working papers/Fast sampling/!!Tex/!SNDE/Accept/SNDE-Replication/estim-eqbyeq-main.R")
source("~/Dropbox/!Working papers/Fast sampling/!!Tex/!SNDE/Accept/SNDE-Replication/collect-VAR.R")
source("~/Dropbox/!Working papers/Fast sampling/!!Tex/!SNDE/Accept/SNDE-Replication/collect-VAR.R")
# RStudio data viewer call on the specification list (interactive only)
View(spec.list)
