# Generalized, Linear, and Mixed Models — Charles E. McCulloch, Shayle R. Searle
# p.60 Beta-binomial model
# Binarize the four Anscombe y-columns: 1 when a value exceeds its column
# mean, 0 otherwise. After transposing, each ROW of Y is one dataset of
# Bernoulli outcomes; n holds the trials per dataset and Yi. the successes.
data(anscombe)
Y <- as.matrix(anscombe[, c("y1", "y2", "y3", "y4")])
for (j in seq_len(ncol(Y))) {
  Y[, j] <- as.numeric(Y[, j] > mean(Y[, j]))
}
Y <- t(Y)
n <- rep(ncol(Y), nrow(Y))   # trials per dataset (11 observations each)
m <- nrow(Y)                 # number of datasets (4)
Yi. <- apply(Y, 1, sum)      # successes per dataset
# Broyden Quasi-Newton Algorithm p.213
# Score equations for beta-binomial ML estimation of x = (alpha, beta).
# Relies on globals: m (number of groups), n (trials per group),
# Yi. (successes per group).
#
# Uses the identity sum_{k=0}^{K-1} 1/(a+k) = digamma(a+K) - digamma(a).
# This also handles K == 0 correctly (it gives 0): the original
# sum(1/(a + 0:(K-1))) evaluated 0:(K-1) as c(0, -1) when K == 0 and
# summed spurious terms. It is also fully vectorized over groups.
f = function(x) {
  a <- x[1]
  b <- x[2]
  # shared term: d/d(a+b) of log B(a+b, n) pieces per group
  dab <- digamma(a + b + n) - digamma(a + b)
  # d logL / d alpha and d logL / d beta, one entry per group
  da <- digamma(a + Yi.) - digamma(a) - dab
  db <- digamma(b + n - Yi.) - digamma(b) - dab
  return(c(sum(da), sum(db)))
}
# Broyden quasi-Newton iteration for the beta-binomial score equations.
# Inverse-Hessian update: H+ = H + (s - H y) s' H / (s' H y), with a
# damped step X <- X - eta * H f(X). f(X) is now evaluated once per
# iteration (the original called it three times), and the loop exits
# as soon as the score norm falls below tolerance (same printed output).
X <- rep(1, 2)
ite <- 1000
eta <- 0.01
fX <- f(X)
H <- diag(fX)            # crude initial inverse-Jacobian scaling
for (l in 1:ite) {
  if (sum(abs(fX)) <= 10^(-9)) break   # converged
  X_pre <- X
  X <- X - eta * H %*% fX
  fX_new <- f(X)
  s <- X - X_pre
  y <- fX_new - fX
  H <- H + ((s - H %*% y) / as.numeric(t(s) %*% H %*% y)) %*% t(s) %*% H
  fX <- fX_new
  print(sum(abs(fX)))
}
alpha <- X[1]; beta <- X[2]
# Moments of the fitted model with pi ~ Beta(alpha, beta):
var_pi <- alpha * beta / ((alpha + beta + 1) * (alpha + beta)^2)
mu <- alpha / (alpha + beta)
tau <- 1 / (alpha + beta)
# Beta-binomial variance: Var(Yi.) = n mu (1-mu) [1 + (n-1)/(alpha+beta+1)]
# (the original divided the overdispersion term by an extra factor of 2,
# which matches no standard parameterization; var_pi above uses the
# standard Beta(alpha, beta) moments, so this is made consistent with it)
var_Yi. <- n * mu * (1 - mu) * (1 + (n - 1) / (alpha + beta + 1))
# p.109 conditional Inference
# Binarize the anscombe x-columns (1 above the column mean, else 0) as the
# binary response matrix Y; the y-columns serve as the covariate matrix XX.
data(anscombe)
Y <- as.matrix(anscombe[, c("x1", "x2", "x3", "x4")])
for (j in seq_len(ncol(Y))) {
  Y[, j] <- as.numeric(Y[, j] > mean(Y[, j]))
}
XX <- as.matrix(anscombe[, c("y1", "y2", "y3", "y4")])
# Broyden Quasi-Newton Algorithm p.213
# Score equations for the logistic model logit P(Y[i,j] = 1) = a_i + b*XX[i,j]:
# one intercept a_i per row plus a common slope b (last element of x).
# Returns the nrow(Y) intercept scores followed by the slope score.
# Relies on globals Y (binary response matrix) and XX (covariate matrix).
#
# plogis(z) is the numerically stable logistic CDF, identical to
# exp(z)/(1+exp(z)) but without overflow for large z.
f = function(x) {
  b <- x[length(x)]
  a <- c(x[-length(x)])
  p <- plogis(a + b * XX)   # a recycles down rows: a[i] pairs with row i
  f1 <- apply(Y, 1, sum) - apply(p, 1, sum)
  f2 <- sum(Y * XX) - sum(XX * p)
  return(c(f1, f2))
}
# Broyden quasi-Newton iteration for the logistic score equations.
# X holds the nrow(XX) intercepts followed by the common slope.
# f(X) is evaluated once per iteration (the original called it three
# times), and the loop exits once the score norm is below tolerance;
# the per-iteration printed log-likelihood is unchanged.
X <- rep(0.001, nrow(XX) + 1)
ite <- 10^6
eta <- 0.0001
fX <- f(X)
H <- diag(fX)            # crude initial inverse-Jacobian scaling
for (l in 1:ite) {
  if (sum(abs(fX)) <= 10^(-9)) break   # converged
  X_pre <- X
  X <- X - eta * H %*% fX
  fX_new <- f(X)
  s <- X - X_pre
  y <- fX_new - fX
  H <- H + ((s - H %*% y) / as.numeric(t(s) %*% H %*% y)) %*% t(s) %*% H
  fX <- fX_new
  # monitor the log-likelihood at the current estimate
  alpha <- c(X[-length(X)])
  beta <- X[length(X)]
  loglik <- sum(alpha * apply(Y, 1, sum)) + beta * sum(Y * XX) -
    sum(log(1 + exp(alpha + beta * XX)))
  print(loglik)
  #print(sum(abs(fX)))
}
# p.155  Smoothing spline: minimize ||Y - f||^2 + lam * f' K f, where
# K = D' C^{-1} D is the natural cubic spline penalty built from the
# knot spacings h (banded construction; see Green & Silverman).
# NOTE: sample() is unseeded, so each run uses different knots.
x = sample(1:5, 20, replace = TRUE) / 10   # random positive increments
Y = exp(x)                                 # response observed at the knots
x = cumsum(x)                              # strictly increasing knot locations
n = length(x)
h = diff(x)                                # knot spacings h_j = x_{j+1} - x_j
# D ((n-2) x n): second-divided-difference operator, one band per row.
D = array(0, dim = c(n - 2, n))
for (j in 1:(n - 2)) {
  D[j, j:(j + 2)] = c(1 / h[j], -(1 / h[j] + 1 / h[j + 1]), 1 / h[j + 1])
}
# C ((n-2) x (n-2)): symmetric tridiagonal, diagonal 2*(h[j] + h[j+1]),
# off-diagonals h[j+1].
C = array(0, dim = c(n - 2, n - 2))
C[1, 1] = 2 * (h[1] + h[2])
C[1, 2] = h[2]
C[(n - 2), (n - 3)] = h[n - 2]
# Last diagonal entry must follow the same 2*(h[j] + h[j+1]) pattern as
# row 1 and the interior rows; the original wrote h[n-1] alone, breaking
# the symmetric tridiagonal structure.
C[(n - 2), (n - 2)] = 2 * (h[n - 2] + h[n - 1])
for (j in 2:(n - 3)) {
  C[j, (j - 1):(j + 1)] = c(h[j], 2 * (h[j] + h[j + 1]), h[j + 1])
}
K = t(D) %*% solve(C) %*% D
lam = 10^(-2)
# Closed-form ridge solution: f_lam = (I + lam K)^{-1} Y
f_lam = solve(diag(rep(1, n)) + lam * K) %*% Y
# Overlay the smoothed curve (red) and the raw data (green) on shared axes.
plot(x, f_lam, xlim = c(min(x), max(x)),
     ylim = c(min(c(Y, f_lam)), max(c(Y, f_lam))),
     col = 2, type = "o", ylab = "val")
par(new = TRUE)
plot(x, Y, xlim = c(min(x), max(x)),
     ylim = c(min(c(Y, f_lam)), max(c(Y, f_lam))),
     col = 3, type = "o", ylab = "val")
# p.176  Penalized nonlinear smoothing: iteratively linearize Y ~ h_f(f)
# (Fisher-scoring / IRLS style) and solve the penalized weighted
# least-squares system (W + lam*K) f = W y_work at each step.
# NOTE: sample() is unseeded, so each run uses different knots.
x = sample(1:5, 20, replace = TRUE) / 10
Y = exp(x + 1)
x = cumsum(x)
n = length(x)
h = diff(x)                            # knot spacings h_j = x_{j+1} - x_j
h_f = function(d) { return(exp(d)) }   # inverse link
# D ((n-2) x n): second-divided-difference operator, one band per row.
D = array(0, dim = c(n - 2, n))
for (j in 1:(n - 2)) {
  D[j, j:(j + 2)] = c(1 / h[j], -(1 / h[j] + 1 / h[j + 1]), 1 / h[j + 1])
}
# C ((n-2) x (n-2)): symmetric tridiagonal, diagonal 2*(h[j] + h[j+1]),
# off-diagonals h[j+1].
C = array(0, dim = c(n - 2, n - 2))
C[1, 1] = 2 * (h[1] + h[2])
C[1, 2] = h[2]
C[(n - 2), (n - 3)] = h[n - 2]
# Last diagonal entry must follow the same 2*(h[j] + h[j+1]) pattern as
# row 1 and the interior rows; the original wrote h[n-1] alone, breaking
# the symmetric tridiagonal structure.
C[(n - 2), (n - 2)] = 2 * (h[n - 2] + h[n - 1])
for (j in 2:(n - 3)) {
  C[j, (j - 1):(j + 1)] = c(h[j], 2 * (h[j] + h[j + 1]), h[j + 1])
}
K = t(D) %*% solve(C) %*% D
lam = 10^(-2)
f = rep(1, n); sig = rep(1, n)
ite = 100
for (l in 1:ite) {
  d = (h_f(f + 0.01) - h_f(f)) / 0.01   # forward-difference d h_f / d f
  S = d * (Y - h_f(f)) / sig            # working score
  w = diag(c((d^2) / (sig^2)))          # working weight matrix W
  y = solve(w) %*% S + f                # working response
  f = solve(w + lam * K) %*% w %*% y    # penalized WLS update
}
# Overlay the fitted mean h_f(f) (red) and the raw data (green).
plot(x, h_f(f), xlim = c(min(x), max(x)),
     ylim = c(min(c(Y, h_f(f))), max(c(Y, h_f(f)))),
     col = 2, type = "o", ylab = "val")
par(new = TRUE)
plot(x, Y, xlim = c(min(x), max(x)),
     ylim = c(min(c(Y, h_f(f))), max(c(Y, h_f(f)))),
     col = 3, type = "o", ylab = "val")
# Author and source
# This script accompanies a write-up of *Generalized, Linear, and Mixed Models*
# (Charles E. McCulloch, Shayle R. Searle); more information at
# https://qiita.com/kozakai-ryouta/items/49e7f101cf0038a392b0
# Attribution: the original author's information is available at the URL above;
# copyright belongs to the original author.
# Content was collected automatically; in case of a rights issue, please contact
# us and the author information will be corrected or the content removed.