オンライン実験の案内

オンライン実験の案内です。
様々な都市または国に関する諸課題を実施します。詳細は下のリンクをクリックしてください。
参加人数に制限があるため、認知情報科学発展実習・特別実習(および11月25日までは認知情報科学基礎)の受講者のみを対象としています。これらを受講していない方には謝金を支払うことが出来ませんので、ご了承ください。

実験に参加する・内容を確認するにはここをクリックしてください。

質問などは松香まで連絡ください。

Disclaimer

このcourselogにあるコードは、主に学部生・博士課程前期向けの講義・演習・実習で例として提示しているもので、原則として直感的に分かりやすいように書いているつもりです。例によってはとても非効率なものもあるでしょうし、「やっつけ」で書いているものもあります。また、普段はMATLABを使用していますので、変な癖がでているかもしれません。これらの例を使用・参考にする場合は、それを踏まえたうえで使用・参考にして下さい。
卒業論文に関する資料:[2015PDF] [word template] [latex template] [表紙] [レポートの書き方] [引用文献など]
A Brief Guide to R (Rのコマンドの手引きおよび例)はこちらをどうぞ

データ解析基礎論B 判別分析

means = matrix(c(52.1,43.3,25.9,74.8,71,23.4),nrow=3,byrow=T)
var = array(c(355.7,203.8,203.8,252.5,180.4,-198.8,-198.8,359.4,435.1,212.6,212.6,166.4),dim=c(2,2,3))
library(plot3D)
library(matrixcalc)
x1 = seq(0,100,length.out=100)
M <- mesh(x1,x1)
inv.var1<-matrix.inverse(var[,,1])
inv.var2<-matrix.inverse(var[,,2])
inv.var3<-matrix.inverse(var[,,3])
dens1=M$x
dens2=M$x
dens3=M$x
for (i.r in 1:100){
  for (i.c in 1:100){
    v = c(M$x[i.r,i.c], M$y[i.r,i.c])
    dens1[i.r, i.c] = (v - means[1,]) %*% inv.var1 %*% (v - means[1,])
    dens2[i.r, i.c] = (v - means[2,]) %*% inv.var2 %*% (v - means[2,])
    dens3[i.r, i.c] = (v - means[3,]) %*% inv.var3 %*% (v - means[3,])
 }
}
contour(exp(-dens1),drawlabels = F, col='red')
contour(exp(-dens2),add=T,drawlabels =F,col='blue')
contour(exp(-dens3),add=T,drawlabels =F,col='green')
points(0.4,0.6,pch=18,cex=4)
dat<-data.frame(writing=c(68,85,50,54,66,35,56,25,43,70),
                interview=c(65,80,95,70,75,55,65,75,50,40), 
                cl=c(rep("A",5),rep("N",5)))


library(MASS)
dat.lda<-lda(cl~.,data=dat)
plot(dat$writing,dat$interview,xlab="writing",ylab="interview",col=dat$cl,pch=20,cex=3,xlim=c(20,110),ylim=c(20,110))
abline(a=dat.lda$scaling[2],b=dat.lda$scaling[1])
intcpt = (dat.lda$scaling[1]*dat.lda$means[1,1]+dat.lda$scaling[2]*dat.lda$means[1,2]+
         dat.lda$scaling[1]*dat.lda$means[2,1]+dat.lda$scaling[2]*dat.lda$means[2,2])/2
a=intcpt/dat.lda$scaling[2]
b=-dat.lda$scaling[1]/dat.lda$scaling[2]
b2=dat.lda$scaling[2]/dat.lda$scaling[1]
x=0:120
lines(x,a+b*x,col='red')
lines(x,-60+b2*x,col='blue')

dat<-read.csv("http://matsuka.info/data_folder/tdkDA01.csv", header=T)
dat.lda<-lda(class~.,dat)
lda.pred<-predict(dat.lda,dat)
table(lda.pred$class, dat$class)

dat.lda<-lda(class~.,dat, CV=T)
cbind(dat.lda$posterior,dat$class)
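CV=T を指定した場合は leave-one-out 交差検証による予測クラスが dat.lda$class に入るので、交差検証での分類表と正答率は例えば次のように確認できます。

# leave-one-out 交差検証での分類表と正答率
table(dat.lda$class, dat$class)
mean(dat.lda$class == dat$class)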

dat<-read.csv("http://matsuka.info/data_folder/tdkDA02.csv",header=T)
dat.lda=lda(class~.,data=dat)
lda.pred<-predict(dat.lda,dat)
table(lda.pred$class, dat$class)
plot(dat.lda, dimen=2, col=as.numeric(lda.pred$class), cex=3) 
plot(dat.lda, dimen=3, col=as.numeric(lda.pred$class),cex=2)

dat.km<-kmeans(dat[,1:6],5)
table(lda.pred$class,dat.km$cluster)

dat<-read.csv("http://www.matsuka.info/data_folder/tdkCFA.csv")
dat1<-subset(dat,dat$popularity<5)
dat2<-subset(dat,dat$popularity>4 & dat$popularity<6)
dat3<-subset(dat,dat$popularity>6)
dat1$popularity="LP";dat2$popularity="MP";dat3$popularity="VP"
datT=rbind(dat1,dat2,dat3)
datT.lda<-lda(popularity~.,datT)

datT.pred<-predict(datT.lda,datT)
table(datT.pred$class,datT$popularity)   
plot(datT.lda,col=c(rep('red',20),rep('blue',28),rep('green',29)),cex=1.5)
plot(datT.lda,col=as.numeric(datT.pred$class),cex=1.5)
plot(datT.lda,col=as.numeric(datT.pred$class)+2,cex=1)
colors=rep("black",nrow(datT))
miss.idx = which(datT.pred$class!= datT$popularity)
colors[miss.idx]='red'
points(datT.pred$x,pch=20,col=colors)
legend("bottomright",c("correct pred.", "missed"), pch=20,col=(1:2))

dat<-read.csv("http://matsuka.info/data_folder/tdkDA01.csv", header=T)
dat.lda<-lda(class~.,dat)
dat.lda.pred<-predict(dat.lda)
dat.glm<-glm(class~.,family=binomial,data=dat)
dat.glm.pred<-predict(dat.glm,type="response")
head(cbind(dat.glm.pred,dat.lda.pred$posterior))
cor(dat.glm.pred,dat.lda.pred$posterior[,2])

認知情報解析演習 DL-V2 ch4

# p136
embed.forwd <- function(W, idx){
  return(W[idx,])
}

h1 <- embed.forwd(W,idx1)
h2 <- embed.forwd(W,idx2)
h = (h1 + h2)/2

embed.dot.forwd <- function(W, h, idx){
  return(O=rowSums(W[idx,]*h))
}

s = embed.dot.forwd(network$W.out,h,batch.y)

sigmoid.forwd <- function(O){
  return(1/(1+exp(-O)))
}

sigmoidWL.forwd <- function(O,target){
  delta = 1e-7
  y = 1/(1+exp(-O)) - delta
  loss=-sum(target*log(y)+(1-target)*log(1 - y))
  return(loss)
}

sigmoidWL.backwd <- function(O,target,dout=1){
  delta = 1e-5
  y = 1/(1+exp(-O)) - delta
  dW = (y - target)*dout
  return(dW)
}

embed.dot.backwd <- function(W,h,idx,dout){
  dh = dout * W[idx,]
  dW = dout * h
  return(list(dh = dh, dW = dW))
}

embed.backwd <- function(W, idx, dout){
  dW = matrix(0,nrow(W),ncol(W))
  for (i.idx in 1:length(idx)){
    dW[idx[i.idx], ] = dW[idx[i.idx], ] + dout[i.idx, ]
  }
  return(dW)
}

# 158

txt2corpus <- function(txt){
  txt = tolower(txt)
  txt = gsub('[.]', ' .',txt)
  words = unlist(strsplit(txt, " "))
  uniq.words = unique(words)
  n.uniq = length(uniq.words)
  n.words = length(words)
  corpus = rep(0,n.words)
  corpus = match(words,uniq.words)
  return(corpus)
}

corp2contxt <- function(corpus){
  len.corp = length(corpus)
  target = corpus[2:(len.corp-1)]
  col1 = corpus[1:(len.corp-2)]
  col2 = corpus[3:len.corp]
  context = cbind(col1,col2)
  return(list(context=context,target = target))
}

init.W2V <- function(n.uniq,size.hidden){
  W.in = matrix(rnorm(n.uniq*size.hidden),nrow=n.uniq)*0.01
  W.out = matrix(rnorm(n.uniq*size.hidden),nrow=n.uniq)*0.01
  return(list(W.in = W.in, W.out= W.out))
}
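後半の確認用コード(running example)では MatMult.forwd を使っていますが、この抜粋には定義がありません。単純な行列積の順伝播を想定した最小限のスケッチを挙げておきます。

# 仮のスケッチ: 行列積レイヤの順伝播(x %*% W を返すだけ)
MatMult.forwd <- function(x, W){
  return(x %*% W)
}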


### running example
network<-init.W2V(8,5)
n.batch = 3;
txt = "You say goodbye and I say hello.."
corp = txt2corpus(txt)
corp = c(8,8,corp)
dat<-corp2contxt(corp)
n.data =  length(dat$target)

n.iter = 3000;
lambda=0.1;
loss = rep(0, n.iter)
for (i.iter in 1:n.iter){
  samp.idx = sample(1:n.data,n.batch)
  batch.c1 = dat$context[samp.idx,1]
  batch.c2 = dat$context[samp.idx,2]
  batch.y = dat$target[samp.idx]
  h1 <- embed.forwd(network$W.in,batch.c1)
  h2 <- embed.forwd(network$W.in,batch.c2)
  h = 0.5 * (h1 + h2)
  # positive only
  s = embed.dot.forwd(network$W.out,h,batch.y)
  z = sigmoidWL.forwd(s,1)
  loss[i.iter] = z
  ds = sigmoidWL.backwd(s, 1, 1)
  dE = embed.dot.backwd(network$W.out,h, batch.y, ds)
  dh = dE$dh*0.5
  dIn1 = embed.backwd(network$W.in,dat$context[samp.idx,1], dh)
  dIn2 = embed.backwd(network$W.in,dat$context[samp.idx,2], dh)
  network$W.out[batch.y,] = network$W.out[batch.y,] - lambda*dE$dW
  network$W.in = network$W.in - lambda*dIn1
  network$W.in = network$W.in - lambda*dIn2
}
plot(loss, type='l')

samp.idx = c(2:6,8,9,1)
batch.c1 = dat$context[samp.idx,1]
batch.c2 = dat$context[samp.idx,2]
batch.y = dat$target[samp.idx]
h1 <- embed.forwd(network$W.in,batch.c1)
h2 <- embed.forwd(network$W.in,batch.c2)
h = 0.5 * (h1 + h2)
s = MatMult.forwd(h,t(network$W.out))
z = sigmoid.forwd(s)
res=cbind(z,batch.y)
par(mfrow=c(8,1))
for (i in 1:8){
  col.spec = rep("black",8)
  col.spec[i]="orange"
  barplot(res[i, 1:8],col=col.spec)
}
# with negative sampling
# p158

unigram.sampler <- function(corpus, target, power, sample.size){
  n.word = length(unique(corpus))
  prob = (as.vector(table(corpus))/n.word)^power
  prob = prob/sum(prob)
  sample.idx = sample((1:n.word)[-target], sample.size, prob = prob[-target])
  return(sort(sample.idx))
}

get.neg.sample <- function(corpus, target, power, sample.size){
  sample.id = matrix(0, nrow = length(target), ncol = sample.size)
  for (i.targ in 1:length(target)){
    sample.id[i.targ,] = unigram.sampler(corpus, target[i.targ], power, sample.size)
  }
  return(sample.id)
}

init.W2V <- function(n.uniq,size.hidden){
  W.in = matrix(rnorm(n.uniq*size.hidden),nrow=n.uniq)*0.01
  W.out = matrix(rnorm(n.uniq*size.hidden),nrow=n.uniq)*0.01
  return(list(W.in = W.in, W.out= W.out))
}

network<-init.W2V(8,5)
n.batch = 3;
txt = "You say goodbye and I say hello.."
corp = txt2corpus(txt)
corp = c(8,8,corp)
dat<-corp2contxt(corp)
n.data =  length(dat$target)

n.iter = 3000;
lambda=0.1;
loss = rep(0, n.iter)
sample.size = 2
for (i.iter in 1:n.iter){
  samp.idx = sample(1:n.data,n.batch)
  batch.c1 = dat$context[samp.idx,1]
  batch.c2 = dat$context[samp.idx,2]
  batch.y = dat$target[samp.idx]
  h1 <- embed.forwd(network$W.in,batch.c1)
  h2 <- embed.forwd(network$W.in,batch.c2)
  h = 0.5 * (h1 + h2)
  # positive
  s = embed.dot.forwd(network$W.out,h,batch.y)
  z = sigmoidWL.forwd(s,1)
  loss[i.iter] = z
  ds = sigmoidWL.backwd(s, 1, 1)
  dE = embed.dot.backwd(network$W.out,h, batch.y, ds)
  dh = dE$dh*0.5
  dIn1 = embed.backwd(network$W.in,dat$context[samp.idx,1], dh)
  dIn2 = embed.backwd(network$W.in,dat$context[samp.idx,2], dh)
  network$W.out[batch.y,] = network$W.out[batch.y,] - lambda*dE$dW
  network$W.in = network$W.in - lambda*dIn1
  network$W.in = network$W.in - lambda*dIn2
  # negative
  neg.set <- get.neg.sample(corp,batch.y, 0.75, sample.size)
  for (i.neg in 1:sample.size){
      s = embed.dot.forwd(network$W.out,h,neg.set[,i.neg])
      z = sigmoidWL.forwd(s,0)
      loss[i.iter] = loss[i.iter] + z
      ds = sigmoidWL.backwd(s, 0, dout=1)
      dE = embed.dot.backwd(network$W.out,h,neg.set[,i.neg], ds)
      dh = dE$dh*0.5
      dIn1 = embed.backwd(network$W.in, dat$context[samp.idx,1],dh)
      dIn2 = embed.backwd(network$W.in, dat$context[samp.idx,2],dh)
      network$W.out[neg.set[,i.neg],] = network$W.out[neg.set[,i.neg],] - lambda/sample.size*dE$dW
      network$W.in = network$W.in - lambda*dIn1
      network$W.in = network$W.in - lambda*dIn2
    }
}
plot(loss, type='l')

samp.idx = c(2:6,8,9,1)
batch.c1 = dat$context[samp.idx,1]
batch.c2 = dat$context[samp.idx,2]
batch.y = dat$target[samp.idx]
h1 <- embed.forwd(network$W.in,batch.c1)
h2 <- embed.forwd(network$W.in,batch.c2)
h = 0.5 * (h1 + h2)
s = MatMult.forwd(h,t(network$W.out))
z = sigmoid.forwd(s)
res=cbind(z,batch.y)
par(mfrow=c(8,1))
for (i in 1:8){
  col.spec = rep("black",8)
  col.spec[i]="orange"
  barplot(res[i, 1:8],col=col.spec)
}

par(mfrow=c(4,1))
for (i in 1:4){
  col.spec = rep("black",8)
  col.spec[i]="orange"
  barplot(res[i, 1:8],col=col.spec)
}

院:認知情報解析学演習 ch23 & 24

dat<-read.csv("http://peach.l.chiba-u.ac.jp/course_folder/OrdinalProbitData-1grp-1.csv" )

model = "
model {
  for ( i in 1:Ntotal ) {
    y[i] ~ dcat( pr[i,1:nYlevels] )
    pr[i,1] <- pnorm( thresh[1] , mu , 1/sigma^2 )
    for ( k in 2:(nYlevels-1) ) {
      pr[i,k] <- max( 0 ,  pnorm( thresh[ k ] , mu , 1/sigma^2 )- pnorm( thresh[k-1] , mu , 1/sigma^2 ) )
    }
    pr[i,nYlevels] <- 1 - pnorm( thresh[nYlevels-1] , mu , 1/sigma^2 )
  }
  mu ~ dnorm( (1+nYlevels)/2 , 1/(nYlevels)^2 )
  sigma ~ dunif( nYlevels/1000 , nYlevels*10 )
  for ( k in 2:(nYlevels-2) ) {  # 1 and nYlevels-1 are fixed, not stochastic
    thresh[k] ~ dnorm( k+0.5 , 1/2^2 )
  }
}
"
writeLines( model , "model.txt" )

y = dat$Y
Ntotal = length(y)
nYlevels = max(y)
thresh = rep(NA,nYlevels-1)
thresh[1] = 1 + 0.5
thresh[nYlevels-1] = nYlevels-1 + 0.5
dataList = list(y = y , nYlevels = nYlevels,thresh = thresh, Ntotal = Ntotal)

parameters = c( "pr" ,  "thresh","mu","sigma")
jagsModel = jags.model( "model.txt", data=dataList, n.chains=3, n.adapt=500 )
update( jagsModel , n.iter=1000)
codaSamples = coda.samples( jagsModel , variable.names=parameters, n.iter=10000, thin=5)
mcmcMat<-as.matrix(codaSamples)

meanT = rowMeans(cbind(mcmcMat[,"thresh[1]"],mcmcMat[,"thresh[2]"],mcmcMat[,"thresh[3]"],mcmcMat[,"thresh[4]"],mcmcMat[,"thresh[5]"],
                   mcmcMat[,"thresh[6]"]))
plot(mcmcMat[,"thresh[1]"],meanT,xlim=c(0.5,7))
points(mcmcMat[,"thresh[2]"],meanT,xlim=c(0.5,7))
points(mcmcMat[,"thresh[3]"],meanT,xlim=c(0.5,7))
points(mcmcMat[,"thresh[4]"],meanT,xlim=c(0.5,7))
points(mcmcMat[,"thresh[5]"],meanT,xlim=c(0.5,7))
points(mcmcMat[,"thresh[6]"],meanT,xlim=c(0.5,7))
HDI.plot(mcmcMat[,"mu"],xlab="mu")
HDI.plot(mcmcMat[,"sigma"],xlab="sigma")


model2 = "
  model {
for ( i in 1:Ntotal ) {
y[i] ~ dcat( pr[i,1:nYlevels] )
pr[i,1] <- pnorm( thresh[1] , mu[x[i]] , 1/sigma[x[i]]^2 )
for ( k in 2:(nYlevels-1) ) {
pr[i,k] <- max( 0 ,  pnorm( thresh[ k ] , mu[x[i]] , 1/sigma[x[i]]^2 )
- pnorm( thresh[k-1] , mu[x[i]] , 1/sigma[x[i]]^2 ) )
}
pr[i,nYlevels] <- 1 - pnorm( thresh[nYlevels-1] , mu[x[i]] , 1/sigma[x[i]]^2 )
}
for ( j in 1:2 ) { # 2 groups
mu[j] ~ dnorm( (1+nYlevels)/2 , 1/(nYlevels)^2 )
sigma[j] ~ dunif( nYlevels/1000 , nYlevels*10 )
}
for ( k in 2:(nYlevels-2) ) {  # 1 and nYlevels-1 are fixed, not stochastic
thresh[k] ~ dnorm( k+0.5 , 1/2^2 )
}
}
"
writeLines( model2 , "model2.txt" )

dat<-read.csv("http://peach.l.chiba-u.ac.jp/course_folder/OrdinalProbitData1.csv")
y = dat$Y
x = as.numeric(dat$X)
Ntotal = length(y)
nYlevels = max(y)
thresh = rep(NA,nYlevels-1)
thresh[1] = 1 + 0.5
thresh[nYlevels-1] = nYlevels-1 + 0.5
dataList = list(y = y , x= x ,nYlevels = nYlevels,thresh = thresh, Ntotal = Ntotal)

parameters = c( "pr" ,  "thresh","mu","sigma")
jagsModel = jags.model( "model2.txt", data=dataList, n.chains=3, n.adapt=500 )
update( jagsModel , n.iter=1000)
codaSamples = coda.samples( jagsModel , variable.names=parameters, n.iter=10000, thin=5)
mcmcMat<-as.matrix(codaSamples)
HDI.plot(mcmcMat[,"mu[1]"])
HDI.plot(mcmcMat[,"mu[2]"])
HDI.plot(mcmcMat[,"mu[1]"]-mcmcMat[,"mu[2]"])


model3 = "
data {
xm  <- mean(x)
xsd <-   sd(x)
for ( i in 1:Ntotal ) {
zx[i] <- ( x[i] - xm ) / xsd
}
}
model {
for ( i in 1:Ntotal ) {
y[i] ~ dcat( pr[i,1:nYlevels] )
pr[i,1] <- pnorm( thresh[1] , mu[i] , 1/sigma^2 )
for ( k in 2:(nYlevels-1) ) {
pr[i,k] <- max( 0 ,  pnorm( thresh[ k ] , mu[i] , 1/sigma^2 )
- pnorm( thresh[k-1] , mu[i] , 1/sigma^2 ) )
}
pr[i,nYlevels] <- 1 - pnorm( thresh[nYlevels-1] , mu[i] , 1/sigma^2 )
mu[i] <- zbeta0 + sum( zbeta * zx[i] )
}
# Priors vague on standardized scale:
zbeta0 ~ dnorm( (1+nYlevels)/2 , 1/(nYlevels)^2 )
zbeta ~ dnorm( 0 , 1/(nYlevels)^2 )

zsigma ~ dunif( nYlevels/1000 , nYlevels*10 )
# Transform to original scale:
beta <- ( zbeta / xsd )
beta0 <- zbeta0  - sum( zbeta * xm / xsd )
sigma <- zsigma
for ( k in 2:(nYlevels-2) ) {  # 1 and nYlevels-1 are fixed
thresh[k] ~ dnorm( k+0.5 , 1/2^2 )
}
}
"
writeLines( model3, con="model3.txt" )

dat<-read.csv("http://peach.l.chiba-u.ac.jp/course_folder/OrdinalProbitData-LinReg-2.csv")
y = dat$Y
x = as.numeric(dat$X)
Ntotal = length(y)
Nx =1
nYlevels = max(y)
thresh = rep(NA,nYlevels-1)
thresh[1] = 1 + 0.5
thresh[nYlevels-1] = nYlevels-1 + 0.5
dataList = list(y = y , Nx= 1,x= x ,nYlevels = nYlevels,thresh = thresh, Ntotal = Ntotal)

parameters = c("thresh","mu","sigma","beta","beta0")
jagsModel = jags.model( "model3.txt", data=dataList, n.chains=3, n.adapt=500 )
update( jagsModel , n.iter=1000)
codaSamples = coda.samples( jagsModel , variable.names=parameters, n.iter=10000, thin=5)
mcmcMat<-as.matrix(codaSamples)
plot(dat$X,dat$Y,pch=20,cex=3)
for (i.plot in 1:100){
  abline(a=mcmcMat[i.plot,"beta0"],b=mcmcMat[i.plot,"beta"],col="orange",lwd=2)
}
HDI.plot(mcmcMat[,"beta"])
#plot(1,1,type='n',xlim=c(0,6),ylim=c(0,6))
#abline(a=0,b=1,lwd=2)
#x.temp = seq(0,6,length.out=100)
#y = dnorm(x.temp,mean=3,sd=1)
#lines(x=-y*3+1,y=x.temp)
#lines(x=c(-1,3),y=c(3,3))
#lines(x=c(-1,0.5),y=c(0.5,0.5),lty=2,col="red")
#lines(x=c(-1,1.5),y=c(1.5,1.5),lty=2,col="red")
#lines(x=c(-1,2.5),y=c(2.5,2.5),lty=2,col="red")
#lines(x=c(-1,3.5),y=c(3.5,3.5),lty=2,col="red")
#lines(x=c(-1,4.5),y=c(4.5,4.5),lty=2,col="red")
#lines(x=c(-1,5.5),y=c(5.5,5.5),lty=2,col="red")

model4="
model {
  for ( i in 1:Ncell ) {
    y[i] ~ dpois( lambda[i] )
    lambda[i] <- exp( a0 + a1[x1[i]] + a2[x2[i]] + a1a2[x1[i],x2[i]] )
  }
  a0 ~ dnorm( yLogMean , 1/(yLogSD*2)^2 )
  for ( j1 in 1:Nx1Lvl ) { a1[j1] ~ dnorm( 0.0 , 1/a1SD^2 ) }
  a1SD ~ dgamma(agammaShRa[1],agammaShRa[2])
  for ( j2 in 1:Nx2Lvl ) { a2[j2] ~ dnorm( 0.0 , 1/a2SD^2 ) }
  a2SD ~ dgamma(agammaShRa[1],agammaShRa[2])
  for ( j1 in 1:Nx1Lvl ) { for ( j2 in 1:Nx2Lvl ) {
    a1a2[j1,j2] ~ dnorm( 0.0 , 1/a1a2SD^2 )
  } }
  a1a2SD ~ dgamma(agammaShRa[1],agammaShRa[2])
  # Convert a0,a1[],a2[],a1a2[,] to sum-to-zero b0,b1[],b2[],b1b2[,] :
  for ( j1 in 1:Nx1Lvl ) { for ( j2 in 1:Nx2Lvl ) {
    m[j1,j2] <- a0 + a1[j1] + a2[j2] + a1a2[j1,j2] # cell means
  } }
  b0 <- mean( m[1:Nx1Lvl,1:Nx2Lvl] )
  for ( j1 in 1:Nx1Lvl ) { b1[j1] <- mean( m[j1,1:Nx2Lvl] ) - b0 }
  for ( j2 in 1:Nx2Lvl ) { b2[j2] <- mean( m[1:Nx1Lvl,j2] ) - b0 }
  for ( j1 in 1:Nx1Lvl ) { for ( j2 in 1:Nx2Lvl ) {
    b1b2[j1,j2] <- m[j1,j2] - ( b0 + b1[j1] + b2[j2] )
  } }
  # Compute predicted proportions:
  for ( j1 in 1:Nx1Lvl ) { for ( j2 in 1:Nx2Lvl ) {
    expm[j1,j2] <- exp(m[j1,j2])
    ppx1x2p[j1,j2] <- expm[j1,j2]/sum(expm[1:Nx1Lvl,1:Nx2Lvl])
  } }
  for ( j1 in 1:Nx1Lvl ) { ppx1p[j1] <- sum(ppx1x2p[j1,1:Nx2Lvl]) }
  for ( j2 in 1:Nx2Lvl ) { ppx2p[j2] <- sum(ppx1x2p[1:Nx1Lvl,j2]) }
}"
writeLines( model4, con="model4.txt" )

dat = read.csv( file="http://peach.l.chiba-u.ac.jp/course_folder/HairEyeColor.csv" )

y=dat$Count
x1=dat$Eye
x2=dat$Hair

x1 = as.numeric(as.factor(x1))
x1levels = levels(as.factor(dat$Eye))
x2 = as.numeric(as.factor(x2))
x2levels = levels(as.factor(dat$Hair))
Nx1Lvl = length(unique(x1))
Nx2Lvl = length(unique(x2))
Ncell = length(y) # number of rows of data, not sum(y)

yLogMean = log(sum(y)/(Nx1Lvl*Nx2Lvl))
yLogSD = log(sd(c(rep(0,Ncell-1),sum(y))))
MeanSD2gamma <- function( mean, sd ) {
  shape = mean^2 / sd^2
  rate = mean / sd^2
  return(data.frame(shape,rate))
}

ModeSD2gamma <- function( mode, sd ) {
  rate = ( mode + sqrt( mode^2 + 4 * sd^2 ) )/( 2 * sd^2 )
  shape = 1 + mode * rate
  return(data.frame(shape,rate))
}
temp=ModeSD2gamma(mode=yLogSD , sd=2*yLogSD)
agammaShRa = unlist(temp )

data_list = list(
  y = y ,x1 = x1 , x2 = x2 ,
  Ncell = Ncell , Nx1Lvl = Nx1Lvl , Nx2Lvl = Nx2Lvl ,
  yLogMean = yLogMean , yLogSD = yLogSD ,agammaShRa = agammaShRa
)

jagsModel =jags.model("model4.txt", data = data_list, n.chains = 3, n.adapt = 500)
update(jagsModel, 500)
codaSamples = coda.samples(jagsModel, variable.names = c("b0", "b1", "b2", "b1b2", "ppx1p", "ppx2p", "ppx1x2p"),
                           n.iter = 10000)
mcmcMat<-as.matrix(codaSamples)
EyeBlueHairBlack <- mcmcMat[,"ppx1x2p[1,1]"]
HDI.plot(EyeBlueHairBlack)


EyeBlue_vs_EyeBrown_at_HairBlack <- mcmcMat[,"b1b2[1,1]"] - mcmcMat[,"b1b2[2,1]"] - mcmcMat[,"b1[2]"] + mcmcMat[,"b1[1]"]
HDI.plot(EyeBlue_vs_EyeBrown_at_HairBlack)

EyeBlue_vs_EyeBrown_at_Blond <- mcmcMat[,"b1b2[1,2]"] - mcmcMat[,"b1b2[2,2]"] - mcmcMat[,"b1[2]"] + mcmcMat[,"b1[1]"]
HDI.plot(EyeBlue_vs_EyeBrown_at_Blond)

diff <- EyeBlue_vs_EyeBrown_at_HairBlack - EyeBlue_vs_EyeBrown_at_Blond
HDI.plot(diff)

基礎自習B02

dec2bin<-function(num, digits=8) {
  bin=c()
  if (num==0){
    bin=0
  } else {
    while(num!=0){
      rem=num%%2
     num=num%/%2
      bin=c(rem,bin)
    }
  }
  if (length(bin) < digits) {
    bin = c(rep(0, digits-length(bin)), bin)  # 指定した桁数になるよう先頭を0で埋める
  }
  return(bin)
}
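動作確認の例(8ビットにゼロ埋めする仕様を仮定したもの):

dec2bin(10)     # 0 0 0 0 1 0 1 0
dec2bin(10, 4)  # 1 0 1 0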

データ解析基礎論B Loglinear Analysis

freq<-c(33,37,7,23)
pref<-factor(c('soba','udon','soba','udon'))
region<-factor(c('east','east','west','west'))
dat<-data.frame(pref,region,freq)
dat.table=table(pref,region)
dat.table[cbind(pref,region)]<-freq

dat.loglinCE_A<-loglin(dat.table, list(1), fit=T,param=T)
dat.loglinCE_B<-loglin(dat.table,list(2), fit=T,param=T)
dat.loglinIND<-loglin(dat.table,list(1,2), fit=T,param=T)
dat.loglinSAT<-loglin(dat.table,list(c(1,2)), fit=T,param=T)
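参考までに、loglin の返り値(lrt と df)を使うと、たとえば独立モデルの尤度比検定の p 値は次のように求められます。

# 独立モデル(主効果のみ)の適合度: 尤度比統計量と自由度から p 値を計算
1 - pchisq(dat.loglinIND$lrt, dat.loglinIND$df)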


freq<-c(9,5,2,4,16,10,26,28)
gender<-factor(c(rep('female',4),rep('male',4)))
affil<-factor(rep(c('L','L','E','E'),2))
circle<-factor(rep(c('tennis','astro'),4))
dat<-data.frame(gender,affil,circle,freq)

dat.table<-table(gender,affil,circle)
dat.table[cbind(gender,affil,circle)]<-freq

dat.loglin2<-loglin(dat.table,list(1), fit=T,param=T)
dat.loglin3<-loglin(dat.table,list(1,3), fit=T,param=T)
dat.loglin4<-loglin(dat.table,list(1,2,3), fit=T,param=T)
dat.loglin5<-loglin(dat.table,list(c(1,3),2), fit=T,param=T)
dat.loglin6<-loglin(dat.table,list(c(1,3),c(1,2)), fit=T,param=T)
dat.loglin7<-loglin(dat.table,list(c(1,3),c(1,2),c(2,3)), fit=T,param=T)
dat.loglin8<-loglin(dat.table,list(c(1,2,3)), fit=T,param=T)

source('http://peach.l.chiba-u.ac.jp/course_folder/cuUtil02.R')

認知情報解析演習  Logistic Regression

library(rjags)
source("http://peach.l.chiba-u.ac.jp/course_folder/HDI_revised.txt")

dat<-read.csv("http://peach.l.chiba-u.ac.jp/course_folder/HtWtData110.csv")
library(plot3D)
w = seq(80,360,length.out=100)
h = seq(50, 75, length.out=100)
M <- mesh(w,h)
P.male = 1/(1+exp(-1*(0.018*M$x+0.7*M$y-50)))

scatter3D(dat$weight, dat$height, dat$male, pch = 19, cex = 2,
          theta = 30, phi = 45, ticktype = "detailed", zlim=c(-0.1,1),ylim=c(50,78),xlim=c(80,360),
          xlab = "weight", ylab = "height", zlab = "P(male)",
          surf = list(x = M$x, y = M$y, z = P.male,facets = NA))


y = dat$male; x = dat$weight; Ntotal = length(y)
dataList = list(y = y, x = x, Ntotal = Ntotal)

model.txt = "
data {
  xm <- mean(x)
  xsd <- sd(x)
  for (i in 1:Ntotal){
    zx[i] = (x[i] - xm)/xsd
  }
}
model {
  for ( i_data in 1:Ntotal ) {
    y[ i_data ] ~ dbern(ilogit( zbeta0 + zbeta * zx[i_data]))
  }
  zbeta0 ~ dnorm(0, 1/2^2)
  zbeta ~ dnorm(0, 1/2^2)

  beta <- zbeta / xsd
  beta0 <- zbeta0 - zbeta * xm/xsd
}"
writeLines(model.txt, "model1.txt")
parameters = c( "beta0" ,  "beta")
jagsModel = jags.model( "model1.txt", data=dataList, n.chains=3, n.adapt=500 )
update( jagsModel , n.iter=1000)
codaSamples = coda.samples( jagsModel , variable.names=parameters, n.iter=10000, thin=5)
mcmcMat<-as.matrix(codaSamples)

plot(dat$weight,dat$male,xlim=c(90,280),yaxt="n",ylab="Male / Female",
     xlab="Weight", cex=2.5)
axis(2,at = 0:1,labels=c("Femal","Male"))
n2plot=100
idx=sample(1:nrow(mcmcMat),n2plot)
temp.x = seq(90,280,length.out = 100)
for (i_sample in 1:n2plot) {
  temp.y = 1/(1+exp(-1*(mcmcMat[idx[i_sample],2] + mcmcMat[idx[i_sample],1]*temp.x)))
  lines(temp.x, temp.y, col='orange', lwd=2)
}


x = cbind(dat$weight,dat$height);Nx = ncol(x)
dataList = list(y = y, x = x, Ntotal = Ntotal, Nx = Nx)

model.txt = "
data {
  for (j in 1:Nx){
    xm[j] <- mean(x[,j])
    xsd[j] <- sd(x[,j])
    for (i in 1:Ntotal){
      zx[i,j] = (x[i,j] - xm[j])/xsd[j]
    }
  }
}
model {
  for ( i_data in 1:Ntotal ) {
    y[ i_data ] ~ dbern(ilogit( zbeta0 + sum(zbeta[1:Nx] * zx[i_data, 1:Nx ])))
  }
  zbeta0 ~ dnorm(0, 1/2^2)
  for (j in 1:Nx){
    zbeta[j] ~ dnorm(0, 1/2^2)
  }
  beta[1:Nx] <- zbeta[1:Nx] / xsd[1:Nx]
  beta0 <- zbeta0 -sum(zbeta[1:Nx] * xm[1:Nx]/xsd[1:Nx])
}"
writeLines(model.txt, "model.txt")

parameters = c( "beta0" ,  "beta")
jagsModel = jags.model( "model.txt", data=dataList, n.chains=3, n.adapt=500 )
update( jagsModel , n.iter=1000)
codaSamples = coda.samples( jagsModel , variable.names=parameters, n.iter=10000, thin=5)
mcmcMat<-as.matrix(codaSamples)
par(mfrow=c(1,3))
HDI.plot(mcmcMat[,3],xlabel='intercept')
HDI.plot(mcmcMat[,1],xlabel='weight')
HDI.plot(mcmcMat[,2],xlabel='height')

par(mfrow=c(1,1))
plot(dat$weight,dat$height,xlab="Weight", ylab="Height", type="n")
n2plot=100
idx=sample(1:nrow(mcmcMat),n2plot)
for (i_sample in 1:n2plot) {
  abline(a=-1*mcmcMat[idx[i_sample],3]/mcmcMat[idx[i_sample],2],
         b=-1*mcmcMat[idx[i_sample],1]/mcmcMat[idx[i_sample],2],col="orange")

}
points(dat$weight,dat$height,pch=paste(dat$male), cex=1.5)

# un-even data
x = rnorm(300)
pr = 1/(1+exp(2*x))
y = pr < runif(300)
plot(x,y)

remove.id = sample(which(y == 0),120)

Ntotal = length(y[-remove.id])
dataList = list(y = y[-remove.id], x = x[-remove.id], Ntotal = Ntotal)

model.txt = "
data {
xm <- mean(x)
xsd <- sd(x)
for (i in 1:Ntotal){
zx[i] = (x[i] - xm)/xsd
}
}
model {
for ( i_data in 1:Ntotal ) {
y[ i_data ] ~ dbern(ilogit( zbeta0 + zbeta * zx[i_data]))
}
zbeta0 ~ dnorm(0, 1/2^2)
zbeta ~ dnorm(0, 1/2^2)

beta <- zbeta / xsd
beta0 <- zbeta0 - zbeta * xm/xsd
}"
writeLines(model.txt, "model1.txt")
parameters = c( "beta0" ,  "beta")
jagsModel = jags.model( "model1.txt", data=dataList, n.chains=3, n.adapt=500 )
update( jagsModel , n.iter=1000)
codaSamples = coda.samples( jagsModel , variable.names=parameters, n.iter=10000, thin=5)
mcmcMat<-as.matrix(codaSamples)

plot(x[-remove.id],y[-remove.id],xlim=c(-3,3))
n2plot=100
idx=sample(1:nrow(mcmcMat),n2plot)
temp.x = seq(-3,3,length.out = 100)
for (i_sample in 1:n2plot) {
  temp.y = 1/(1+exp(-1*(mcmcMat[idx[i_sample],2] + mcmcMat[idx[i_sample],1]*temp.x)))
  lines(temp.x, temp.y, col='orange', lwd=2)
}


x1 = rnorm(150)
x2 = x1*0.9+rnorm(150,0,0.5)
pr = 1/(1+exp(x1+x2))
y = pr < runif(150)
Ntotal = length(y)
dataList = list(y = y, x = cbind(x1,x2), Ntotal = Ntotal, Nx = 2)


model.txt = "
data {
for (j in 1:Nx){
xm[j] <- mean(x[,j])
xsd[j] <- sd(x[,j])
for (i in 1:Ntotal){
zx[i,j] = (x[i,j] - xm[j])/xsd[j]
}
}
}
model {
for ( i_data in 1:Ntotal ) {
y[ i_data ] ~ dbern(ilogit( zbeta0 + sum(zbeta[1:Nx] * zx[i_data, 1:Nx ])))
}
zbeta0 ~ dnorm(0, 1/2^2)
for (j in 1:Nx){
zbeta[j] ~ dnorm(0, 1/2^2)
}
beta[1:Nx] <- zbeta[1:Nx] / xsd[1:Nx]
beta0 <- zbeta0 -sum(zbeta[1:Nx] * xm[1:Nx]/xsd[1:Nx])
}"
writeLines(model.txt, "model.txt")

parameters = c( "beta0" ,  "beta")
jagsModel = jags.model( "model.txt", data=dataList, n.chains=3, n.adapt=500 )
update( jagsModel , n.iter=1000)
codaSamples = coda.samples( jagsModel , variable.names=parameters, n.iter=10000, thin=5)
mcmcMat<-as.matrix(codaSamples)
plot(x1,x2,xlab="x1", ylab="x2", type="n")
n2plot=100
idx=sample(1:nrow(mcmcMat),n2plot)
for (i_sample in 1:n2plot) {
  abline(a=-1*mcmcMat[idx[i_sample],3]/mcmcMat[idx[i_sample],2],
         b=-1*mcmcMat[idx[i_sample],1]/mcmcMat[idx[i_sample],2],col="orange")

}
points(x1,x2,pch=paste(y), cex=1.5)

# guessing
y = dat$male; x = dat$weight; Ntotal = length(y)
dataList = list(y = y, x = x, Ntotal = Ntotal)

model.txt = "
data {
xm <- mean(x)
xsd <- sd(x)
for (i in 1:Ntotal){
zx[i] = (x[i] - xm)/xsd
}
}
model {
for ( i_data in 1:Ntotal ) {
y[ i_data ] ~ dbern(mu[i_data])
mu[i_data] <- (guess*0.5 + (1-guess)*ilogit( zbeta0 + zbeta * zx[i_data]))
}
zbeta0 ~ dnorm(0, 1/2^2)
zbeta ~ dnorm(0, 1/2^2)
guess ~ dbeta(1,9)
beta <- zbeta / xsd
beta0 <- zbeta0 - zbeta * xm/xsd
}"
writeLines(model.txt, "model1.txt")
parameters = c( "beta0" ,  "beta", "guess")
jagsModel = jags.model( "model1.txt", data=dataList, n.chains=3, n.adapt=500 )
update( jagsModel , n.iter=1000)
codaSamples = coda.samples( jagsModel , variable.names=parameters, n.iter=10000, thin=5)
mcmcMat<-as.matrix(codaSamples)

par(mfrow=c(1,3))
HDI.plot(mcmcMat[,2],xlabel='intercept')
HDI.plot(mcmcMat[,1],xlabel='weight')
HDI.plot(mcmcMat[,3],xlabel='guessing')

par(mfrow=c(1,1))
plot(dat$weight,dat$male,xlim=c(90,280),yaxt="n",ylab="Male / Female",
     xlab="Weight", cex=2.5)
axis(2,at = 0:1,labels=c("Femal","Male"))
n2plot=100
idx=sample(1:nrow(mcmcMat),n2plot)
temp.x = seq(90,280,length.out = 100)
for (i_sample in 1:n2plot) {
  temp.y = mcmcMat[idx[i_sample],3]/2+(1-mcmcMat[idx[i_sample],3])*1/(1+exp(-1*(mcmcMat[idx[i_sample],2] + mcmcMat[idx[i_sample],1]*temp.x)))
  lines(temp.x, temp.y, col='orange', lwd=2)
}

# nominal predictors
model.txt = "
model {
  for ( i.data in 1:Ntotal ) {
    y[ i.data ] ~ dbin(mu[i.data],N[i.data])
    mu[i.data] ~ dbeta(omega[x[i.data]]*(kappa-2)+1,(1-omega[x[i.data]])*(kappa-2)+1)
  }
  for (i.pos in 1:Npos){
    omega[i.pos] <- ilogit(a0+a[i.pos])
    a[i.pos] ~ dnorm(0.0, 1/aSigma^2)
  }
  a0 ~  dnorm(0,1/2^2)
  aSigma ~ dgamma(1.64, 0.32)
  kappa <- kappaMinusTwo +2
  kappaMinusTwo ~ dgamma(0.01,0.01)
  for (i.pos in 1:Npos){
    m[i.pos] <- a0+a[i.pos]
  }
  b0 <- mean(m[1:Npos])
  for (i.pos in 1:Npos){
    b[i.pos] <- m[i.pos] - b0
  }
}"
writeLines(model.txt, "model.txt")

dat<-read.csv("http://www.matsuka.info/data_folder/BattingAverage.csv")
y = dat$Hits
N = dat$AtBats
x = dat$PriPosNumber
Ntotal = length(y)
Npos = length(unique(x))
dataList = list(y = y, x = x, N = N, Ntotal = Ntotal, Npos = Npos)
parameters = c( "b0" ,  "b", "omega")
jagsModel = jags.model( "model.txt", data=dataList, n.chains=3, n.adapt=500 )
update( jagsModel , n.iter=1000)
codaSamples = coda.samples( jagsModel , variable.names=parameters, n.iter=10000, thin=5)
mcmcMat<-as.matrix(codaSamples)

par(mfrow=c(3,3))
for (i.pos in 1:9){
  HDI.plot(mcmcMat[,i.pos+10])
}

par(mfrow=c(2,2))
HDI.plot(mcmcMat[,1]-mcmcMat[,2])
HDI.plot(mcmcMat[,2]-mcmcMat[,3])
HDI.plot(mcmcMat[,11]-mcmcMat[,12])
HDI.plot(mcmcMat[,12]-mcmcMat[,13])

# softmax regression
x1 = runif(500, min=-2, max = 2)
x2 = runif(500, min=-2, max = 2)
b0 = c(0,-3,-4,-5)
b1 = c(0,-5,-1,10)
b2 = c(0,-5,10,-1)
l1 = b0[1]+b1[1]*x1+b2[1]*x2
l2 = b0[2]+b1[2]*x1+b2[2]*x2
l3 = b0[3]+b1[3]*x1+b2[3]*x2
l4 = b0[4]+b1[4]*x1+b2[4]*x2
p1 = exp(l1)/(exp(l1)+exp(l2)+exp(l3)+exp(l4))
p2 = exp(l2)/(exp(l1)+exp(l2)+exp(l3)+exp(l4))
p3 = exp(l3)/(exp(l1)+exp(l2)+exp(l3)+exp(l4))
p4 = exp(l4)/(exp(l1)+exp(l2)+exp(l3)+exp(l4))
ps = cbind(p1,p2,p3,p4)
y = apply(ps,1,which.max)
plot(x1,x2,pch=y,col=y)

b0 = c(0,-4,-1,-1)
b1 = c(0,-5,1,3)
b2 = c(0,0,-5,3)
l1 = b0[1]+b1[1]*x1+b2[1]*x2
l2 = b0[2]+b1[2]*x1+b2[2]*x2
l3 = b0[3]+b1[3]*x1+b2[3]*x2
l4 = b0[4]+b1[4]*x1+b2[4]*x2
p1 = exp(l1)/(exp(l1)+exp(l2)+exp(l3)+exp(l4))
p2 = exp(l2)/(exp(l1)+exp(l2)+exp(l3)+exp(l4))
p3 = exp(l3)/(exp(l1)+exp(l2)+exp(l3)+exp(l4))
p4 = exp(l4)/(exp(l1)+exp(l2)+exp(l3)+exp(l4))
ps = cbind(p1,p2,p3,p4)
y = apply(ps,1,which.max)
plot(x1,x2,pch=y,col=y)

p12 = pmax(p1,p2)
p34 = pmax(p3,p4)
y12vs34 = apply(cbind(p12,p34),1,which.max)
plot(x1,x2,pch=y12vs34,col=y12vs34)
y1vs2 = apply(cbind(p1,p2),1,which.max)
points(x1,x2,pch=y1vs2+2,col=y1vs2+2)
y3vs4 = apply(cbind(p3,p4),1,which.max)
points(x1,x2,pch=y3vs4+6,col=y3vs4+6)



model.txt = "
data {
  for ( j in 1:Nx ) {
    xm[j]  <- mean(x[,j])
    xsd[j] <-   sd(x[,j])
    for ( i in 1:Ntotal ) {
      zx[i,j] <- ( x[i,j] - xm[j] ) / xsd[j]
    }
  }
}
model {
  for ( i in 1:Ntotal ) {
    y[i] ~ dcat(mu[1:Nout,i])
    mu[1:Nout,i] <- explambda[1:Nout,i]/sum(explambda[1:Nout,i])
    for (k in 1:Nout){
      explambda[k,i]=exp(zbeta0[k] + sum(zbeta[k,1:Nx] * zx[i, 1:Nx ]))
    }
  }
  zbeta0[1] = 0
  for (j in 1:Nx){
    zbeta[1,j] <- 0
  }
  for (k in 2:Nout){
    zbeta0[k] ~ dnorm(0, 1/2^2)
    for (j in 1:Nx){
      zbeta[k,j]~dnorm(0, 1/2^2)
    }
  }
  for ( k in 1:Nout ) {
    beta[k,1:Nx] <- zbeta[k,1:Nx] / xsd[1:Nx] 
    beta0[k] <- zbeta0[k] - sum( zbeta[k,1:Nx] * xm[1:Nx] / xsd[1:Nx] )
  }
}"
writeLines(model.txt, "model.txt")

dat<-read.csv( "http://peach.l.chiba-u.ac.jp/course_folder/SoftmaxRegData2.csv" )
y = dat$Y
x = cbind(dat[,1],dat[,2])
Ntotal = length(y)
Nout = length(unique(y))
dataList = list(y = y, x = x, Nx = 2, Ntotal = Ntotal, Nout = Nout)
parameters = c( "beta0" ,  "beta")
jagsModel = jags.model( "model.txt", data=dataList, n.chains=3, n.adapt=500 )
update( jagsModel , n.iter=1000)
codaSamples = coda.samples( jagsModel , variable.names=parameters, n.iter=10000, thin=5)
mcmcMat<-as.matrix(codaSamples)
par(mfrow=c(1,3))
HDI.plot(mcmcMat[,7+0],xlab='intercept')
HDI.plot(mcmcMat[,1+0],xlab='b1')
HDI.plot(mcmcMat[,4+0],xlab='b2')

model = "
data {
  for ( j in 1:Nx ) {
    xm[j]  <- mean(x[,j])
    xsd[j] <-   sd(x[,j])
    for ( i in 1:Ntotal ) {
      zx[i,j] <- ( x[i,j] - xm[j] ) / xsd[j]
    }
  }
}
# Specify the model for standardized data:
model {
  for ( i in 1:Ntotal ) {
    y[i] ~ dcat( mu[1:Nout,i] ) 
    mu[1,i] <- phi[1,i]
    mu[2,i] <- phi[2,i] * (1-phi[1,i])
    mu[3,i] <- phi[3,i] * (1-phi[2,i]) * (1-phi[1,i])
    mu[4,i] <- (1-phi[3,i]) * (1-phi[2,i]) * (1-phi[1,i])
    for ( r in 1:(Nout-1) ) {
      phi[r,i] <- ilogit( zbeta0[r] + sum( zbeta[r,1:Nx] * zx[i,1:Nx] ) )
    }
  }
  for ( r in 1:(Nout-1) ) { 
    zbeta0[r] ~ dnorm( 0 , 1/20^2 )  
    for ( j in 1:Nx ) {
      zbeta[r,j] ~ dnorm( 0 , 1/20^2 )
    }
  }
  for ( r in 1:(Nout-1) ) {
    beta[r,1:Nx] <- zbeta[r,1:Nx] / xsd[1:Nx] 
    beta0[r] <- zbeta0[r] - sum( zbeta[r,1:Nx] * xm[1:Nx] / xsd[1:Nx] )
  }
}
" 
writeLines( model , "model.txt" )

dat<-read.csv( "http://peach.l.chiba-u.ac.jp/course_folder/CondLogistRegData1.csv" )
y = dat$Y
x = cbind(dat[,1],dat[,2])
Ntotal = length(y)
Nout = length(unique(y))
dataList = list(y = y, x = x, Nx = 2, Ntotal = Ntotal, Nout = Nout)
parameters = c( "beta0" ,  "beta")
jagsModel = jags.model( "model.txt", data=dataList, n.chains=3, n.adapt=500 )
update( jagsModel , n.iter=1000)
codaSamples = coda.samples( jagsModel , variable.names=parameters, n.iter=10000, thin=5)
mcmcMat<-as.matrix(codaSamples)
par(mfrow=c(1,1))
plot(x[,1],x[,2],col=y)
n2plot=100
idx=sample(1:nrow(mcmcMat),n2plot)
temp.x = seq(-3,3,length.out = 100)
for (i.cat in 0:2){
  for (i_sample in 1:n2plot) {
    abline(a=-1*mcmcMat[idx[i_sample],7+i.cat]/mcmcMat[idx[i_sample],4+i.cat],
           b=-1*mcmcMat[idx[i_sample],1+i.cat]/mcmcMat[idx[i_sample],4+i.cat],col="orange")
  }
}
model2 = "
data {
  for ( j in 1:Nx ) {
    xm[j]  <- mean(x[,j])
    xsd[j] <-   sd(x[,j])
      for ( i in 1:Ntotal ) {
        zx[i,j] <- ( x[i,j] - xm[j] ) / xsd[j]
      }
    }
  }
model {
  for ( i in 1:Ntotal ) {
    y[i] ~ dcat( mu[1:Nout,i] ) 
    mu[1,i] <- phi[2,i] * phi[1,i]
    mu[2,i] <- (1-phi[2,i]) * phi[1,i]
    mu[3,i] <- phi[3,i] * (1-phi[1,i])
    mu[4,i] <- (1-phi[3,i]) * (1-phi[1,i])
    for ( r in 1:(Nout-1) ) {
      phi[r,i] <- ilogit( zbeta0[r] + sum( zbeta[r,1:Nx] * zx[i,1:Nx] ) )
    }
  }
  for ( r in 1:(Nout-1) ) { 
    zbeta0[r] ~ dnorm( 0 , 1/20^2 )  
    for ( j in 1:Nx ) {
      zbeta[r,j] ~ dnorm( 0 , 1/20^2 )
    }
  }
  for ( r in 1:(Nout-1) ) {
    beta[r,1:Nx] <- zbeta[r,1:Nx] / xsd[1:Nx] 
    beta0[r] <- zbeta0[r] - sum( zbeta[r,1:Nx] * xm[1:Nx] / xsd[1:Nx] )
  }
}" 
writeLines( model2 , con="TEMPmodel.txt" )

基礎実習B01

mk.pay.mat <- function(food, cost){
  A = (food - cost)/2
  B = 0
  C = food
  D = food / 2
  pay.mat = matrix(c(A,B,C,D), nrow=2)
  return(pay.mat)
}

is.ESS2s <- function(pay.mat){
  ESS1 = pay.mat[1,1] > pay.mat[2,1]
  ESS2 = pay.mat[2,2] > pay.mat[1,2]
  return(c(ESS1,ESS2))
}
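動作確認の例(餌 6・コスト 10 のタカ・ハトゲームでは、どちらの純粋戦略も ESS になりません):

is.ESS2s(mk.pay.mat(6, 10))   # FALSE FALSE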

pay.mat = mk.pay.mat(6,10)
p = c(0.5, 0.5)
calc.W <- function(pay.mat, p){
  # W1 = sum(p*pay.mat[1,])
  # W2 = sum(p*pay.mat[2,])
  # W = c(W1, W2)
  W = colSums(t(pay.mat)*p)

  # W.ave = pay.mat[1,1]*p[1]^2 + pay.mat[2,1]*p[1]*p[2] +
  #         pay.mat[1,2]*p[2]*p[1] + pay.mat[2,2]*p[2]^2
  W.ave = sum(pay.mat * outer(p,p))
  return(list(W=W, W.ave = W.ave))
}

food = 6; cost =10;
maxIter = 1000; dt = 0.1
pH = 0.2
p = c(pH, (1-pH))
p.hist = matrix(0, nrow=maxIter, ncol =2)
p.hist[1,] = p
pay.mat = mk.pay.mat(food, cost)
for (i.iter in 2:maxIter) {
   Ws <- calc.W(pay.mat, p)
   # p[1] = p[1] + p[1]*(Ws$W[1] - Ws$W.ave)/Ws$W.ave*dt
   # p[2] = p[2] + p[2]*(Ws$W[2] - Ws$W.ave)/Ws$W.ave*dt
   p = p + p*(Ws$W - Ws$W.ave)/Ws$W.ave*dt
   p.hist[i.iter, ] = p
}

HD.game <- function(food, cost, pH, maxIter = 1000) {
  dt = 0.1
  p = c(pH, (1-pH))
  p.hist = matrix(0, nrow=maxIter, ncol =2)
  p.hist[1,] = p
  pay.mat = mk.pay.mat(food, cost)
  for (i.iter in 2:maxIter) {
     Ws <- calc.W(pay.mat, p)
     p = p + p*(Ws$W - Ws$W.ave)/Ws$W.ave*dt
     p.hist[i.iter, ] = p
  }
  plot(p.hist[,1],type='l', ylim=c(-0.1, 1.1), lwd=2, ylab="proportion", xlab="time")
  lines(p.hist[,2],type='l',col='red',lwd=2)
  legend("topleft",c("Hawk","Dove"),col=c("black","red"),lwd=c(2,2))
 }
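定義した HD.game の実行例:

HD.game(food = 6, cost = 10, pH = 0.2)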

# cake game
n.cake = 10
pay.mat = matrix(0,n.cake+1,n.cake+1)
for (i.cake in 1:n.cake){
  pay.mat[(i.cake+1),1:(n.cake-i.cake+1)] =i.cake
}
p = runif(11)
p = p/sum(p)
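ケーキゲームは利得行列と初期分布だけを示したので、上の calc.W を使ったレプリケータ・ダイナミクスの最小限のスケッチを挙げておきます(反復回数と dt はこちらで仮に置いた値です)。

# 仮のスケッチ: 11戦略(要求量0〜10)のレプリケータ・ダイナミクス
maxIter = 1000; dt = 0.1
p.hist = matrix(0, nrow = maxIter, ncol = n.cake + 1)
p.hist[1, ] = p
for (i.iter in 2:maxIter) {
  Ws <- calc.W(pay.mat, p)
  p = p + p * (Ws$W - Ws$W.ave) / Ws$W.ave * dt
  p = p / sum(p)   # 数値誤差の蓄積を防ぐため正規化
  p.hist[i.iter, ] = p
}
matplot(p.hist, type = 'l', lty = 1, xlab = "time", ylab = "proportion")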

広域システム特別講義II L05

card.value<-function(adj.cards) {
  sum.cards=sum(adj.cards)
  if (any(adj.cards==1) & sum.cards<=11) {
    sum.cards=sum.cards+10;
    usableA=1          #true
  } else {usableA=2}  #false
  return(c(sum.cards,usableA))
}

# function to calc. reward
calc.reward<-function(p.val,d.val) {
  if (p.val>21) { reward=-1
  } else {if (d.val>21) { reward=1
  } else {if (p.val==d.val) {reward=0
  } else{ reward=ifelse(p.val>d.val,1,-1)}
  }}}

# main function
BJ_MC_fixedPolicy<-function(policy=20,maxIter=1e6){
  rew.sum=array(0,dim=c(10,10,2))
  rew.count=array(0,dim=c(10,10,2))
  for (i_play in 1:maxIter) {
    cards=sample(rep(1:13,4))
    player=cards[1:2];  adj.player=pmin(player,10)
    dealer=cards[3:4];  adj.dealer=pmin(dealer,10)
    cards=cards[-(1:4)]
    d.val=card.value(adj.dealer)
    p.val=card.value(adj.player)
    state.hist=c(adj.dealer[1],p.val[1],p.val[2])
    while (p.val[1] < policy) {
      player=c(player,cards[1]); adj.player=pmin(player,10)
      cards=cards[-1]
      p.val=card.value(adj.player)
      state.hist=rbind(state.hist,c(adj.dealer[1],p.val[1],p.val[2]))
    }
    while (d.val[1] < 17) {
      dealer=c(dealer,cards[1]); adj.dealer=pmin(dealer,10)
      cards=cards[-1]
      d.val=card.value(adj.dealer)
    }
    rew=calc.reward(p.val[1],d.val[1])
    n.state=nrow(state.hist)
    if (is.null(n.state)) {
      n.state=1
      state.hist=t(as.matrix(state.hist))
    }
    for (i_state in 1:n.state) {
      if (state.hist[i_state,2] > 11 & state.hist[i_state,2] < 22) {
        rew.sum[state.hist[i_state,1],(state.hist[i_state,2]-11),state.hist[i_state,3]] = rew.sum[state.hist[i_state,1],(state.hist[i_state,2]-11),state.hist[i_state,3]]+rew
        rew.count[state.hist[i_state,1],(state.hist[i_state,2]-11),state.hist[i_state,3]]=rew.count[state.hist[i_state,1],(state.hist[i_state,2]-11),state.hist[i_state,3]]+1
      }
    }
  }
  return(rew.sum/rew.count)
}

# function 2 plot results
plot.BJ_MC<-function(V){
  par(mfrow=c(1,2))
  image(V[,,1],main="with usable Ace",xaxt='n',yaxt='n',
        xlab="Dealer showing",ylab="Player sum")
  axis(1,at=seq(0,1,length.out=10),label=c("A",paste(2:10)))
  axis(2,at=seq(0,1,length.out=10),label=12:21)
  image(V[,,2],main="without usable Ace",xaxt='n',yaxt='n',
        xlab="Dealer showing",ylab="Player sum")
  axis(1,at=seq(0,1,length.out=10),label=c("A",paste(2:10)))
  axis(2,at=seq(0,1,length.out=10),label=12:21)
}

V=BJ_MC_fixedPolicy(17)
plot.BJ_MC(V)

BJ_MC<-function(maxIter=1e6){
  rew.sum=array(0,dim=c(10,10,2,2))
  rew.count=array(1,dim=c(10,10,2,2))
  Q=array(0,dim=c(10,10,2))
  V=array(0,dim=c(10,10,2))
  policy=array(sample(0:1,10*10*2,replace=T),dim=c(10,10,2))
  # policy: 1 = hit, 0 = stay
  for (i_play in 1:maxIter) {
    # initial draw
    cards=sample(c(rep(1:10,4),rep(10,12)))
    player=cards[1:2]
    dealer=cards[3:4]
    cards=cards[-(1:4)]
    d.val=card.value(dealer)
    p.val=card.value(player)

    while( p.val[1] < 12 ) {
      player=c(player,cards[1])
      cards=cards[-1]
      p.val=card.value(player)
    }
    action=sample(0:1,1)
    state.hist=c(dealer[1],p.val[1],p.val[2],(action+1))

    # player's action
    while (action==1 & p.val[1]<22) {
      player=c(player,cards[1])
      cards=cards[-1]
      p.val=card.value(player)
      state.hist=rbind(state.hist,c(dealer[1],p.val[1],p.val[2],(action+1)))
      if (p.val[1]<22) {
        action=policy[dealer[1],(p.val[1]-11),p.val[2]]
      }
    }

    # dealer's action
    while (d.val[1]<17) {
      dealer=c(dealer,cards[1])
      cards=cards[-1]
      d.val=card.value(dealer)
    }
    rew=calc.reward(p.val[1],d.val[1])
    n.state=nrow(state.hist)
    if (is.null(n.state)) {
      n.state=1
      state.hist=t(as.matrix(state.hist))
    }
    for (i_state in 1:n.state) {
      if (state.hist[i_state,2]>11 & state.hist[i_state,2]<22) {
        ind=state.hist[i_state,]-c(0,11,0,0)
        rew.sum[ind[1],ind[2],ind[3],ind[4]]= rew.sum[ind[1],ind[2],ind[3],ind[4]]+rew
        rew.count[ind[1],ind[2],ind[3],ind[4]]=rew.count[ind[1],ind[2],ind[3],ind[4]]+1
        Q=rew.sum/rew.count;
        policy[,,1]=Q[,,1,1] < Q[,,1,2]
        policy[,,2]=Q[,,2,1] < Q[,,2,2]
      }
    }
  }
  V[,,1]=(rew.sum[,,1,1]+rew.sum[,,1,2])/(rew.count[,,1,1]+rew.count[,,1,2])
  V[,,2]=(rew.sum[,,2,1]+rew.sum[,,2,2])/(rew.count[,,2,1]+rew.count[,,2,2])
  return(list(policy,V,Q))
}


# function 2 plot results
plot.BJ_MC2<-function(V){
  par(mfrow=c(2,2))
  image(V[[2]][,,1],main="Utility with usable Ace",xaxt='n',yaxt='n',
        xlab="Dealer showing",ylab="Player sum")
  axis(1,at=seq(0,1,length.out=10),label=c("A",paste(2:10)))
  axis(2,at=seq(0,1,length.out=10),label=12:21)
  image(V[[2]][,,2],main="Utility without usable Ace",xaxt='n',yaxt='n',
        xlab="Dealer showing",ylab="Player sum")
  axis(1,at=seq(0,1,length.out=10),label=c("A",paste(2:10)))
  axis(2,at=seq(0,1,length.out=10),label=12:21)
  image(V[[1]][,,1],main="Policy with usable Ace",xaxt='n',yaxt='n',
        xlab="Dealer showing",ylab="Player sum")
  axis(1,at=seq(0,1,length.out=10),label=c("A",paste(2:10)))
  axis(2,at=seq(0,1,length.out=10),label=12:21)
  image(V[[1]][,,2],main="Policy without usable Ace",xaxt='n',yaxt='n',
        xlab="Dealer showing",ylab="Player sum")
  axis(1,at=seq(0,1,length.out=10),label=c("A",paste(2:10)))
  axis(2,at=seq(0,1,length.out=10),label=12:21)
}

V=BJ_MC(5e6)
plot.BJ_MC2(V)

# TD0 model
TD0.ex1<-function(maxItr,alpha,gamma) {
  V=c(0,rep(0.5,5),0)
  V.hist=matrix(0,nrow=maxItr+1,ncol=5)
  V.hist[1,]=V[2:6]
  P.act=matrix(0.5,ncol=7,nrow=2)
  for (i_rep in 1:maxItr) {
    state=5
    while (state!=1 & state!=7) {
      action=sample(c(-1,1),1,prob=P.act[,state])
      state.old=state
      state=state+action
      r=ifelse(state==7,1,0)
      V[state.old]=V[state.old]+alpha*(r+gamma*V[state]-V[state.old])
    }
    V.hist[(i_rep+1),]=V[2:6]
  }
  return(V.hist)
}

# (re)creating Fig 6.6
true.V=1:5*(1/6);
res=TD0.ex1(1000,0.1,1)
par(mfrow=c(1,1))
plot(true.V,type='o',pch=15,ylim=c(0,1),ylab="Value",xaxt="n",
     xlab="State",xlim=c(0.5,5.5),cex=2,lwd=2)
axis(1,at=1:5,labels=c("A","B","C","D","E"))
cols=c('red','blue','green','cyan','magenta')
ns=c(1,2,11,101,1001)
for (i_lines in 1:5) {
  lines(res[ns[i_lines],],type='o',pch=15+i_lines,cex=2,lwd=2,col=cols[i_lines])
}
legend('topleft',c('True value','t=0','t=1','t=10','t=100','t=1000'),
       col=c('black',cols),pch=15:20,lwd=1.5)

# SARSA
sarsa.ex6.5<-function(maxItr,alpha,gamma,epsilon) {
  # field size: 7row x 10column
  # actions: 1-up, 2-right, 3-down, 4-left
  act.V=matrix(c(1,0,0,1,-1,0,0,-1),nrow=4,byrow=T)
wind=matrix(c(0,0,0,0,0,0,1,0,1,0,1,0,2,0,2,0,1,0,0,0),byrow=T,nrow=10)
goal=c(4,8)
Qs=array(0,dim=c(7,10,4))
for (i_rep in 1:maxItr) {
  state=c(4,1) # start
  if (runif(1) > epsilon) {
    move=which.max(Qs[state[1],state[2],])
  } else { move=sample(1:4,1)}
  while (!all(state==goal)) {
    st.old=state
    mv.old=move
    state=state+act.V[move,]+wind[state[2],]
    if (state[1]<1) {state[1]=1}
    if (state[1]>7) {state[1]=7}
    if (state[2]<1) {state[2]=1}
    if (state[2]>10) {state[2]=10}
    if (runif(1) > epsilon) {
      move=which.max(Qs[state[1],state[2],])
    } else { move=sample(1:4,1)}
    rew=ifelse(all(state==goal),0,-1)
    Qs[st.old[1],st.old[2],mv.old]=Qs[st.old[1],st.old[2],mv.old]+
      alpha*(rew+gamma*Qs[state[1],state[2],move]-Qs[st.old[1],st.old[2],mv.old])
  }
}
return(Qs)
}
# running example
Qs=sarsa.ex6.5(5e6,0.1,1,0.1)
# sim optimal actions
state=c(4,1);goal=c(4,8);
state.hist=state
while (!all(state==goal)) {
  moveID=which.max(Qs[state[1],state[2],])
  state=state+act.V[moveID,]+wind[state[2],]
  if (state[1]<1) {state[1]=1}
  if (state[1]>7) {state[1]=7}
  if (state[2]<1) {state[2]=1}
  if (state[2]>10) {state[2]=10}
  state.hist=rbind(state.hist,state)
}
# plotting results
plot(0,0,type='n',xlim=c(0,11),ylim=c(0,8),xlab="",ylab="",
     main="Learned policies -- Sarsa")
lines(1,4,type='p',pch=19,col='red',cex=2)
lines(8,4,type='p',pch=19,col='red',cex=2)
dirs=c("up","right","down","left" )
for (i_row in 1:7) {
  for (i_col in 1:10) {
    best.move=dirs[which.max(Qs[i_row,i_col,])]
    text(i_col,i_row,best.move)
  }
}
lines(state.hist[,2],state.hist[,1],col="red",lwd=2)

# Q learning
Qlearn.ex6.5<-function(maxItr,alpha,gamma,epsilon) {
  # field size: 7row x 10column
  # horizontal move ->  COLUMN
  # vertical move     ->  ROW
  # effect of wind     ->  ROW
  # actions: 1-up, 2-right, 3-down, 4-left
  act.V=matrix(c(1,0,0,1,-1,0,0,-1),nrow=4,byrow=T)
  wind=matrix(c(0,0,0,0,0,0,1,0,1,0,1,0,2,0,2,0,1,0,0,0),byrow=T,nrow=10)
  goal=c(4,8)
  Qs=array(0,dim=c(7,10,4))
  for (i_rep in 1:maxItr) {
    state=c(4,1) # start
    while (!all(state==goal)) {
      if (runif(1) > epsilon) {
        move=which.max(Qs[state[1],state[2],])
      } else { move=sample(1:4,1)}
      sIDX=state
      state=state+act.V[move,]+wind[state[2],]
      if (state[1]<1) {state[1]=1}
      if (state[1]>7) {state[1]=7}
      if (state[2]<1) {state[2]=1}
      if (state[2]>10) {state[2]=10}
      max.Q=max(Qs[state[1],state[2],])
      rew=ifelse(all(state==goal),0,-1)
      Qs[sIDX[1],sIDX[2],move]=Qs[sIDX[1],sIDX[2],move]+
        alpha*(rew+gamma*max.Q-Qs[sIDX[1],sIDX[2],move])
    }
  }
  return(Qs)
}

Qs=Qlearn.ex6.5(1e6,0.05,1,0.1)
# sim optimal actions
state=c(4,1);goal=c(4,8);
state.hist=state
while (!all(state==goal)) {
  moveID=which.max(Qs[state[1],state[2],])
  state=state+act.V[moveID,]+wind[state[2],]
  if (state[1]<1) {state[1]=1}
  if (state[1]>7) {state[1]=7}
  if (state[2]<1) {state[2]=1}
  if (state[2]>10) {state[2]=10}
  state.hist=rbind(state.hist,state)
}
# plotting results
plot(0,0,type='n',xlim=c(0,11),ylim=c(0,8),xlab="",ylab="",
     main="Learned policies -- Q-learning")
lines(1,4,type='p',pch=19,col='red',cex=2)
lines(8,4,type='p',pch=19,col='red',cex=2)
dirs=c("up","right","down","left" )
for (i_row in 1:7) {
  for (i_col in 1:10) {
    best.move=dirs[which.max(Qs[i_row,i_col,])]
    text(i_col,i_row,best.move)
  }
}
lines(state.hist[,2],state.hist[,1],col="red",lwd=2)

# cognitive modeling
ALC.init<-function(size) {
  # size[1] = input dimension
  # size[2] = number of exemplars
  # size[3] = number of categories
  alpha=matrix(1,nrow=1,ncol=size[1])/size[1]
  w=matrix(rnorm(size[3]*size[2])*0.1,ncol=size[2])
  c=2            # gain
  phi=3          # decision strength
  lrA=0.2        # learning rate for attentions
  lrW=0.1        # learning rate for associations
  return(list(alpha = alpha,
              w = w,
              lrA = lrA,
              lrW = lrW,
              c = c,
              phi = phi))
}

# alcove forward process
alcove.forward <- function(parmSet, inp, exemplar){
  n.exemp = nrow(exemplar)   # 例示(exemplar)の数
  diff = matrix(abs(matrix(1,n.exemp,1)%*%inp-exemplar),nrow=n.exemp)
  exemp=exp(-parmSet$c*(diff%*%t(parmSet$alpha)))
  out=parmSet$w%*%exemp
  return(list(diff = diff, exemp = exemp, out = out))
}

# alcove backward process
alcove.backward <- function(res.forward,parmSet, target){
  err=target-res.forward$out
  dW=parmSet$lrW*res.forward$exemp%*%t(err)
  dA=-parmSet$lrA*(t(err)%*%parmSet$w*t(res.forward$exemp))%*%res.forward$diff*parmSet$c
  return(list(dW = dW, dA = dA))
}

exemplar = matrix(c(0,0,0,
                    0,0,1,
                    0,1,0,
                    0,1,1,
                    1,0,0,
                    1,0,1,
                    1,1,0,
                    1,1,1),byrow=T,nrow=8)
inp = exemplar

target1 = matrix(c(0,0,0,0,1,1,1,1,
                   1,1,1,1,0,0,0,0),nrow=8)

target2 = matrix(c(1,1,0,0,0,0,1,1,
                   0,0,1,1,1,1,0,0),nrow=8)

target3 = matrix(c(1,1,1,0,0,1,0,0,
                   0,0,0,1,1,0,1,1),nrow=8)

target4 = matrix(c(1,1,1,0,1,0,0,0,
                   0,0,0,1,0,1,1,1),nrow=8)

target5 = matrix(c(1,1,1,0,0,0,0,1,
                   0,0,0,1,1,1,1,0),nrow=8)

target6 = matrix(c(1,0,0,1,0,1,1,0,
                   0,1,1,0,1,0,0,1),nrow=8)
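ALCOVE の順伝播・逆伝播関数と刺激・教師信号だけを示したので、これらを使った学習ループの最小限のスケッチを挙げておきます(エポック数などはこちらで仮に置いた値で、注意重みは非負に保つことを仮定しています)。

# 仮のスケッチ: target1 を教師信号とした ALCOVE の学習ループ
set.seed(1)
ps <- ALC.init(c(ncol(exemplar), nrow(exemplar), ncol(target1)))
n.epoch = 50
sse = rep(0, n.epoch)
for (i.epoch in 1:n.epoch) {
  for (i.tr in sample(1:nrow(inp))) {
    fwd <- alcove.forward(ps, inp[i.tr, ], exemplar)
    bwd <- alcove.backward(fwd, ps, target1[i.tr, ])
    ps$w = ps$w + t(bwd$dW)                # 連合重みの更新
    ps$alpha = pmax(ps$alpha + bwd$dA, 0)  # 注意重みの更新(非負に制限)
    sse[i.epoch] = sse[i.epoch] + sum((target1[i.tr, ] - fwd$out)^2)
  }
}
plot(sse, type = 'l', xlab = "epoch", ylab = "sum of squared error")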

# evo. game
food = 6; cost = 10
A = 0.5*(food-cost); B = food
C = 0; D = food/2
pay.mat = matrix(c(A,B,C,D),nrow=2,byrow=TRUE)
dt = 0.01;max_time=1000
p = rep(0,max_time)
q = rep(0,max_time)
p[1] = 0.2
q[1] = 1 - p[1]
for (t in 1:max_time){
  prob.mat = outer(c(p[t],q[t]),c(p[t],q[t]))
  W.ave = sum(prob.mat*pay.mat)
  W.h = sum(c(p[t],q[t])*pay.mat[1,])
  W.d = sum(c(p[t],q[t])*pay.mat[2,])
  p[(t+1)] = p[t]+(p[t]*(W.h-W.ave)/W.ave)*dt
  q[(t+1)] = q[t]+(q[t]*(W.d-W.ave)/W.ave)*dt
}

plot(p,type='l',lwd=2,col='red',ylim=c(0,1))
lines(q,type='l',lwd=2,col='blue')

# cake game
n.cake = 10
pay.mat = matrix(0,n.cake+1,n.cake+1)
for (i.cake in 1:n.cake){
  pay.mat[(i.cake+1),1:(n.cake-i.cake+1)] =i.cake
}

データ解析基礎論B テスト理論

dat<-read.table("http://peach.l.chiba-u.ac.jp/course_folder/survey_data01.txt")

install.packages("psych")
library("psych")
ca<-alpha(dat)

dat<-read.csv("http://peach.l.chiba-u.ac.jp/course_folder/survey2.csv")
image(cor(dat)[10:1,1:10])

ca1_5 = alpha(dat[,1:5])
ca1_5

ca6_10 = alpha(dat[,6:10])
ca6_10

F1<-factanal(dat[,1:5],1)
F2<-factanal(dat[,6:10],1)

library(sem)
fa.model=cfa(reference.indicator=FALSE)
# cfa() は対話的にモデル指定を読み込むので、続けてコンソールに以下を入力する(空行で入力を終了)
F1: q1,q2,q3,q4,q5
F2: q6,q7,q8,q9,q10

fa.model<-update(fa.model)
# update() も対話的に編集コマンドを読み込むので、以下を入力してF1とF2の因子間相関を削除する
delete, F1<->F2

fa.result<-sem(fa.model, cov(dat), 300)
summary(fa.result)

install.packages("ltm") 
library(ltm)
dat<-read.table("http://peach.l.chiba-u.ac.jp/course_folder/survey_data01.txt")
dat = dat-1
descript(dat)
irt1P<-rasch(dat)
plot.rasch(irt1P)
GoF.rasch(irt1P)
person.fit(irt1P)
item.fit(irt1P)
theta = factor.scores(irt1P)
cor(rowSums(theta[[1]][,1:10]),theta[[1]]$z1)

irt2P<-ltm(dat~z1) 
plot.ltm(irt2P)
person.fit(irt2P)
item.fit(irt2P)
theta2P = factor.scores(irt2P)
cor(rowSums(theta2P[[1]][,1:10]),theta2P[[1]]$z1)