Disclaimer

The code in this courselog is presented mainly as examples in lectures, seminars, and practicums aimed at undergraduate and master's students, and it is in principle written to be as intuitive as possible. Some examples are quite inefficient, and some were written quick-and-dirty. Also, since I normally use MATLAB, odd habits from it may show through. Please keep all of this in mind when using or consulting these examples. The backup failed during the server migration in September 2013, so all content up to the first semester of 2013 has been lost...

For A Brief Guide to R (a guide to R commands, with examples), see here.

Fundamentals of Data Analysis A, Week 12

dat<-read.csv("http://peach.l.chiba-u.ac.jp/course_folder/datW03R.txt")

means<-tapply(dat$shoesize,list(dat$gender, dat$affil),mean)
Ns<-tapply(dat$shoesize,list(dat$gender, dat$affil),length)
sds<-tapply(dat$shoesize,list(dat$gender, dat$affil),sd)
sems<-sds/sqrt(Ns)
plot(c(0,1),means[,1],type='o',col='skyblue', xlim=c(-0.1,1.1), lwd=2, 
     cex=2, pch=20, ylim=c(min(means)*0.975, max(means)*1.025), xlab="gender", ylab="shoesize",
     xaxt="n")
axis(1,c(0,1),c("female","male"))
lines(c(0,1),means[,2],type='o',col='orange',lwd=2,cex=2,pch=20)
lines(c(0,0),c(means[1,1]-sems[1,1],means[1,1]+sems[1,1]),col="skyblue",lwd=2)
lines(c(0,0),c(means[1,2]-sems[1,2],means[1,2]+sems[1,2]),col="orange",lwd=2)
lines(c(1,1),c(means[2,1]-sems[2,1],means[2,1]+sems[2,1]),col="skyblue",lwd=2)
lines(c(1,1),c(means[2,2]-sems[2,2],means[2,2]+sems[2,2]),col="orange",lwd=2)
legend("topleft",c("CogSci","PsySci"),col=c("skyblue","orange"),lwd=2)


interaction.plot(dat$gender,
                 dat$affil,
                 dat$shoesize, 
                 pch=c(20,20), 
                 col=c("skyblue","orange"), 
                 xlab="gender", ylab="shoesize", 
                 lwd=3,type='b',cex=2,
                 trace.label="Affiliation")

dat.aov=aov(shoesize~gender*affil, data=dat)
dat.aov.sum=summary(dat.aov)

# testing simple main effect of gender
means<-tapply(dat$shoesize, list(dat$gender,dat$affil), mean)
SS_gen_CS<- 5*(means[2,1]^2 + means[1,1]^2 -0.5*sum(means[,1])^2) # SS_gender CS
SS_gen_PS<- 5*(means[2,2]^2 + means[1,2]^2 -0.5*sum(means[,2])^2) # SS_gender PS
dat.aov.sum=summary(dat.aov)   # ANOVA table
MSe=dat.aov.sum[[1]][4,3]      # MSE from ANOVA table or MSe=0.62
dfE=dat.aov.sum[[1]][4,1]      # DF for error or dfE=16
dfG=1                          # DF for gender
F_gen_CS=(SS_gen_CS/dfG)/MSe   # F-value for gender effect given CS
F_gen_PS=(SS_gen_PS/dfG)/MSe   # F-value for gender effect given PS
P_gen_CS=1-pf(F_gen_CS,1,dfE)  # p-value for gender effect given CS
P_gen_PS=1-pf(F_gen_PS,1,dfE)  # p-value for gender effect given PS

# testing simple main effect of affiliation
SS_affil_F<- 5*(means[1,1]^2+means[1,2]^2-0.5*sum(means[1,])^2) #SS_affil | F
SS_affil_M<- 5*(means[2,1]^2+means[2,2]^2-0.5*sum(means[2,])^2) #SS_affil | M
dfA=1				          # DF for affil
F_affil_F=SS_affil_F/dfA/MSe         # F-value for affiliation effect | F
F_affil_M=SS_affil_M/dfA/MSe         # F-value for affiliation effect | M
P_affil_F=1-pf(F_affil_F,1,dfE)      # p-value for affiliation effect | F
P_affil_M=1-pf(F_affil_M,1,dfE)      # p-value for affiliation effect | M
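
For reference, the cell and marginal means used in these calculations can also be extracted directly from the fitted aov object with base R's model.tables():

# cell and marginal means from the fitted two-way model
model.tables(dat.aov, type = "means")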

# ANOVA 2x4 
dat<-read.csv("http://www.matsuka.info/data_folder/dktb321.txt")
interaction.plot(dat$duration,dat$method,dat$result, pch=c(20,20), col=c("skyblue","orange"), ylab="score", xlab="Duration",lwd=3,type='b',cex=2,trace.label="Method")
dat.aov=aov(result~method*duration,data=dat)

means<-tapply(dat$result,list(dat$method,dat$duration),mean)
ssM_1=5*(sum(means[,1]^2)-0.5*(sum(means[,1])^2))
ssM_2=5*(sum(means[,2]^2)-0.5*(sum(means[,2])^2))
ssM_3=5*(sum(means[,3]^2)-0.5*(sum(means[,3])^2))
ssM_4=5*(sum(means[,4]^2)-0.5*(sum(means[,4])^2))
dat.aov.sum=summary(dat.aov)   # ANOVA table
MSe=dat.aov.sum[[1]][4,3]      # MSE from the ANOVA table
DFe=dat.aov.sum[[1]][4,1]      # DF for error
DFm=1
fM_1=(ssM_1/DFm)/MSe
1-pf(fM_1,DFm,DFe)

ssD_X=5*(sum(means[1,]^2)-1/4*(sum(means[1,])^2))
ssD_Y=5*(sum(means[2,]^2)-1/4*(sum(means[2,])^2))
DFd=3
fD_X=(ssD_X/DFd)/MSe
fD_Y=(ssD_Y/DFd)/MSe
1-pf(fD_X,DFd,DFe)
1-pf(fD_Y,DFd,DFe)

qv=qtukey(0.95,DFd+1,DFe)
hsd=qv*(sqrt(MSe/5))
print(diffM<-outer(means[1,],means[1,],"-"))   # pairwise differences among duration means (method X)
abs(diffM)>hsd                                 # TRUE = difference exceeds the HSD
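
Note that base R's TukeyHSD() compares the marginal duration means of the fitted model, not the within-method simple effects computed above; for a quick cross-check (assuming duration was read as a factor):

# Tukey comparisons of the marginal duration means
TukeyHSD(dat.aov, which = "duration")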

# useful r functions
source("http://peach.l.chiba-u.ac.jp/course_folder/tsme2017.R")
CRF.tsme(dat.aov, dat)
   

Fundamentals of Data Analysis A, Week 11

f=c(24.1,23.9,24.4,24.4,23.5)
m=c(25.6,26.1,25.8,25.9,26)

G.mean=mean(c(f,m))
ss.total=sum((c(f,m)-G.mean)^2)
ss.tr=sum((mean(f)-G.mean)^2)*5+sum((mean(m)-G.mean)^2)*5
ss.error=sum((f-mean(f))^2)+sum((m-mean(m))^2)

dat<-data.frame(ssize=c(f,m),gender=c(rep("f",5),rep("m",5)))
dat.aov<-aov(ssize ~ gender,data=dat)
dat<-read.table("http://www.matsuka.info/data_folder/aov01.txt")
dat.aov<-aov(shoesize~club, dat)

qT<-qtukey(0.95, 3, 67)                # studentized range quantile: 3 groups, dfE = 67
HSD<-qT*sqrt((2.243*(1/23+1/24))/2)    # Tukey-Kramer HSD: MSe = 2.243, unequal n (23, 24)
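
The same comparisons, with confidence intervals, are available directly from the fitted model:

# all pairwise club comparisons with Tukey-adjusted intervals
TukeyHSD(dat.aov)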


dat<-read.csv("http://www.matsuka.info/data_folder/dktb312.txt",
              col.names=c("dump","method","result"))
levels(dat$method)<-c('free','repeat','sentence','image')
dat.aov<-aov(result~method, data=dat)
summary(dat.aov)


# contrast analysis as a t-test
dat.means<-tapply(dat$result,dat$method,mean)
cont=c(-3,1,1,1)                      # contrast weights: free vs. the other three methods
bunshi=sum(cont*dat.means)            # numerator: value of the contrast
bunbo=sqrt(5.29*(sum((cont^2)/8)))    # denominator: SE with MSe = 5.29, n = 8 per group

t.value=bunshi/bunbo
2*(1-pt(abs(t.value),28))             # two-sided p-value, df = 28

# the same contrast with Scheffe's adjustment
dat.means<-tapply(dat$result,dat$method,mean)
cont=c(-3,1,1,1)
bunshi=sum(cont*dat.means)
bunbo=sqrt(5.29*(sum((cont^2)/8)))
F.value=(bunshi/bunbo)^2              # squared t-statistic = F
new.F=3*qf(0.95,3,28)                 # critical value: (k-1) * F(0.95; k-1, dfE)
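
The Scheffé criterion then rejects the null hypothesis when the contrast's F exceeds the adjusted critical value; the explicit comparison (not in the original code) is:

F.value > new.F   # TRUE = contrast significant by Scheffe's criterion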

Fundamentals of Data Analysis A, Week 10

freq<-c(33,37,7,23)
pref<-factor(c('soba','udon','soba','udon'))
region<-factor(c('east','east','west','west'))
dat<-data.frame(pref,region,freq)
dat.table=table(pref,region)
dat.table[cbind(pref,region)]<-freq

ME.lrt=2*sum((freq)*log(freq/25))   # G^2 against the equiprobable model (expected = 100/4 = 25)
dat.loglinCE_A<-loglin(dat.table,list(1), fit=T,param=T)   # fits the pref margin only
dat.loglinCE_B<-loglin(dat.table,list(2), fit=T,param=T)   # fits the region margin only
dat.loglinIND<-loglin(dat.table,list(1,2), fit=T,param=T)  # independence model

ME.lrt-dat.loglinCE_A$lrt				
1-pchisq(ME.lrt-dat.loglinCE_A$lrt,1)		

dat.loglinCE_A$lrt-dat.loglinIND$lrt	
1-pchisq(dat.loglinCE_A$lrt-dat.loglinIND$lrt,1)	

dat.loglinSAT<-loglin(dat.table,list(c(1,2)), fit=T,param=T)  # saturated model (lrt = 0)
dat.loglinIND$lrt-dat.loglinSAT$lrt
1-pchisq(dat.loglinIND$lrt-dat.loglinSAT$lrt,1)
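
The independence model can equivalently be fit as a Poisson regression; its residual deviance matches dat.loglinIND$lrt. A sketch using the data frame built above:

# independence model as a Poisson GLM; residual deviance = G^2 of dat.loglinIND
dat.glm <- glm(freq ~ pref + region, family = poisson, data = dat)
summary(dat.glm)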

freq<-c(9,5,2,4,16,10,26,28)
gender<-factor(c(rep('female',4),c(rep('male',4))))
affil<-factor(rep(c('L','L','E','E'),2))
circle<-factor(rep(c('tennis','astro'),4))
dat<-data.frame(gender,affil,circle,freq)

dat.table<-table(gender,affil,circle)
dat.table[cbind(gender,affil,circle)]<-freq

dat.loglin2<-loglin(dat.table,list(1), fit=T,param=T)
dat.loglin3<-loglin(dat.table,list(1,3), fit=T,param=T)
dat.loglin4<-loglin(dat.table,list(1,2,3), fit=T,param=T)
dat.loglin5<-loglin(dat.table,list(c(1,3),2), fit=T,param=T)
dat.loglin6<-loglin(dat.table,list(c(1,3),c(1,2)), fit=T,param=T)
dat.loglin7<-loglin(dat.table,list(c(1,3),c(1,2),c(2,3)), fit=T,param=T)
dat.loglin8<-loglin(dat.table,list(c(1,2,3)), fit=T,param=T)
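
Nested pairs among these models can be compared with the same likelihood-ratio logic as in the 2 x 2 example; for instance, testing whether adding the affil-by-circle association (model 7 vs. model 6) improves fit:

# LRT for the added (2,3) association; df comes from the loglin output
d.lrt <- dat.loglin6$lrt - dat.loglin7$lrt
d.df  <- dat.loglin6$df  - dat.loglin7$df
1 - pchisq(d.lrt, d.df)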

Basic Practicum: Assignment 2, with sample solution

Basic Practicum M, Assignment 2
Due date: 2017.06.14

The R code below simulates an N x N bingo game and computes the time until the game is completed (i.e., the number of numbers called):

size = 5  # N
rand.v = sample(1:size^2)
bingo=matrix(rand.v, size)
rand.call = sample(1:size^2)
terminate = F; counter = 0
while (terminate == F){
  counter = counter + 1
  bingo[which(bingo==rand.call[counter])]=0
  Rsum = rowSums(bingo)
  Csum = colSums(bingo)
  Dsum = c(sum(diag(bingo)),sum(diag(bingo[,size:1])))
  if (any(c(Rsum,Csum,Dsum)==0)) {terminate = T}
}
print(counter)   # e.g., [1] 12

First, using this code as a reference, write a function that takes size as its argument and returns counter as its output.
Next, use your new function to estimate the average completion time of bingo games with size = 1, 3, 5, 7, 9, 11, 13, 15. To obtain stable averages, run each condition at least 100 times.
Finally, visualize the means as shown below.
[Figure: average bingo game duration as a function of game size]

# sample solution
# "BINGO" function
n.bingo <- function(g.size) {
  rand.v = sample(1:g.size ^ 2)
  bingo = matrix(rand.v, g.size)
  rand.call = sample(1:g.size ^ 2)
  terminate = F; counter = 0
  while (terminate == F) {
    counter = counter + 1
    bingo[which(bingo == rand.call[counter])] = 0
    Rsum = rowSums(bingo)
    Csum = colSums(bingo)
    Dsum = c(sum(diag(bingo)), sum(diag(bingo[, g.size:1])))
    if (any(c(Rsum, Csum, Dsum) == 0)) {
      terminate = T
    }
  }
  return(counter)
}
# simulation
nRep = 1000
g.sizes = seq(1,15,2)
res = matrix(0,nRep, length(g.sizes))
for (i.rep in 1:nRep){
  for (i.size in 1:length(g.sizes)){
    res[i.rep, i.size] = n.bingo(g.sizes[i.size])
  }
}
# plotting results
plot(colMeans(res)~g.sizes,type='o',xlim = c(0,16), ylim = c(-1,180),pch = 19, 
     col = "orange3",lwd =3,cex =2,xlab="size of game", ylab="Average duration of game")

Cognitive Information Analysis: Deep Learning, summary through Ch. 5

# ReLU
relu.forwd <- function(x){
  return(pmax(x,0))
}
relu.bckwd <- function(z, dout){
  dout[which(z <= 0)] = 0
  return(dout)
}
# sigmoid
sigmoid.forwd <- function(x){
  return(1/(1+exp(-x)))
}
sigmoid.bckwd <- function(z, dout){
  return(dout*(1-z)*z)
}
# Affine
affine.forwd <- function(x, W, b){
  return(x%*%W + matrix(1, nrow = nrow(x), ncol = 1)%*%b)
}
affine.bckwd <- function(x, W, dout){
  dx = dout%*%t(W)
  dW = t(x)%*%dout
  db = colSums(dout)
  return(list(dx = dx, dW = dW, db = db))
}
# softmax with CE
softmax.forwd <- function(x, target){
  max.x = apply(x,1,max)
  C = ncol(x)
  x = x - max.x%*%matrix(1,nrow=1,ncol=C)
  y = exp(x)/rowSums(exp(x))
  delta = 1e-7;
  R = nrow(as.matrix(y))
  return(list(smx = y, ce = -sum(target*log(y + delta))/R))
}
softmax.bckwd <- function(smx, target){
  R = nrow(smx)
  return((smx-target)/R)
}
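
A quick way to validate these backward functions is a numerical gradient check; a minimal sketch for the sigmoid layer with a toy loss (the example values are ad hoc):

# numerical vs. analytical gradient for the sigmoid layer
x <- matrix(c(0.5, -1.2, 0.3, 2.0), nrow = 2)
z <- sigmoid.forwd(x)
L <- function(x) sum(sigmoid.forwd(x))   # toy loss: sum of activations
eps <- 1e-6
num.grad <- matrix(0, nrow(x), ncol(x))
for (i in 1:length(x)) {
  xp <- x; xp[i] <- xp[i] + eps
  xm <- x; xm[i] <- xm[i] - eps
  num.grad[i] <- (L(xp) - L(xm)) / (2 * eps)
}
ana.grad <- sigmoid.bckwd(z, dout = matrix(1, nrow(x), ncol(x)))
max(abs(num.grad - ana.grad))            # should be on the order of 1e-9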

activation <- function(A, target, actFun){
  if (actFun == "sigmoid"){
    return(sigmoid.forwd(A))
  }
  if (actFun == "relu"){
    return(relu.forwd(A))
  }
  if (actFun == "softmax"){
    return(softmax.forwd(A, target))
  }
}

Dact <- function(z, smx, target, dout, actFun){
  if (actFun == "sigmoid"){
    return(sigmoid.bckwd(z, dout))
  }
  if (actFun == "relu"){
    return(relu.bckwd(z, dout))
  }
  if (actFun == "softmax"){
    return(softmax.bckwd(smx, target))
  }
}

# function for initialization 
init.network <- function(n.neurons, actFun){
  n.layer = length(n.neurons)
  W = list(); b = list()
  for (i.layer in 1:(n.layer-1)){
    W[[i.layer]] = matrix(rnorm(n.neurons[i.layer]*n.neurons[(i.layer+1)],sd = 0.1),
                          nrow=n.neurons[i.layer])
    b[[i.layer]] =  matrix(rnorm(n.neurons[(i.layer+1)],sd = 0.1), nrow = 1)
  }
  return(list(W = W, b = b, actFun = actFun))
}

cu.nnet = function(train.x, train.y, net, HP = c(10,1000,0.05)){
# HP: Hyperparameters
# HP[1] = batch size
# HP[2] = n iteration
# HP[3] = learning rate
  n.layer <- length(net$W)
  loss = rep(0,HP[2])
  A = list(); z = list(); Dact = list(); Daff = list()
  for (i.iter in 1:HP[2]){
    batch_mask = sample(1:nrow(train.x), HP[1])
    x.batch = train.x[batch_mask,]
    t.batch = train.y[batch_mask,]  
    for (i.layer in 1:n.layer){
      if (i.layer == 1){
        x = x.batch
      } else {
        x = z[[(i.layer - 1)]]
      }
      A[[i.layer]] = affine.forwd(x, net$W[[i.layer]], net$b[[i.layer]])
      z[[i.layer]] = activation(A[[i.layer]], t.batch, net$actFun[i.layer])
    }
    loss[i.iter] = z[[n.layer]]$ce
    smx = z[[n.layer]]$smx
    for (i.layerR in n.layer:1){
      if (i.layerR == n.layer){
        dout = 1
      } else {
        dout = Daff[[(i.layerR+1)]]$dx
      }
      Dact[[i.layerR]] = Dact(z[[i.layerR]], smx, t.batch, dout, net$actFun[i.layerR])
      if (i.layerR==1){
        x = x.batch
      } else {
        x = z[[(i.layerR-1)]]   # input to this affine layer is the previous activation
      }
      Daff[[i.layerR]] = affine.bckwd(x, net$W[[i.layerR]], Dact[[i.layerR]])
    }
    for (i.layer in 1:n.layer){
      net$W[[i.layer]] = net$W[[i.layer]] - HP[3]*Daff[[i.layer]]$dW
      net$b[[i.layer]] = net$b[[i.layer]] - HP[3]*Daff[[i.layer]]$db
    }
  }
  return(list(loss = loss, net = net))
}

# iris
train.x = as.matrix(iris[,1:4])
train.y.temp = as.numeric(iris[,5])
train.y = matrix(0,nrow = nrow(train.x), ncol =3)
train.y[which(train.y.temp==1), 1]=1
train.y[which(train.y.temp==2), 2]=1
train.y[which(train.y.temp==3), 3]=1
actF = c("relu","softmax")
network = init.network(c(4,15,3), actF)
res = cu.nnet(train.x, train.y, network, HP=c(15,2000,0.1))
plot(res$loss,type='l')

# MNIST
train <- read.csv('~/courses/CogMod/CMA2017/MNSTtrain.csv', header=TRUE)
test <- read.csv('~/courses/CogMod/CMA2017/MNSTtest.csv', header=TRUE)
train <- data.matrix(train)
test <- data.matrix(test)
train.x <- as.matrix(train[,-1]/255)
train.y.temp <- train[,1]
train.y = matrix(0,nrow = nrow(train.x), ncol = 10)
for (i in 1:nrow(train.x)){
  train.y[i,(train.y.temp[i]+1)]=1
}
actF = c("relu","relu","softmax")
network = init.network(c(784,100,50,10), actF)
res = cu.nnet(train.x, train.y, network,HP=c(100,2000,0.1))
plot(res$loss,type='l')
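
cu.nnet() only records the loss; training accuracy can be checked with one forward pass through the trained network, reusing the helper functions above (a sketch, not part of the original code):

# forward pass with trained weights, then compare argmax to the digit labels
net <- res$net
z <- train.x
for (i.layer in 1:length(net$W)) {
  A <- affine.forwd(z, net$W[[i.layer]], net$b[[i.layer]])
  z <- activation(A, train.y, net$actFun[i.layer])
}
pred <- apply(z$smx, 1, which.max)
mean(pred == (train.y.temp + 1))   # proportion classified correctly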

########################################################
#   with additional methods

init.network <- function(n.neurons, actFun, Opt, sdv){
  n.layer = length(n.neurons)
  W = list(); b = list(); 
  mW = list(); mb = list();    # momentum
  hW = list(); hb = list();    # adaGrad
  aMW = list(); aMb = list();  # adam M
  aVW = list(); aVb = list();  # adam V
  for (i.layer in 1:(n.layer-1)){
    if (nargs() == 3) {
      if (actFun[i.layer]=="sigmoid"){
        # Xavier
        sdv = 1/sqrt(n.neurons[i.layer])
      } else {
        # He - assumes ReLU
        sdv = sqrt(2/n.neurons[i.layer])
      }
    }
    W[[i.layer]] = matrix(rnorm(n.neurons[i.layer]*n.neurons[(i.layer+1)], sd = sdv),
                          nrow=n.neurons[i.layer])
    b[[i.layer]] =  matrix(rnorm(n.neurons[(i.layer+1)], sd = sdv), nrow = 1)
    if (Opt == "momentum"){
      mW[[i.layer]] = matrix(0, nrow=n.neurons[i.layer], ncol=n.neurons[(i.layer+1)])
      mb[[i.layer]] = matrix(0, nrow = 1, ncol=n.neurons[(i.layer+1)])
    } 
    if (Opt == "adagrad"){
      hW[[i.layer]] = matrix(0, nrow=n.neurons[i.layer], ncol=n.neurons[(i.layer+1)])
      hb[[i.layer]] = matrix(0, nrow = 1, ncol=n.neurons[(i.layer+1)])
    }
    if (Opt == "adam"){
      aMW[[i.layer]] = matrix(0, nrow=n.neurons[i.layer], ncol=n.neurons[(i.layer+1)])
      aMb[[i.layer]] = matrix(0, nrow = 1, ncol=n.neurons[(i.layer+1)])
      aVW[[i.layer]] = matrix(0, nrow=n.neurons[i.layer], ncol=n.neurons[(i.layer+1)])
      aVb[[i.layer]] = matrix(0, nrow = 1, ncol=n.neurons[(i.layer+1)])
    }
  }
  return(list(W = W, b = b, actFun = actFun, optimizer = Opt,
              mW=mW, mb=mb,
              hW=hW, hb=hb,
              aMW=aMW, aMb=aMb, aVW=aVW, aVb=aVb))
}

OPT<-function(net, Daff, HP){
  if (net$optimizer == "momentum"){
    return(Opt.mom(net, Daff, HP))
  }
  if (net$optimizer == "adagrad"){
    return(Opt.adagrad(net, Daff, HP))
  }
  if (net$optimizer == "adam"){
    return(Opt.adam(net, Daff, HP))
  }
}

Opt.mom <- function(net, Daff, HP){
# HP[3] = learning rate
# HP[4] = weight decay
# HP[5] = momentum
  n.layer <- length(net$W)
  for (i.layer in 1:n.layer){
    net$mW[[i.layer]] = HP[5]*net$mW[[i.layer]] -
                        HP[3]*Daff[[i.layer]]$dW - HP[4]*net$W[[i.layer]]
    net$mb[[i.layer]] = HP[5]*net$mb[[i.layer]] -
                        HP[3]*Daff[[i.layer]]$db - HP[4]*net$b[[i.layer]]
    net$W[[i.layer]] = net$W[[i.layer]] + net$mW[[i.layer]]
    net$b[[i.layer]] = net$b[[i.layer]] + net$mb[[i.layer]] 
  }
  return(net)
}
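
OPT() above also dispatches to Opt.adagrad() and Opt.adam(), which are not reproduced here. A minimal sketch of Opt.adagrad() under the same conventions (the hW/hb accumulators come from init.network; eps is an ad hoc stabilizing constant):

Opt.adagrad <- function(net, Daff, HP){
# HP[3] = learning rate
# HP[4] = weight decay
  eps <- 1e-7                      # avoids division by zero
  n.layer <- length(net$W)
  for (i.layer in 1:n.layer){
    gW = Daff[[i.layer]]$dW + HP[4]*net$W[[i.layer]]
    gb = Daff[[i.layer]]$db + HP[4]*net$b[[i.layer]]
    net$hW[[i.layer]] = net$hW[[i.layer]] + gW^2    # accumulated squared gradients
    net$hb[[i.layer]] = net$hb[[i.layer]] + gb^2
    net$W[[i.layer]] = net$W[[i.layer]] - HP[3]*gW/(sqrt(net$hW[[i.layer]]) + eps)
    net$b[[i.layer]] = net$b[[i.layer]] - HP[3]*gb/(sqrt(net$hb[[i.layer]]) + eps)
  }
  return(net)
}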

cu.nnet = function(train.x, train.y, net, HP = c(10,1000,0.05,0.01,0.1,0.999,0.9)){
# HP: Hyperparameters
# HP[1] = batch size
# HP[2] = n iteration
# HP[3] = learning rate
# HP[4] = weight decay
# HP[5] = momentum
# HP[6] = beta1 (adam)
# HP[7] = beta2 (adam)
  n.layer <- length(net$W)
  loss = rep(0,HP[2])
  A = list(); z = list(); Dact = list(); Daff = list()
  for (i.iter in 1:HP[2]){
    batch_mask = sample(1:nrow(train.x), HP[1])
    x.batch = train.x[batch_mask,]
    t.batch = train.y[batch_mask,]
    for (i.layer in 1:n.layer){
      if (i.layer == 1){
        x = x.batch
      } else {
        x = z[[(i.layer - 1)]]
      }
      A[[i.layer]] = affine.forwd(x, net$W[[i.layer]], net$b[[i.layer]])
      z[[i.layer]] = activation(A[[i.layer]], t.batch, net$actFun[i.layer])
    }
    loss[i.iter] = z[[n.layer]]$ce
    smx = z[[n.layer]]$smx
    for (i.layerR in n.layer:1){
      if (i.layerR == n.layer){
        dout = 1
      } else {
        dout = Daff[[(i.layerR+1)]]$dx
      }
      Dact[[i.layerR]] = Dact(z[[i.layerR]], smx, t.batch, dout, net$actFun[i.layerR])
      if (i.layerR==1){
        x = x.batch
      } else {
        x = z[[(i.layerR-1)]]   # input to this affine layer is the previous activation
      }
      Daff[[i.layerR]] = affine.bckwd(x, net$W[[i.layerR]], Dact[[i.layerR]])
    }
    net = OPT(net, Daff, HP)
  }
  return(list(loss = loss, net = net))
}

# iris
train.x = as.matrix(iris[,1:4])
train.y.temp = as.numeric(iris[,5])
train.y = matrix(0,nrow = nrow(train.x), ncol =3)
train.y[which(train.y.temp==1), 1]=1
train.y[which(train.y.temp==2), 2]=1
train.y[which(train.y.temp==3), 3]=1
actF = c("relu","softmax")
network = init.network(c(4,15,3), actF, "momentum")
res = cu.nnet(train.x, train.y, network, HP=c(15,1000,0.01,0.0001,0.9,0.999,0.9))
hist(res$net$W[[1]])
plot(res$loss,type='l')

Fundamentals of Data Analysis A, Week 09

dat<-read.csv("http://www.matsuka.info/data_folder/datWA01.txt")
plot(dat$shoesize, dat$gender, pch=20, cex=3, xlab="Shoe size", 
     ylab="gender", col='blue', ylim = c(0.5, 2.5), yaxt="n")
axis(2, at = c(1,2),labels=c("female","male"))
dat.lm<-lm(as.numeric(dat$gender)~dat$shoesize,data=dat)
abline(dat.lm,lwd=4,col='red')

chisq.test(c(72,23,16,49),p=rep(40,4),rescale.p=T)
M=matrix(c(52,48,8,42),nrow=2)
chisq.test(M,correct=F)

plot(dat$shoesize, dat$gender, pch=20, cex=3, xlab="Shoe size", 
     ylab="gender", col='blue', ylim = c(0.5, 2.5), yaxt="n")
axis(2, at = c(1,2),labels=c("female","male"))
abline(dat.lm,lwd=4,col='red')

p = seq(0.1,0.9,length.out = 20)
plot(p/(1-p),col="red",pch=20,cex=3)
plot(log(p/(1-p)),col="red",pch=20,cex=3)


dat.lr<-glm(gender~shoesize, family=binomial, data=dat)
x=seq((min(dat$shoesize)-2), (max(dat$shoesize)+2), 0.1)
keisu  = coef(dat.lr)
y.prob=1/(1+exp(-1*(keisu[1]+keisu[2]*x)))
plot(dat$shoesize, dat$gender, pch=20, cex=3, xlab="Shoe size", 
     ylab="gender", col='blue', ylim = c(0.5, 2.5), yaxt="n")
axis(2, at = c(1,2),labels=c("female","male"))
lines(x,y.prob+1,lty=2,lwd=4,col='green')
abline(dat.lm,lwd=4,col='red')

anova(dat.lr, test ="Chisq")
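
To see how well the fitted logistic model classifies, its predicted probabilities can be tabulated against the observed labels (a sketch; the 0.5 cutoff is an arbitrary choice):

# predicted probabilities from the fitted model, classified at 0.5
p.hat <- predict(dat.lr, type = "response")
table(observed = dat$gender, predicted = p.hat > 0.5)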

dat.lr0<-glm(gender~1,family="binomial",data=dat)
dat.lrS<-glm(gender~shoesize,family=binomial,data=dat)
dat.lrH<-glm(gender~h,family="binomial",data=dat)


dat<-read.csv("http://www.matsuka.info/data_folder/cda7-16.csv")
dat.glmAllAdd=glm(survival~age+Ncigarettes+NdaysGESTATION,family=binomial,data=dat)
dat.glmAllMult=glm(survival~age*Ncigarettes*NdaysGESTATION,family=binomial,data=dat)
library(MASS)   # stepAIC() lives in MASS
stepAIC(dat.glmAllMult)