Disclaimer

The code in this courselog is provided mainly as examples for lectures, seminars, and practica aimed at undergraduates and master's students, and as a rule it is written to be intuitively easy to follow. Some examples may be quite inefficient, and some were written in a quick-and-dirty way. Also, since I normally use MATLAB, some odd habits may show through. Please keep this in mind when using or referring to these examples.
Materials related to the undergraduate thesis: [2015PDF] [word template] [latex template] [cover page] [how to write a report] [citing references, etc.]
For A Brief Guide to R (a guide to R commands, with examples), see here.

2020 Basic Practicum: R Programming 2

# central limit theorem demo: distribution of means of uniform(0,1) samples
N = 10     # sample size
M = 10000  # number of replications
means = rep(0, M)
for (i_rep in 1:M){
  dat <- runif(N)
  means[i_rep] <- mean(dat)
}
hist(means, xlab = "Estmatedmeans", probability = T, 
  breaks = 30, xlim = c(0,1))
x.temp = seq(0, 1, length.out = 1000)
dens <- dnorm(x.temp, mean = 0.5, sd = (1/sqrt(12*N)))
lines(x.temp, dens, col = "red", lwd = 3)
legend("topright","theoretical desiity",lwd =3, col = "red")

X = 10
Y = 1
sumXY = X + Y

summation <- function(X,Y){
  sumXY = X + Y
  return(sumXY)
}
summation(X=1, Y=10)
summation(X=5, Y=2)

CLT_example <- function(N, M){
  means = rep(0, M)
  for (i_rep in 1:M){
    dat <- runif(N)
    means[i_rep] <- mean(dat)
  }
  hist(means, xlab = "Estmated means", probability = T, 
   breaks = 30, xlim =c(0,1))
  x.temp = seq(0, 1, length.out = 1000)
  dens <- dnorm(x.temp, mean = 0.5, sd = (1/sqrt(12*N)))
  lines(x.temp, dens, col = "red", lwd = 3)
  legend("topright","theoretical desiity",lwd =3, col = "red")
}
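
# a quick usage sketch (added): with the function above, the effect of
# sample size on the sampling distribution can be compared directly
# (the N values here are arbitrary)
CLT_example(N = 5, M = 10000)
CLT_example(N = 50, M = 10000)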

# simple SIR-type epidemic model, integrated with the Euler method
x0 = 1e5      # initially susceptible ("not infected")
y0 = 10       # initially infected
z0 = 0        # initially recovered ("no longer infected")
dt = 0.01     # time step
T = 10000     # number of steps
alpha = 1e-5  # infection rate
beta = 0.1    # recovery rate
x = y = z = rep(0,T)
x[1] = x0
y[1] = y0
z[1] = z0
for(t in 1:(T-1)){
  x[t+1] = x[t] - (alpha*x[t]*y[t])*dt
  y[t+1] = y[t] + (alpha*x[t]*y[t]-beta*y[t])*dt 
  z[t+1] = z[t] + (beta*y[t])*dt
}
plot(1:T, x, type = "l", col = 2, lwd = 3, ylim = c(0,x0),
  xlab = "Time", ylab = "Number of people")
lines(1:T, y, col = 3, lwd = 3)
lines(1:T, z, col = 4, lwd = 3)
legend("right", c("not infected", "infected","no longer infected"), col=2:4,lwd=3)

2020 Basic Practicum: R Programming 1

# creating example vectors
students = paste("s", 1:20, sep = "")
suite = rep(c("s","h","c","d"),13)
ns = sort(rep(1:13,4))
cards  = paste(suite, ns, sep="")

# samples
rand_select = sample(1:10, 2)
rand_perm = sample(1:10)
with_replacement = sample(0:1, 10, replace = T)
temp = sample(1:5, 5, replace = T)

# concrete example

# random selection
pointed <- sample(students,2)

# random permutation
randID= sample(1:20)
h1 = students[randID[1:10]]
h2 = students[randID[11:20]]

h1order = sample(h1)
h2order = sample(h2)

shuffled = sample(cards)
cardPlay = data.frame(p1 = shuffled[1:5], p2 = shuffled[6:10], p3 = shuffled[11:15])

# with replacement

# for loop 
num = 1:5
chr = c("a","b","c","d","e")
for (i in num){
  print(i)
}
for (i in chr){
  print(i)
}
for (i in num){
  print(chr[i])
}

p1 = p2 = p3 = rep("joker",5)
Ncard = 5
Nplayer = 3
cardSeq = seq(1, Ncard*Nplayer, Nplayer)
cardN = 0
for (i_card in cardSeq){
  cardN = cardN + 1
  p1[cardN] = shuffled[i_card]
  p2[cardN] = shuffled[i_card+1]
  p3[cardN] = shuffled[i_card+2]
}

p1 = shuffled[seq(1,15,3)]
p2 = shuffled[seq(2,15,3)]
p3 = shuffled[seq(3,15,3)]

players = matrix("joker",nrow = Ncard, ncol = Nplayer)
colnames(players) <- c("p1","p2","p3")
cardN = 0
for (i_card in 1:Ncard){
  for (i_player in 1:Nplayer){
    cardN = cardN + 1
    players[i_card, i_player] = shuffled[cardN]
  }
}

# a matrix has no "$" accessor; index the named columns instead
players[, "p1"] = shuffled[seq(1,15,3)]
players[, "p2"] = shuffled[seq(2,15,3)]
players[, "p3"] = shuffled[seq(3,15,3)]
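
# check (added): the loop-based deal and the vectorized deal should agree
all(p1 == players[, "p1"], p2 == players[, "p2"], p3 == players[, "p3"])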



p1WinC = p2WinC = tie = 0
Nplay = 10
for (i_play in 1:Nplay){
  p1 = sample(1:6,1)
  p2 = sample(1:6,1)
  if (p1 > p2){
    p1WinC = p1WinC + 1
  } else {
    if (p2 > p1){
      p2WinC = p2WinC + 1
    } else {
      tie = tie + 1 
    }
  }
}
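
# with a large Nplay (e.g., 10000; an added check), the observed proportions
# should approach the theoretical 2/12 (tie) and 5/12 (each player) used just below
c(tie = tie, p1 = p1WinC, p2 = p2WinC) / Nplay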

result = sample(c(0,1,2),10, replace=T, prob = c(2/12,5/12,5/12))

win_thres = 5
p1WinC = p2WinC = tie = 0

while(max(c(p1WinC, p2WinC)) < win_thres){
  p1 = sample(1:6,1)
  p2 = sample(1:6,1)
  if (p1 > p2){
    p1WinC = p1WinC + 1
  } else {
    if (p2 > p1){
      p2WinC = p2WinC + 1
    }
  }
}
if (p1WinC>p2WinC){
  print("player 1 won")
} else {
    print("player 2 won")
}


win_thres = 5
p1WinC = p2WinC = tie = 0

repeat{
  p1 = sample(1:6,1)
  p2 = sample(1:6,1)
  if (p1 > p2){
    p1WinC = p1WinC + 1
  } else {
    if (p2 > p1){
      p2WinC = p2WinC + 1
    }
  }
  if(max(c(p1WinC, p2WinC)) >= win_thres){
    if (p1WinC>p2WinC){
      print("player 1 won")
    } else {
      print("player 2 won")
    }
    break
  }
}


vec = sample(1:15)
len_vec = length(vec)
#vec = 1:15
for (loop1 in 1:(len_vec-1)){
  for (loop2 in 2:(len_vec - loop1 + 1)){
    if (vec[loop2] < vec[(loop2 - 1)]){
      temp_num = vec[loop2]
      vec[loop2] = vec[(loop2-1)]
      vec[(loop2-1)] = temp_num
    }
  }
  print(paste("result after loop1 = ", loop1))
  print(vec)
}

vec = sample(1:15)
len_vec = length(vec)
#vec = 1:15
for (loop1 in 1:(len_vec-1)){
  if (all(1:15 == vec)) {
    print("sorted")
    print(vec)
    break
  }
  for (loop2 in 2:(len_vec - loop1 + 1)){
    if (vec[loop2] < vec[(loop2 - 1)]){
      temp_num = vec[loop2]
      vec[loop2] = vec[(loop2-1)]
      vec[(loop2-1)] = temp_num
    }
  }
  print(paste("result after loop1 = ", loop1))
  print(vec)
}
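
# (added sketch) the same bubble sort wrapped in a function,
# so it can be checked against R's built-in sort()
bubble_sort <- function(vec){
  len_vec = length(vec)
  for (loop1 in 1:(len_vec-1)){
    for (loop2 in 2:(len_vec - loop1 + 1)){
      if (vec[loop2] < vec[(loop2-1)]){
        temp_num = vec[loop2]
        vec[loop2] = vec[(loop2-1)]
        vec[(loop2-1)] = temp_num
      }
    }
  }
  return(vec)
}
test_vec = sample(1:100, 20)
all(bubble_sort(test_vec) == sort(test_vec))  # should be TRUE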

Cognitive Information Analysis Seminar B

library(keras)
library(tensorflow)
tf$compat$v1$disable_eager_execution()


deprocess_image <- function(x) {
  dms <- dim(x)
  x <- x - mean(x) 
  x <- x / (sd(x) + 1e-5)
  x <- x * 0.1 
  x <- x + 0.5 
  x <- pmax(0, pmin(x, 1))
  array(x, dim = dms)
}

generate_pattern <- function(layer_name, filter_index, size = 150) {
  
  layer_output <- model$get_layer(layer_name)$output
  loss <- k_mean(layer_output[,,,filter_index]) 
  grads <- k_gradients(loss, model$input)[[1]]
  
  grads <- grads / (k_sqrt(k_mean(k_square(grads))) + 1e-5)
  
  iterate <- k_function(list(model$input), list(loss, grads))
  
  input_img_data <- 
    array(runif(size * size * 3), dim = c(1, size, size, 3)) * 20 + 128
  
  step <- 1
  for (i in 1:40) {
    c(loss_value, grads_value) %<-% iterate(list(input_img_data))
    input_img_data <- input_img_data + (grads_value * step) 
  }
  
  img <- input_img_data[1,,,]
  deprocess_image(img) 
}


library(grid)

library(keras)
model <- application_vgg16(
  weights = "imagenet", 
  include_top = FALSE
)
layer_name <- "block3_conv1"
filter_index <- 1
grid.raster(generate_pattern("block3_conv1", 1))
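
# (added sketch) the generated pattern can also be written to a file;
# the file name here is arbitrary
png("block3_conv1_filter1.png", width = 300, height = 300)
grid.raster(generate_pattern("block3_conv1", 1))
dev.off()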

Cognitive Information Analysis Seminar B

input_chiba <- layer_input(shape = c(28,28,1))

# note: the keras arguments are "filters" and "strides" (plural)
branch_a = input_chiba %>% 
  layer_conv_2d(filters = 32, kernel_size = 1,
                activation = "relu", strides = 2)

branch_b = input_chiba %>% 
  layer_conv_2d(filters = 32, kernel_size = 1,
                activation = "relu") %>% 
  layer_conv_2d(filters = 32, kernel_size = 2,
                activation = "relu", strides = 2)

branch_c = input_chiba %>% 
  layer_average_pooling_2d(pool_size = 2, strides = 2) %>% 
  layer_conv_2d(filters = 32, kernel_size = 1,
                activation = "relu")

branch_d = input_chiba %>% 
  layer_conv_2d(filters = 32, kernel_size = 1,
                activation = "relu") %>% 
  layer_conv_2d(filters = 32, kernel_size = 1,
                activation = "relu") %>% 
  layer_conv_2d(filters = 32, kernel_size = 2,
                activation = "relu", strides = 2)

concat = layer_concatenate(list(branch_a, branch_b, branch_c, branch_d))

output_chiba = concat %>% layer_flatten() %>%
  layer_dense(units = 64, activation = "relu") %>%
  layer_dense(units = 10, activation = "softmax")

chiba_model <- keras_model(input_chiba, output_chiba)

mnist <- dataset_mnist()
c(c(train_images, train_labels),c(test_images,test_labels)) %<-% mnist

train_images <- array_reshape(train_images,c(60000,28,28,1))
test_images <- array_reshape(test_images,c(10000,28,28,1))

train_images = train_images/255
test_images = test_images/255

train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)

chiba_model %>% compile(
  optimizer = "rmsprop",
  loss = "categorical_crossentropy",
  metrics = c("accuracy")
)

history <- chiba_model %>% fit(
  train_images,
  train_labels,
  epochs = 15,
  batch_size =64,
  validation_data = list(test_images,test_labels)
)
plot(history)
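
# (added) evaluating the multi-branch model on the held-out test set
chiba_model %>% evaluate(test_images, test_labels)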

Special Lectures on General Systems II: Cognitive and Social Models

# cognitive modeling
### ALCOVE full implementation
ALC.init<-function(size) {
  # size[1] = input dimension
  # size[2] = number of exemplars
  # size[3] = number of categories
  alpha=matrix(1,nrow=1,ncol=size[1])/size[1]
  w=matrix(rnorm(size[3]*size[2])*0.1,ncol=size[2])
  c=2            # gain
  phi=3          # decision strength
  lrA=0.2        # learning rate for attentions
  lrW=0.1        # learning rate for associations
  return(list(alpha = alpha,
              w = w,
              lrA = lrA,
              lrW = lrW,
              c = c,
              phi = phi))
}

# alcove forward process
alcove.forward <- function(parmSet, inp, exemplar){
  # note: use nrow(exemplar) here; n.exemp is not visible inside this function
  diff= matrix(abs(matrix(1,nrow(exemplar),1)%*%inp-exemplar),nrow=nrow(exemplar))
  exemp=exp(-parmSet$c*(diff%*%t(parmSet$alpha)))
  out=parmSet$w%*%exemp
  return(list(diff = diff, exemp = exemp, out = out))
}

# alcove backward process
alcove.backward <- function(res.forward,parmSet, target){
  err=target-res.forward$out
  dW=parmSet$lrW*res.forward$exemp%*%t(err)
  dA=-parmSet$lrA*(t(err)%*%parmSet$w*t(res.forward$exemp))%*%res.forward$diff*parmSet$c
  return(list(dW = dW, dA = dA))
}

# main function
alcove<-function(parmSet, inp, exemplar, targ,iter) {
  # ----ALCOVE for R ---
  # input arguments
  #  parmSet - parameter set
  # inp - input matrix (n_sample x n_dimension)
  # exemplar - exemplar matrix (n_sample x n_dimension)
  # targ - target matrix (n_sample x n_category)
  # iter - # of training epochs
  # ----ALCOVE for R ---
  # initialization
  inpDim = ncol(inp)
  n.inp = nrow(inp)
  n.exemp = nrow(exemplar)
  n.cat = ncol(targ)
  accu=rep(0,iter+1)
  attn=matrix(0,nrow=iter+1,ncol=inpDim)
  attn[1,]=parmSet$alpha
  # end initialization

  # main loop
  for (i_iter in 1:iter) {
    ro=sample(1:n.inp, n.inp)
    prob_temp=0;
    for (i_tr in 1:n.inp) {
      res.forward <- alcove.forward(parmSet, inp[ro[i_tr],], exemplar)
      res.backward <- alcove.backward(res.forward, parmSet, targ[ro[i_tr],])
      parmSet$w = parmSet$w+t(res.backward$dW)
      parmSet$alpha = parmSet$alpha+res.backward$dA
      parmSet$alpha[which(parmSet$alpha<0)]=0
      pT=(exp(parmSet$phi*res.forward$out)/sum(exp(parmSet$phi*res.forward$out)))*targ[ro[i_tr],]
      prob_temp=prob_temp+pT[which(!pT==0)]
    }
    accu[i_iter+1]=prob_temp/n.inp
    attn[i_iter+1,]=parmSet$alpha
  }
  attnN=attn/apply(attn,1, sum)
  out=matrix(0,nrow=n.cat,ncol=n.inp)
  for (i_tr in 1:n.inp) {
    res.forward<-alcove.forward(parmSet, inp[i_tr,], exemplar)
    out[,i_tr]=res.forward$out
  }
  return(list(attn=attn,attnN=attnN,accu=accu,out=out,ps=parmSet))
}


exemplar = matrix(c(0,0,0,
                    0,0,1,
                    0,1,0,
                    0,1,1,
                    1,0,0,
                    1,0,1,
                    1,1,0,
                    1,1,1),byrow=T,nrow=8)
inp = exemplar

target1 = matrix(c(0,0,0,0,1,1,1,1,
                   1,1,1,1,0,0,0,0),nrow=8)

target2 = matrix(c(1,1,0,0,0,0,1,1,
                   0,0,1,1,1,1,0,0),nrow=8)

target3 = matrix(c(1,1,1,0,0,1,0,0,
                   0,0,0,1,1,0,1,1),nrow=8)

target4 = matrix(c(1,1,1,0,1,0,0,0,
                   0,0,0,1,0,1,1,1),nrow=8)

target5 = matrix(c(1,1,1,0,0,0,0,1,
                   0,0,0,1,1,1,1,0),nrow=8)

target6 = matrix(c(1,0,0,1,0,1,1,0,
                   0,1,1,0,1,0,0,1),nrow=8)

seed.num = 1;n.train = 50
set.seed(seed.num)
parmSet<-ALC.init(c(3,8,2))
result1<-alcove(parmSet,inp,exemplar,target1,n.train)
plot(result1$accu,type='o',lwd=2,pch=20,col=1)
set.seed(seed.num)
parmSet<-ALC.init(c(3,8,2))
result2<-alcove(parmSet,inp,exemplar,target2,n.train)
lines(result2$accu,type='o',lwd=2,pch=20,col=2)
set.seed(seed.num)
parmSet<-ALC.init(c(3,8,2))
result3<-alcove(parmSet,inp,exemplar,target3,n.train)
lines(result3$accu,type='o',lwd=2,pch=20,col=3)
set.seed(seed.num)
parmSet<-ALC.init(c(3,8,2))
result4<-alcove(parmSet,inp,exemplar,target4,n.train)
lines(result4$accu,type='o',lwd=2,pch=20,col=4)
set.seed(seed.num)
parmSet<-ALC.init(c(3,8,2))
result5<-alcove(parmSet,inp,exemplar,target5,n.train)
lines(result5$accu,type='o',lwd=2,pch=20,col=5)
set.seed(seed.num)
parmSet<-ALC.init(c(3,8,2))
result6<-alcove(parmSet,inp,exemplar,target6,n.train)
lines(result6$accu,type='o',lwd=2,pch=20,col=6)
legend("bottomright",paste("type",1:6,sep=''),col=1:6,lwd=2,pch=20)
### end ALCOVE

# evo. game
food = 6; cost = 10
A = 0.5*(food-cost); B = food
C = 0; D = food/2
pay.mat = matrix(c(A,B,C,D),nrow=2,byrow=TRUE)
dt = 0.01;max_time=1000
p = rep(0,max_time)
q = rep(0,max_time)
p[1] = 0.2
q[1] = 1 - p[1]
for (t in 1:max_time){
  prob.mat = outer(c(p[t],q[t]),c(p[t],q[t]))
  W.ave = sum(prob.mat*pay.mat)
  W.h = sum(c(p[t],q[t])*pay.mat[1,])
  W.d = sum(c(p[t],q[t])*pay.mat[2,])
  p[(t+1)] = p[t]+(p[t]*(W.h-W.ave)/W.ave)*dt
  q[(t+1)] = q[t]+(q[t]*(W.d-W.ave)/W.ave)*dt
}

plot(p,type='l',lwd=2,col='red',ylim=c(0,1))
lines(q,type='l',lwd=2,col='blue')
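
# check (added): p and q should remain a probability distribution,
# so p + q should stay at 1 up to numerical error
range(p + q)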

# cake game
n.cake = 10
pay.mat = matrix(0,n.cake+1,n.cake+1)
for (i.cake in 1:n.cake){
  pay.mat[(i.cake+1),1:(n.cake-i.cake+1)] =i.cake
}

p.cake = runif(n.cake+1)
p.cake = p.cake/sum(p.cake)
max.time = 50
dt = 0.01
t = seq(0,max.time,dt)
n.iter = length(t)
p.hist = matrix(0,nrow = n.iter, ncol = (n.cake+1))
p.hist[1,] = p.cake

for (i.time in 2:n.iter){
  W = colSums(p.cake*t(pay.mat))
  W.ave = sum(outer(p.cake,p.cake)*pay.mat)
  p.cake = p.cake + p.cake*(W - W.ave)/W.ave * dt
  p.hist[i.time,] = p.cake
}

plot(p.hist[,1],ylim=c(0,1),type='l',lwd=2,ylab = 'Proportion',xlab="time")
for (i.strat in 2:(n.cake+1)){
  lines(p.hist[,i.strat],col=i.strat,lwd=2)
}
legend("topleft",paste("request = ",0:10),col=1:(n.cake+1),lwd =2,cex=0.75)

### fighting couple
fighting_couple<-function(a,b,c,d) {
  timeSep=0.05;ts=seq(1,50,timeSep);n_ts=length(ts)
  x=matrix(0,nrow=n_ts,ncol=1)
  y=matrix(0,nrow=n_ts,ncol=1)
  initX=c(rep(-40,5),rep(-20,5),rep(0,5),rep(20,5),rep(40,5))
  initY=c(rep(c(-40,-20,0,20,40),5))
  initX=initX[-13]
  initY=initY[-13]
  lengthINI=length(initX)
  for (i_ini in 1:lengthINI) {
    x[1]=initX[i_ini];y[1]=initY[i_ini];
    for (i_gen in 2:n_ts) {
      x[i_gen]=x[i_gen-1]+(a*x[i_gen-1]+b*y[i_gen-1])*timeSep
      y[i_gen]=y[i_gen-1]+(c*x[i_gen-1]+d*y[i_gen-1])*timeSep
    }
    if (i_ini==1) {
      plot(x,y,xlim=c(-50,50),ylim=c(-50,50),col=4,type='l',lwd=2,
           xlab="X's Action",ylab="Y's Action")
      arrows(x[2],y[2],x[3],y[3],col=4,lwd=2,length=0.15)} else {
        lines(x, y, col=4, lwd=2)
        arrows(x[2], y[2], x[3], y[3], col=4,lwd=2, length=0.15)
      }
  }
}

par(mfrow=c(2,3))
fighting_couple(-1,0.0,0.5,-1)
fighting_couple(-1,2,-1,-1)
fighting_couple(0,-1,1,0)
fighting_couple(1,-2,2,0)
fighting_couple(1,0,0.5,1)
fighting_couple(1,-4,-4,0)
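
# (added note) each phase portrait follows from the eigenvalues of the
# coefficient matrix rbind(c(a,b), c(c,d)); e.g., for the first panel both
# eigenvalues are negative and real, giving a stable node
eigen(matrix(c(-1, 0.0, 0.5, -1), nrow = 2, byrow = TRUE))$values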

Special Lectures on General Systems II: Stochastic Optimization

naive.stochOptim <- function(obj.func, x, n.iter, width){
  opt.value <- do.call(obj.func, list(x))
  opt.hist = matrix(0, nrow = n.iter, ncol = 5)
  opt.hist[1,] = c(x, x, opt.value, opt.value, 1)
  for (i.iter in 2:n.iter){
    accept = 0
    temp.x <-  x + rnorm(1, mean = 0, sd = width)
    temp.value <- do.call(obj.func, list(temp.x))
    if (temp.value <  opt.value){
      x = temp.x
      opt.value = temp.value
      accept = 1
    }
    opt.hist[i.iter, ] = c(x, temp.x, opt.value, temp.value, accept)
  }
  return(data.frame(opt.hist))
}

set.seed(50)
n.iter =500
fun01<-function(x){x^2+2*x}
res <- naive.stochOptim(fun01,3,n.iter,1)

# visualizing results
par(mfrow=c(2,1))
# upper panel
plot(res$X2,res$X4,col='red',pch=20,cex=2,xlab = "x", ylab='f(x)')
legend("topleft",c("accepted","tested"),pch=c(20,20),col=c("black","red"))
x.seq = seq(min(res$X2),max(res$X2),length.out = n.iter)
lines(x.seq,fun01(x.seq))
points(res$X1,res$X3,col='black',pch=20)
# lower panel
plot(res$X3,1:n.iter,type='n',pch=20,xlim=c(min(res$X2),max(res$X2)),xlab='x',ylab="Trial")
for (i.iter in 2:n.iter){
  lines(c(res$X1[(i.iter-1)],res$X2[i.iter]),c((i.iter-1),i.iter),type='o',pch=20,col='red',cex=1.5)
}
points(res$X1,1:n.iter,type='o',pch=20)
legend("topright",c("accepted","tested"),pch=c(20,20),col=c("black","red"))

SimAneal01<-function(func, initXY, maxItr=1000, C=1, eta=0.99, width=10) {
  x=initXY
  opt.value = do.call(func,list(x))
  n.var = length(x)
  opt.hist=matrix(0, nrow=maxItr, ncol=5)
  opt.hist[1,]=c(x,x,opt.value,opt.value,0)
  for (i_loop in 2:maxItr) {
    accept = 0
    temp.x = x + rnorm(n.var, mean = 0, sd=width)
    temp.value= do.call(func,list(temp.x))
    delta=temp.value-opt.value;
    prob=1/(1+exp(delta/(C*width)))
    if (runif(1) < prob) {
      x = temp.x;
      opt.value = temp.value;
      accept = 1
    }
    opt.hist[i_loop,]=c(x, temp.x, opt.value, temp.value, accept);
    width=width*eta
  }
  return(data.frame(opt.hist))
}

set.seed(48)
n.iter = 500
res <- SimAneal01(fun01, 3, n.iter, 1, 0.985, 1)
# visualizing results
par(mfrow=c(2,1))
# upper panel
plot(res$X2,res$X4,col='red',pch=20,cex=2,xlab = "x", ylab='f(x)')
legend("topleft",c("accepted","tested"),pch=c(20,20),col=c("black","red"))
x.seq = seq(min(res$X2),max(res$X2),length.out = n.iter)
lines(x.seq,fun01(x.seq))
points(res$X1,res$X3,col='black',pch=20)
# lower panel
plot(res$X3,1:n.iter,type='n',pch=20,xlim=c(min(res$X2),max(res$X2)),xlab='x',ylab="Trial")
for (i.iter in 2:n.iter){
  lines(c(res$X1[(i.iter-1)],res$X2[i.iter]),c((i.iter-1),i.iter),type='o',pch=20,col='red',cex=1.5)
}
points(res$X1,1:n.iter,type='o',pch=20)
legend("topright",c("accepted","tested"),pch=c(20,20),col=c("black","red"))


### TSP
TSP_inv<-function(route,len) {
  len_route=length(route)
  invP=sample(1:len_route,1)
  route[invP:min(len_route,(invP+len-1))]=rev(route[invP:min(len_route,(invP+len-1))])
  return(route)
}

TSP_switch<-function(route){
  len_route=length(route)
  switchP=sample(1:len_route,2)
  route[switchP]=route[rev(switchP)]
  return(route)
}

TSP_trans<-function(route){
  len_route=length(route)
  transP=sample(1:len_route,2);
  tempR=route[-transP[1]]
  if (transP[2]==1){
    tempR=c(route[transP[1]],tempR)
  } else if (transP[2]==len_route) {
    tempR=c(tempR,route[transP[1]])
  } else {
    tempR=c(tempR[1:(transP[2]-1)],route[transP[1]],tempR[(transP[2]):
                                                            (len_route-1)])
  }
  return(tempR)
}

FUN_initTSP<-function(n_city=10) {
  return(matrix(runif(n_city*2,1,100),nrow=n_city,ncol=2))
}

FUN_costTSP<-function(route,cities) {
  route=c(route,route[1]);n_cities=nrow(cities);totDist=0;
  for (i_cities in 1:n_cities) {
    totDist=totDist+dist(cities[route[i_cities:(i_cities+1)],])
  }
  return(totDist)
}


TSP_demo<-function(n_city=20, maxItr=1000 , C = 1, eta = 0.99, TEMP = 50) {
  loc=FUN_initTSP(n_city);route=1:n_city
  optDist=FUN_costTSP(route,loc)
  optHist=matrix(0,nrow=maxItr,ncol=(length(route)+1))
  optHist[1,]=c(route,optDist)
  for (i_loop in 2:maxItr) {
    rand.op=sample(c('inv','sw','trans'),1,prob=c(0.75,0.125,0.125))
    if (rand.op=='inv') {
      new.route=TSP_inv(route,sample(2:n_city,1))
    } else if (rand.op=='sw') {
      new.route=TSP_switch(route)
    } else { new.route=TSP_trans(route)}
    new.dist=FUN_costTSP(new.route,loc)
    delta=new.dist-optDist
    prob=1/(1+exp(delta/(C*TEMP)))
    if (runif(1) < prob) {
      route=new.route;optDist=new.dist;
    }
    optHist[i_loop,]=c(route,optDist);
    TEMP=TEMP*eta
  }
  par(mfrow=c(1,2))
  plot(rbind(loc,loc[1,]),type='o',pch=20,cex=2.5, col='red',
       xlab='location @X',ylab='location @Y',main='Initial route')
  plot(loc[optHist[maxItr,c(1:n_city,1)],],type='o',pch=20, col='magenta',
       cex=2.5,xlab='location @X',ylab='location @Y',main='Optimized route')
  return(optHist)
}

TSP_demo()
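
# (added sketch) capturing the returned history to see how the tour length shrinks;
# the last column of the history holds the current total distance
optHist <- TSP_demo()
par(mfrow=c(1,1))
plot(optHist[, ncol(optHist)], type='l', xlab="iteration", ylab="total distance")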

### GA / ES
# genetic algorithm - identify the best linear model
GA_recomb<-function(G) {
  nPop=nrow(G);
  nVar=ncol(G);
  child = G;
  G.permuted = G[sample(1:nPop),]
  recomb.idx=which(matrix(sample(0:1,nPop*nVar,replace=TRUE),nrow=nPop)==1)
  child[recomb.idx] = G.permuted[recomb.idx]
  return(child)
}

GA_mutate = function(child, p){
  n.pop = nrow(child)
  n.var = ncol(child)
  mut.mat= matrix((runif(n.pop*n.var) < p), nrow = n.pop)
  child = abs(child-mut.mat)
  return(child)
}

GA_survive<-function(G, child, fitG, fitC){
  nPop=nrow(G);
  fitT=c(fitG,fitC);
  fitMax=sort(fitT,index.return=T,decreasing = T)
  tempX=rbind(G,child);
  G=tempX[fitMax$ix[1:nPop],]
  return(G)
}

GA<-function(G, nGen,p,X,Y){
  optHist=matrix(0,nrow=nGen,ncol=1)
  G.hist = matrix(0,nrow=nGen,ncol = ncol(G))
  nPop = nrow(G)
  nVar = ncol(G)
  fitG = rep(0,nPop)
  fitC = fitG
  for (i.pop in 1:nPop){
    dat.temp = X[,which(G[i.pop,]==1)]
    fitG[i.pop] = summary(lm(Y~dat.temp))$adj.r.squared
  }
  optHist[1]=max(c(fitG))
  G.hist[1,] = G[which.max(fitG),]
  for (i_gen in 2:nGen) {
    child<-GA_recomb(G)
    child<-GA_mutate(child,p)
    # assuming same numbers of set of genes
    for (i.pop in 1:nPop){
      dat.temp = X[,which(G[i.pop,]==1)]
      fitG[i.pop] = summary(lm(Y~dat.temp))$adj.r.squared
      if (sum(child[i.pop,])==0){
        child[i.pop,sample(1:ncol(child),sample(1:nVar,1))]=1
      }
      dat.temp = X[,which(child[i.pop,]==1)]
      fitC[i.pop] = summary(lm(Y~dat.temp))$adj.r.squared
    }
    G<-GA_survive(G, child, fitG, fitC)
    optHist[i_gen]=max(c(fitG,fitC))
    G.hist[i_gen, ] = G[1,]
  }
  return(list(G = G, optHist = optHist, G.hist = G.hist))
}

set.seed(19)
nVar = 10
nData = 50
X=matrix(rnorm(nVar*nData,mean=10,sd=5),ncol=nVar);
y=X%*%c(-2,0,2,0,2,0,-3,0,-2,0)+rnorm(nData,mean=0,sd=8);
summary(lm(y~X[,seq(1,9,2)]))
summary(lm(y~X))
G = matrix(sample(0:1,300,replace = T),nrow=30)
res<-GA(G,100,0.1,X,y)
head(res$G)
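
# (added) trace of the best fitness (adjusted R^2) across generations
plot(res$optHist, type='l', xlab="generation", ylab="best adj. R^2")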

# GA - challenging task
# building labels for interaction terms, step by step
C = combn(3,2)
i.comb = 1   # e.g., the first combination
temp.lab = toString(paste("X", C[, i.comb], sep = ""))
gsub(",", "*", temp.lab)

var.labels = c()   # initialize before accumulating labels
n.comb = ncol(C)
for (i.comb in 1:n.comb){
  temp.label = toString(paste("X", C[, i.comb], sep = ""))
  var.labels = c(var.labels, gsub(",", "*", temp.label))
}

mk.labels <- function(C){
  var.labels = c()
  n.comb = ncol(C)
  for (i.comb in 1: n.comb){
    temp.label = toString(paste("X",C[ , i.comb], sep = ""))
    var.labels = c(var.labels, gsub(",", "*", temp.label) )
  }
  return(var.labels)
}
n.var = 3; max.interaction = 3; nData = 50   # small example with 3 variables
var.labels = paste("X", 1:n.var, sep = "")

# interaction terms
for (i.interaction in 2:max.interaction){
  combination = combn(n.var, i.interaction)
  var.labels = c(var.labels, mk.labels(combination))
}
model.def = paste("Y ~", gsub(",", "+", toString(var.labels[c(1,1,1,1,1,0,0) == 1])))
X = matrix(rnorm(n.var*nData,mean=10,sd=5),ncol=n.var);
y = X%*%c(1,1,-2)+rnorm(nData,mean=0,sd=3);
dat = data.frame(y=y,X1=X[,1],X2=X[,2],X3=X[,3])

model.lm = lm(model.def, dat)

## full implementation
# function to make model labels for a particular set of combn
mk.labels <- function(C){
  var.labels = c()
  n.comb = ncol(C)
  for (i.comb in 1: n.comb){
    temp.label = toString(paste("X",C[ , i.comb], sep = ""))
    #  options for model specification
    #  Opt 1: models that include all lower-degree interaction term
    #  Opt 2: models that do NOT include any lower-degree interaction term
    #
    # Opt1
    #var.labels = c(var.labels, gsub(",", "*", temp.label) )
    #
    # Opt2
    var.labels = c(var.labels, gsub(",", ":", temp.label) )
  }
  return(var.labels)
}

# function to make all model labels given number of variable
# and maximum degrees of interactions
mk.labels.all <- function(n.var, max.interaction){
  var.labels = paste("X", 1:n.var, sep = "")
  for (i.interaction  in 2:max.interaction ){
    combination = combn(n.var, i.interaction)
    var.labels = c(var.labels, mk.labels(combination))
  }
  return(var.labels)
}

# creating model labels
# assuming 5 variables & up to 5th-degree interaction terms
var.labels<-mk.labels.all(5,5)

# generating data set
set.seed(30)
nData = 1000; n.var = 5
X=matrix(rnorm(n.var*nData,mean=10,sd=5),ncol=n.var);
Y=X[,seq(1,5)]%*%c(1,1,-2,-1,1) +
  apply(X[,c(1,3)],1,prod)*0.1 +
  apply(X[,c(2,4)],1,prod)*-0.1 +
  apply(X[,c(1,3,5)],1,prod)*0.01 +
  apply(X[,c(2,4,5)],1,prod)*0.01+
  rnorm(nData,mean=0,sd=10);

dat = data.frame(Y,X)
colnames(dat) <- c('Y', paste("X",1:n.var,sep=""))
# checking fit of the "TRUE" model
summary(lm(Y~X1*X3+X2*X4+X1:X3:X5+X2:X4:X5+X5, dat))
AIC(lm(Y~X1*X3+X2*X4+X1:X3:X5+X2:X4:X5+X5, dat))

# initializing parent population
nPop = 50
G = matrix(sample(0:1,nPop*length(var.labels),replace=TRUE,prob = c(0.8, 0.2)),nrow=nPop)

# recombination
GA_recomb<-function(G) {
  nPop=nrow(G);
  nVar=ncol(G);
  child = G;
  G.permuted = G[sample(1:nPop),]
  recomb.idx=which(matrix(sample(0:1,nPop*nVar,replace=T),nrow=nPop)==1)
  child[recomb.idx] = G.permuted[recomb.idx]
  return(child)
}

# mutation
GA_mutate = function(child, p){
  n.pop = nrow(child)
  n.var = ncol(child)
  mut.mat= matrix((runif(n.pop*n.var) < p), nrow = n.pop)
  child = abs(child-mut.mat)
  return(child)
}

# surviver selection - works with either min or max prob.
GA_survive<-function(G, child, fitG, fitC, mnmx = "min"){
  nPop=nrow(G);
  fitT=c(fitG,fitC);
  if (mnmx == "max"){
    fitMax=sort(fitT,index.return=TRUE,decreasing = TRUE)
  } else {
    fitMax=sort(fitT,index.return=TRUE)
  }
  tempX=rbind(G,child);
  G=tempX[fitMax$ix[1:nPop],]
  return(G)
}

# main function
GA<-function(G, nGen,p,dat){
  optHist=matrix(0,nrow=nGen,ncol=1)
  G.hist = matrix(0,nrow=nGen,ncol = ncol(G))
  nPop = nrow(G)
  nVar = ncol(G)
  fitG = rep(0,nPop)
  fitC = fitG
  for (i.pop in 1:nPop){
    model.def = paste("Y ~", gsub(",", "+", toString(var.labels[G[i.pop,] == 1])))
    #fitG[i.pop] = summary(lm(model.def,dat))$adj.r.squared
    fitG[i.pop] = AIC(lm(model.def,dat))
  }
  # AIC is minimized, so track the minimum
  optHist[1]=min(fitG)
  G.hist[1,] = G[which.min(fitG),]

  # to display progress
  prog.prop = round(seq(0,nGen,length.out=10))
  prog.report = paste(seq(0,100,10),"% done\n",sep="")
  for (i_gen in 2:nGen) {
    if (any(i_gen == prog.prop)){
      cat(prog.report[which(i_gen==prog.prop)])
    }
    child <- GA_recomb(G)
    child <- GA_mutate(child,p)
    # assuming same numbers of set of genes
    for (i.pop in 1:nPop){
      model.def = paste("Y ~", gsub(",", "+", toString(var.labels[G[i.pop,] == 1])))
      #fitG[i.pop] = summary(lm(model.def,dat))$adj.r.squared
      fitG[i.pop] = AIC(lm(model.def,dat))
      if (sum(child[i.pop,],na.rm=TRUE)==0){
        child[i.pop,sample(1:ncol(child),sample(1:nVar,1))]=1
      }
      model.def = paste("Y ~", gsub(",", "+", toString(var.labels[child[i.pop,] == 1])))
      #fitC[i.pop] = summary(lm(model.def,dat))$adj.r.squared
      fitC[i.pop] = AIC(lm(model.def,dat))
    }

    G <- GA_survive(G, child, fitG, fitC, "min")
    optHist[i_gen]=min(c(fitG,fitC))
    G.hist[i_gen, ] = G[1,]
  }
  return(list(G = G, optHist = optHist, G.hist = G.hist))
}

# running simulation
res = GA(G,1000,0.15, dat)
model.def = paste("Y ~", gsub(",", "+", toString(var.labels[res$G[1,] == 1])))
summary(lm(model.def,dat))
AIC(lm(model.def,dat))
plot(res$optHist,type='l',xlab="generation",ylab="AIC")
var.labels[which(res$G[1,]==1)]

### end extensive GA

# Evolutionary Strategy - estimating coefficient
ES_recomb<-function(G) {
  nParent=nrow(G$x);nVar=ncol(G$x)
  child = G;
  for (i_child in 1:nParent) {
    parentID=sample(1:nParent,2)
    coID=sample(c(0,1),nVar,replace=T)
    child$x[i_child,]=G$x[parentID[1],]
    child$x[i_child,which(coID==1)]=G$x[parentID[2],which(coID==1)]
    child$sigma[i_child,]=0.5*(G$sigma[parentID[1],]+G$sigma[parentID[2],])
  }
  return(child)
}

ES_mutate<-function(child,tau){
  nChild=nrow(child$x);nVar=ncol(child$x)
  child$sigma<-child$sigma*exp(matrix(rnorm(nChild*nVar)*tau,nrow=nChild))
  child$x=child$x+child$sigma*matrix(rnorm(nChild*nVar),nrow=nChild,ncol=nVar)
  return(child)
}

ES_survive<-function(G, child, fitG, fitC){
  nParent=nrow(G$x);
  fitT=c(fitG,fitC);
  fitMin=sort(fitT,index.return=T)
  tempX=rbind(G$x,child$x);
  tempS=rbind(G$sigma,child$sigma)
  G$x=tempX[fitMin$ix[1:nParent],]
  G$sigma=tempS[fitMin$ix[1:nParent],]
  return(G)
}

ES<-function(G,func, nGen, tau,X,y){
  optHist=matrix(0,nrow=nGen,ncol=1)
  G.hist = matrix(0,nrow=nGen,ncol = ncol(G$x))
  fitG = apply(G$x,1,func,X,y)
  optHist[1] = min(fitG)
  G.hist[1,] = G$x[which.min(fitG),]
  child = G;
  for (i_gen in 2:nGen) {
    child<-ES_recomb(G)
    child<-ES_mutate(child,tau)
    fitG = apply(G$x,1,func,X,y)
    fitC = apply(child$x,1,func,X,y)
    G<-ES_survive(G, child, fitG, fitC)
    optHist[i_gen]=min(c(fitG,fitC))
    G.hist[i_gen, ] = G$x[1,]
  }
  return(list(G = G, optHist = optHist, G.hist = G.hist))
}

set.seed(20); nData = 100
X=matrix(rnorm(9*nData,mean=10,sd=2),ncol=9);X=cbind(rep(1,nData),X)
y=X%*%c(10,2,5,-3,-5,0,0,0,0,0)+rnorm(nData,mean=0,sd=2);
fun04<-function(b,X,y){
  yhat<-X%*%b
  return(sum((y-yhat)^2))
}
G = list()
G$x = matrix(rnorm(10*30), ncol=10)
G$sigma = matrix(runif(10*30,0.5,1.5), ncol=10)

res=ES(G, fun04, 1000, 1,X,y)
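
# (added) comparing the best individual with the generating coefficients,
# and tracing the sum of squared errors across generations
rbind(estimated = res$G.hist[1000, ], true = c(10,2,5,-3,-5,0,0,0,0,0))
plot(res$optHist, type='l', xlab="generation", ylab="SSE")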

# PSO - optim. 2-variable function
PSO<-function(G, wP, wG, func, maxIter){
  swarm.hist = array(0, c(nrow(G), ncol(G), maxIter))
  swarm.hist[,,1]=G
  p.b.hist = apply(G,1,func)
  global.best.v = min(p.b.hist)
  p.Best = G
  g.Best = matrix(G[which.min(p.b.hist),],nrow=nrow(G),ncol=ncol(G),byrow=T)
  v = matrix(0,nrow = nrow(G),ncol = ncol(G))
  for (i.iter in 2:maxIter){
    v = v + wP*runif(1)*(p.Best - G) + wG*runif(1)*(g.Best - G)
    G = G + v
    fitG = apply(G,1,func)
    if (min(fitG) < global.best.v){
      g.Best = matrix(G[which.min(fitG),],nrow=nrow(G),ncol=ncol(G),byrow=T)
      global.best.v = min(fitG)
    }
    idx = which(fitG < p.b.hist)
    p.Best[idx,] = G[idx,]
    p.b.hist= fitG
    swarm.hist[,,i.iter]=G
  }
  return(swarm.hist)
}

# running PSO
par(mfrow=c(1,1))
fun03<-function(x) {x[1]^4-16*x[1]^2-5*x[1]+x[2]^4-16*x[2]^2-5*x[2]}
funZ<-function(x,y) {x^4-16*x^2-5*x+y^4-16*y^2-5*y} # elementwise version of fun03 for outer()
x<-seq(-5, 5,length.out=100);y<-x;z<-outer(x,y,funZ)
contour(x,y,z,nlevels=30,drawlabels=F)
nSwarm = 100; nVar = 2;
G=matrix(rnorm(nVar*nSwarm,mean=0,sd=0.1),ncol=nVar)
res<-PSO(G,0.1,0.1,fun03,500)
lines(res[1,1,],res[1,2,],type='o',col='red')

Fundamentals of Data Analysis B: Artificial Neural Networks

library(nnet)
dat<-read.csv("http://www.matsuka.info/data_folder/tdkReg01.csv")
set.seed(5)
x = dat[,1:3]
y = dat[,4]
dat.nnet = nnet(x,y, size = 150, linout= TRUE, maxit = 1000)
nnet.pred <-predict(dat.nnet,dat)
cor(dat.nnet$fitted.values,dat$sales)^2

n.data = nrow(dat);
n.sample = n.data*0.6;
n.rep = 100
trainNN.cor = rep(0,n.rep); trainLM.cor = rep(0,n.rep)
testNN.cor = rep(0,n.rep); testLM.cor = rep(0,n.rep)
for (i.rep in 1:n.rep){
  randperm = sample(1:n.data)
  train.idx = randperm[1:n.sample]
  test.idx = randperm[(n.sample+1):n.data]
  dat.nnet <- nnet(sales~.,size = c(10), linout=T,decay= 0.01, maxit=1000, data = dat[train.idx,])
  dat.lm <-lm(sales~.,data=dat[train.idx, ])
  trainNN.cor[i.rep] <- cor(predict(dat.nnet,dat[train.idx, ]), dat[train.idx,]$sales)
  trainLM.cor[i.rep] <- cor(predict(dat.lm,dat[train.idx, ]), dat[train.idx,]$sales)
  testNN.cor[i.rep] <- cor(predict(dat.nnet,dat[test.idx, ]), dat[test.idx,]$sales)
  testLM.cor[i.rep] <- cor(predict(dat.lm,dat[test.idx, ]), dat[test.idx,]$sales)
}
print(c(mean(trainNN.cor,na.rm=T),mean(testNN.cor,na.rm=T)))
print(c(mean(trainLM.cor,na.rm=T),mean(testLM.cor,na.rm=T)))
print(c(max(trainNN.cor,na.rm=T),max(testNN.cor,na.rm=T)))
print(c(max(trainLM.cor,na.rm=T),max(testLM.cor,na.rm=T)))
print(c(min(trainNN.cor,na.rm=T),min(testNN.cor,na.rm=T)))
print(c(min(trainLM.cor,na.rm=T),min(testLM.cor,na.rm=T)))

dat<-read.csv("http://matsuka.info/data_folder/tdkDA01.csv", header=T)
dat.nnet<-nnet(class~.,dat,size=30,maxit=1000,decay=0.05)
dat.pred<-predict(dat.nnet,dat,type="class")
table(dat.pred,dat$class)

dat.glm<-glm(class~., family="binomial",dat)
glm.pred<-predict(dat.glm, dat, type="response")>0.5
table(glm.pred,dat$class)

dat<-read.csv("http://www.matsuka.info/data_folder/cda7-16.csv")
wts = rep(1,nrow(dat))
wts[which(dat$survival=="no")]=45
dat.nnet<-nnet(survival~., weights=wts, dat, size=100, maxit=1000, decay=0.1)
dat.pred<-predict(dat.nnet,dat,type="class")
table(dat.pred,dat$survival)

dat<-read.csv("http://matsuka.info/data_folder/tdkDA02.csv",header=T)
class.id<-class.ind(dat$class)
x = dat[,1:6]
dat.nnet<-nnet(x, class.id, size=30, maxit=1000, decay=0.01, softmax=TRUE)
max.id = apply(dat.nnet$fitted.values,1,which.max)
table(max.id,dat$class)

dat<-read.table("http://www.matsuka.info/data_folder/tdkPCA01.txt")
dat.nnet<-nnet(dat,dat,size=2, maxit=1000, decay=0.01, linout=TRUE)

Special Lectures on General Systems II: Bayesian Statistics

library(rjags)
source("http://www.matsuka.info/univ/course_folder/HDI_revised.txt")

island.hopping2 <- function(n.rep=1e5, init.st=4) {
  # example from DBDA 2nd Ed. (Kruschke) ch. 07
  # intro to MCMC, island hopping

  # initialization
  state = 1:7
  curr.st = init.st
  state.counter = rep(0,7)
  state.counter[curr.st] = 1
  state.history=rep(0, n.rep)
  state.history[1]=curr.st

  prob.propose = matrix(1/6, nrow=7,ncol=7)
  diag(prob.propose)<-0

  # main loop
  for (i.rep in 2:n.rep) {
    destination = sample(state, 1, prob=prob.propose[curr.st,])
    prob.move = min(destination/curr.st, 1)
    if (runif(1) < prob.move) {
      curr.st = destination
    }
    state.history[i.rep] = curr.st
    state.counter[curr.st] = state.counter[curr.st]+1
  }
  par(mfrow=c(2, 1))
  par(mai=c(0, 1, 1, 1))
  par(mar=c(4, 3, 1, 3))
  barplot(state.counter, xlab="theta", ylab="Frequency")
  plot(state.history, 1:n.rep, type='o', log='y', xlab="theta", ylab="Time Step (log)", col="orange")
}

island.hopping2(10000, 4)

metropolis.ex01 <- function(n.iter=1e6, theta.init=0.1, sigma=0.2, plot.yn = T){
  # example from DBDA 2nd Ed. (Kruschke) ch. 07
  # metropolis algo. with 1 parameter

  # "posterior of theta" function
  posterior.theta <- function(theta, N, z, a, b) {
    theta^z * (1-theta)^(N-z) * theta^(a-1) * (1 - theta)^(b-1) / beta(a,b)
  }

  # initialization
  theta.history = rep(0, n.iter)
  theta.current = theta.init
  theta.history[1] = theta.current
  # values given in text
  mu = 0
  N = 20
  z = 14
  a = 1
  b = 1

  # main loop
  for (i_iter in 2:n.iter) {
    theta.proposed = theta.current + rnorm(1, mean=mu, sd=sigma)
    if (theta.proposed < 0 | theta.proposed > 1) {
      p.move = 0
    } else {
      p.current = posterior.theta(theta.current, N, z, a, b)
      p.proposed = posterior.theta(theta.proposed, N, z, a, b)
      p.move = min(p.proposed/p.current, 1)
    }
    if (runif(1) < p.move) {
      theta.current = theta.proposed
    }
    theta.history[i_iter] = theta.current
  }

  # plotting results
  if (plot.yn == T) {
    par(mfrow = c(3, 1))
    hist(theta.history, nclass = 100, col = "orange", probability = T, xlab = "theta")
    den=density(theta.history)
    lines(den)
    plot(theta.history[(n.iter-100):n.iter], ((n.iter-100):n.iter), type = 'o', xlim = c(0,1), xlab="theta", ylab = "step in chain")
    plot(theta.history[1:100],1:100, type = 'o', xlim = c(0,1), xlab = "theta", ylab = "step in chain")
  }
  return(theta.history)
}

res = metropolis.ex01(10000, 0.1)
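
# (added) HDI.plot from the sourced HDI_revised.txt summarizes the chain
HDI.plot(res)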

# initialization
n.iter = 10000; sigma = 0.1; counter = 0
z = c(6, 2); N = c(8, 7)
a = c(2, 2); b = c(2, 2);
n.par = 2
th.hist = matrix(0, nrow = n.iter*n.par, ncol = n.par)
theta = runif(2)

# function to calc. prob. move
prob.gibbs <- function(theta, proposed, N, z, a, b, idx){
  p.th=dbeta(theta[idx], z[idx]+a[idx], N[idx]-z[idx]+b[idx])
  p.pro=dbeta(proposed, z[idx]+a[idx], N[idx]-z[idx]+b[idx])
  return(p.pro/p.th)
}

# main loop
for (i.iter in 1:n.iter){
  for (i.par in 1:n.par){
    proposed = theta[i.par] + rnorm(1, mean=0, sd=sigma)
    if (proposed > 1) {proposed = 1}
    if (proposed < 0) {proposed = 0}
    p.move= min(1, prob.gibbs(theta, proposed, N, z, a, b, i.par))
    if (runif(1) < p.move){
      theta[i.par] = proposed
    }
    counter = counter + 1
    th.hist[counter, ] = theta
  }
}
par(mfrow=c(3,1))
HDI.plot(th.hist[,1])
HDI.plot(th.hist[,2])
plot(th.hist[,1],th.hist[,2], type='o',cex=0.1,xlim = c(0,1),ylim=c(0,1))
par(mfrow=c(1,1))

model.txt = "
model {
  for ( i_data in 1:Ntotal ) {
    y[ i_data ] ~ dbern( theta )
  }
  theta ~ dbeta( 1, 1 )
}"
writeLines(model.txt, "model.txt")

dat<-read.csv("http://www.matsuka.info/univ/course_folder/z15N50.csv")
y=dat$y
Ntotal=length(dat$y)
datalist = list(y=y,Ntotal=Ntotal)

# jags
jagsModel = jags.model(file="model.txt",data=datalist,n.chains=3,n.adapt=500)
update(jagsModel,n.iter=1000)
codaSamples=coda.samples(jagsModel,variable.names=c("theta"),n.iter=5000)
mcmcMat<-as.matrix(codaSamples)

par(mfrow=c(2,2))
cols=c("orange", "skyblue","pink")

# chain
mcmc1<-as.mcmc(codaSamples[[1]])
mcmc2<-as.mcmc(codaSamples[[2]])
mcmc3<-as.mcmc(codaSamples[[3]])
plot(mcmc1,type='l')
lines(mcmc2,col='red')
lines(mcmc3,col='blue')

# autocorrelation
ac1=autocorr(mcmc1,lags=0:50)
ac2=autocorr(mcmc2,lags=0:50)
ac3=autocorr(mcmc3,lags=0:50)
plot(ac1, type='o', pch = 20, col = cols[1], ylab = "Autocorrelation", xlab = "Lag")
lines(ac2, type='o', pch = 20, col = cols[2])
lines(ac3, type='o', pch = 20, col = cols[3])

# shrink factor
resALL=mcmc.list(mcmc1,mcmc2,mcmc3)
gd1=gelman.plot(resALL, auto.layout = F)

# density
den1=density(mcmc1)
den2=density(mcmc2)
den3=density(mcmc3)
plot(den1, type='l', col = cols[1], main = " ", xlab = "param. value",lwd=2.5)
lines(den2, col = cols[2], lwd=2.5)
lines(den3, col = cols[3], lwd=2.5)

par(mfrow=c(1,1))
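
# (added) numerical summaries and effective sample sizes for the three chains
# (effectiveSize comes from the coda package, loaded with rjags)
summary(codaSamples)
effectiveSize(codaSamples)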

model.txt = "
model {
  for ( i_data in 1:Ntotal ) {
    y[ i_data ] ~ dbern( theta[s[i_data]] )
  }
  for ( i_s in 1:Nsubj) {
    theta[i_s] ~ dbeta( 2, 2 )
  }
}"
writeLines(model.txt, "model.txt")

dat<-read.csv("http://www.matsuka.info/univ/course_folder/z6N8z2N7.csv")
y=dat$y
s=as.numeric(dat$s)
Ntotal=length(dat$y)
Nsubj=length(unique(s))
datalist = list(y=y,s=s,Ntotal=Ntotal,Nsubj=Nsubj)

jagsModel = jags.model(file="model.txt",data=datalist,n.chains=3,n.adapt=500)
update(jagsModel,n.iter=1000)
codaSamples=coda.samples(jagsModel,variable.names=c("theta"),n.iter=5000)
mcmcMat<-as.matrix(codaSamples)
par(mfrow=c(2,2))
cols=c("orange", "skyblue","pink")

# chain
mcmc1<-as.mcmc(codaSamples[[1]])
mcmc2<-as.mcmc(codaSamples[[2]])
mcmc3<-as.mcmc(codaSamples[[3]])
plot(mcmc1,type='l')
lines(mcmc2,col='red')
lines(mcmc3,col='blue')

# autocorrelation
ac1=autocorr(mcmc1,lags=0:50)
ac2=autocorr(mcmc2,lags=0:50)
ac3=autocorr(mcmc3,lags=0:50)
plot(ac1, type='o', pch = 20, col = cols[1], ylab = "Autocorrelation", xlab = "Lag")
lines(ac2, type='o', pch = 20, col = cols[2])
lines(ac3, type='o', pch = 20, col = cols[3])

# shrink factor
resALL=mcmc.list(mcmc1,mcmc2,mcmc3)
gd1=gelman.plot(resALL, auto.layout = F)

# density
den1=density(mcmc1)
den2=density(mcmc2)
den3=density(mcmc3)
plot(den1, type='l', col = cols[1], main = " ", xlab = "param. value",lwd=2.5)
lines(den2, col = cols[2], lwd=2.5)
lines(den3, col = cols[3], lwd=2.5)

par(mfrow=c(1,1))

model.txt = "
model {
  for ( i in 1:Ntotal ) {
    y[i] ~ dnorm( mu , 1/sigma^2  )
  }
  mu ~ dnorm( meanY , 1/(100*sdY)^2 )
  sigma ~ dunif( sdY/1000 , sdY*1000 )
}
"
writeLines(model.txt, "model.txt")

dat<-read.csv("http://www.matsuka.info/univ/course_folder/TwoGroupIQ.csv")
y = dat$Score[dat$Group=="Smart Drug"]
Ntotal = length(y)

dataList = list(y = y, Ntotal = Ntotal, meanY = mean(y), sdY = sd(y))
jagsModel = jags.model("model.txt", data=dataList, n.chains=3, n.adapt=1000 )
update( jagsModel , n.iter=1000)
codaSamples = coda.samples( jagsModel , variable.names=c("mu","sigma"), n.iter=5000, thin=5)
mcmcMat<-as.matrix(codaSamples)

# calculating & plotting normality
par(mfrow=c(2,2))
HDI.plot(mcmcMat[,1])
hist(y,nclass=15,probability = T)
x.temp = seq(40,200,0.1)
n.plot = 100
randperm = sample(1:nrow(mcmcMat),n.plot)
for (i.plot in 1:n.plot){
  norm.temp=dnorm(x.temp,mean=mcmcMat[randperm[i.plot],1],sd=mcmcMat[randperm[i.plot],2])
  lines(x.temp,norm.temp,col='orange')
}
HDI.plot(mcmcMat[,2])

# calculating & plotting effect size
effect.size=(mcmcMat[,"mu"]-100)/mcmcMat[,"sigma"]
HDI.plot(effect.size)


#
dat<-read.csv("http://www.matsuka.info/univ/course_folder/TwoGroupIQ.csv")
y = dat$Score[dat$Group=="Smart Drug"]
Ntotal = length(y)

model.txt="
model {
  for ( i in 1:Ntotal ) {
    y[i] ~ dt( mu , 1/sigma^2 , nu )
  }
  mu ~ dnorm( meanY , 1/(100*sdY)^2 )
  sigma ~ dunif( sdY/1000 , sdY*1000 )
  nu <- nuMinusOne+1
  nuMinusOne ~ dexp(1/30.0)
}"
writeLines(model.txt, "model.txt")

dataList = list(y = y, Ntotal = Ntotal, meanY = mean(y), sdY = sd(y))
jagsModel = jags.model("model.txt", data=dataList, n.chains=3, n.adapt=1000 )
update( jagsModel , n.iter=1000)
codaSamples = coda.samples( jagsModel , variable.names=c("mu","sigma","nu"), n.iter=5000, thin=5)
mcmcMat<-as.matrix(codaSamples)

# calculating & plotting normality
par(mfrow=c(3,2))
HDI.plot(mcmcMat[,1])
HDI.plot(mcmcMat[,3])
normality=log10(mcmcMat[,"nu"])
HDI.plot(normality)
effect.size=(mcmcMat[,"mu"]-100)/mcmcMat[,"sigma"]
HDI.plot(effect.size)

hist(y,nclass=20,probability = T)
n.plot = 100
randperm = sample(1:nrow(mcmcMat),n.plot)
for (i.plot in 1:n.plot){
  x.temp1 = seq(40,200,0.1)
  x.temp2 = (x.temp1 - mcmcMat[randperm[i.plot],1])/mcmcMat[randperm[i.plot],3]
  t.temp=dt(x.temp2,mcmcMat[randperm[i.plot],2])/mcmcMat[randperm[i.plot],3]
  lines(x.temp1,t.temp,col='orange')
}

par(mfrow=c(2,2))
plot(mcmcMat[,1],mcmcMat[,3],col='orange')
plot(mcmcMat[,1],log10(mcmcMat[,2]),col='orange')
plot(0,0,type='n')
plot(log10(mcmcMat[,2]),mcmcMat[,3],col='orange')


dat<-read.csv("http://www.matsuka.info/univ/course_folder/TwoGroupIQ.csv")
y = dat$Score
group = as.numeric(dat$Group)
Ntotal = length(y)
Ngroup = length(unique(group))
model.txt="
model {
  for ( i in 1:Ntotal ) {
    y[i] ~ dt( mu[group[i]] , 1/sigma[group[i]]^2 , nu )
  }
  for (j in 1:Ngroup){
    mu[j] ~ dnorm( meanY , 1/(100*sdY)^2 )
    sigma[j] ~ dunif( sdY/1000 , sdY*1000 )
  }
  nu <- nuMinusOne+1
  nuMinusOne ~ dexp(1/29)
}"
writeLines(model.txt, "model.txt")


dataList = list(y = y, Ntotal = Ntotal, meanY = mean(y), sdY = sd(y),Ngroup=Ngroup,group=group)
jagsModel = jags.model("model.txt", data=dataList, n.chains=3, n.adapt=5000 )
update( jagsModel , n.iter=1000)
codaSamples = coda.samples( jagsModel , variable.names=c("mu","sigma","nu"), n.iter=5000, thin=5)
mcmcMat<-as.matrix(codaSamples)

# plotting result
par(mfrow=c(5,2))
HDI.plot(mcmcMat[,1],xlabel="placebo Mean")

hist(y[dat$Group=="Placebo"],nclass=20,probability = T)
n.plot = 100
randperm = sample(1:nrow(mcmcMat),n.plot)
for (i.plot in 1:n.plot){
  x.temp1 = seq(40,200,0.1)
  x.temp2 = (x.temp1 - mcmcMat[randperm[i.plot],1])/mcmcMat[randperm[i.plot],4]
  t.temp=dt(x.temp2,mcmcMat[randperm[i.plot],3])/mcmcMat[randperm[i.plot],4]
  lines(x.temp1,t.temp,col='orange')
}

HDI.plot(mcmcMat[,2],xlabel="smart drug Mean")

hist(y[dat$Group=="Smart Drug"],nclass=20,probability = T)
n.plot = 100
randperm = sample(1:nrow(mcmcMat),n.plot)
for (i.plot in 1:n.plot){
  x.temp1 = seq(40,200,0.1)
  x.temp2 = (x.temp1 - mcmcMat[randperm[i.plot],2])/mcmcMat[randperm[i.plot],5]
  t.temp=dt(x.temp2,mcmcMat[randperm[i.plot],3])/mcmcMat[randperm[i.plot],5]
  lines(x.temp1,t.temp,col='orange')
}

HDI.plot(mcmcMat[,4],xlabel="placebo scale")

HDI.plot(mcmcMat[,2]-mcmcMat[,1],xlabel="Difference of Means")

HDI.plot(mcmcMat[,5],xlabel="smart drug scale")

HDI.plot(mcmcMat[,5]-mcmcMat[,4],xlabel="Difference of Scales")

HDI.plot(log10(mcmcMat[,3]),xlabel="Normality")

effect.size = (mcmcMat[,2]-mcmcMat[,1])/sqrt((mcmcMat[,5]^2+mcmcMat[,4]^2)/2)
HDI.plot(effect.size,xlabel="effect size")


Fundamentals of Data Analysis B: Discriminant Analysis+

library(MASS)
library(ggplot2) # needed for the ggplot figure below
dat<-data.frame(writing=c(68,85,50,54,66,35,56,25,43,70),
                interview=c(65,80,95,70,75,55,65,75,50,40), 
                cl=c(rep("A",5),rep("N",5)))
dat.lda<-lda(cl~.,data=dat)

intcpt = (dat.lda$scaling[1]*dat.lda$means[1,1] +
          dat.lda$scaling[2]*dat.lda$means[1,2] + 
          dat.lda$scaling[1]*dat.lda$means[2,1] + 
          dat.lda$scaling[2]*dat.lda$means[2,2])/2
z = as.matrix(dat[,1:2])%*%dat.lda$scaling-intcpt

new.dim.slope = dat.lda$scaling[1]/dat.lda$scaling[2]

disc.intcpt = intcpt / dat.lda$scaling[2]
disc.slope = -dat.lda$scaling[1] / dat.lda$scaling[2]

ggplot(dat, aes(x = writing, y= interview, color = cl)) +
  geom_point(size = 4) +
  geom_abline(aes(intercept = intcpt, slope = new.dim.slope )) +
  geom_abline(aes(intercept = disc.intcpt, slope = disc.slope ),color = "red") + xlim(30,100)+ylim(30,100)

dat<-read.csv("http://matsuka.info/data_folder/tdkDA01.csv", header=T)
dat.lda<-lda(class~.,dat)
lda.pred<-predict(dat.lda,dat)
table(lda.pred$class, dat$class)

dat.lda<-lda(class~.,dat, CV=T)
dat.cl = dat.lda$posterior[,1]>dat.lda$posterior[,2]
table(dat.cl, dat$class)


dat<-read.csv("http://matsuka.info/data_folder/tdkDA02.csv",header=T)
dat.lda=lda(class~.,data=dat)
lda.pred <- predict(dat.lda, dat)
plot(dat.lda, dimen=3, col=as.numeric(lda.pred$class),cex=2)

dat.km<-kmeans(dat[,1:6],5)
table(lda.pred$class,dat.km$cluster)
plot(dat,col=dat.km$cluster,pch=20)
plot(dat.lda, dimen=2, col=as.numeric(lda.pred$class),cex=2)


dat<-read.csv("http://www.matsuka.info/data_folder/tdkCFA.csv")
dat1<-subset(dat,dat$popularity<5)
dat2<-subset(dat,dat$popularity>4 & dat$popularity<6)
dat3<-subset(dat,dat$popularity>6)
dat1$popularity="LP";dat2$popularity="MP";dat3$popularity="VP"
datT=rbind(dat1,dat2,dat3)
datT.lda<-lda(popularity~.,datT)
datT.pred<-predict(datT.lda,datT)
table(datT.pred$class,datT$popularity)   
par(mfrow=c(1,2))
plot(datT.lda,col=c(rep('red',20),rep('blue',28),rep('green',29)),cex=1.5)
plot(datT.lda,col=as.numeric(datT.pred$class),cex=1.5)
par(mfrow=c(1,1))

library(nnet)
dat<-read.csv("http://www.matsuka.info/data_folder/tdkReg01.csv")
set.seed(5)
x = dat[,1:3]
y = dat[,4]
dat.nnet = nnet(x,y, size = 150, linout= TRUE, maxit = 1000)
nnet.pred <-predict(dat.nnet,dat)
cor(dat.nnet$fitted.values,dat$sales)^2

dat.lm<-lm(sales~.,data=dat)
plot(dat.nnet$fitted.values, dat$sales,pch=20,col='black')
points(predict(dat.lm), dat$sales,pch=20,col='red')

n.data = nrow(dat);n.sample = n.data*0.6; n.rep = 100 
trainNN.cor = rep(0,n.rep); trainLM.cor = rep(0,n.rep)
testNN.cor = rep(0,n.rep); testLM.cor = rep(0,n.rep)
for (i.rep in 1:n.rep){  
  randperm = sample(1:n.data)  
  train.idx = randperm[1:n.sample]  
  test.idx = randperm[(n.sample+1):n.data]  
  dat.nnet <- nnet(sales~.,size = 100, linout=T, maxit=1000, data = dat[train.idx,])  
  dat.lm <-lm(sales~.,data=dat[train.idx, ])  
  trainNN.cor[i.rep] <- cor(predict(dat.nnet,dat[train.idx, ]), dat[train.idx,]$sales)  
  trainLM.cor[i.rep] <- cor(predict(dat.lm,dat[train.idx, ]), dat[train.idx,]$sales)  
  testNN.cor[i.rep] <- cor(predict(dat.nnet,dat[test.idx, ]), dat[test.idx,]$sales)  
  testLM.cor[i.rep] <- cor(predict(dat.lm,dat[test.idx, ]), dat[test.idx,]$sales)
}
print(c(mean(trainNN.cor,na.rm=T),mean(testNN.cor,na.rm=T)))
print(c(mean(trainLM.cor,na.rm=T),mean(testLM.cor,na.rm=T)))


n.data = nrow(dat);n.sample = n.data*0.6; n.rep = 100 
trainNN.cor = rep(0,n.rep); trainLM.cor = rep(0,n.rep)
testNN.cor = rep(0,n.rep); testLM.cor = rep(0,n.rep)
for (i.rep in 1:n.rep){  
  randperm = sample(1:n.data)  
  train.idx = randperm[1:n.sample]  
  test.idx = randperm[(n.sample+1):n.data]  
  dat.nnet <- nnet(sales~.,size = 30, linout=T,decay= 0.1, maxit=1000, data = dat[train.idx,])  
  dat.lm <-lm(sales~.,data=dat[train.idx, ])  
  trainNN.cor[i.rep] <- cor(predict(dat.nnet,dat[train.idx, ]), dat[train.idx,]$sales)  
  trainLM.cor[i.rep] <- cor(predict(dat.lm,dat[train.idx, ]), dat[train.idx,]$sales)  
  testNN.cor[i.rep] <- cor(predict(dat.nnet,dat[test.idx, ]), dat[test.idx,]$sales)  
  testLM.cor[i.rep] <- cor(predict(dat.lm,dat[test.idx, ]), dat[test.idx,]$sales)
}
print(c(mean(trainNN.cor,na.rm=T),mean(testNN.cor,na.rm=T)))
print(c(mean(trainLM.cor,na.rm=T),mean(testLM.cor,na.rm=T)))


dat<-read.csv("http://matsuka.info/data_folder/tdkDA01.csv", header =T)
dat.nnet<-nnet(class~.,dat,size=30,maxit=1000,decay=0.05)
dat.pred<-predict(dat.nnet,dat,type="class")
table(dat.pred,dat$class)

dat.glm<-glm(class~., family="binomial",dat)
glm.pred<-predict(dat.glm, dat, type="response")>0.5
table(glm.pred,dat$class)

dat<-read.csv("http://www.matsuka.info/data_folder/cda7-16.csv")


dat.nnet<-nnet(survival~., dat, size=30, maxit=1000, decay=0.01)
dat.pred<-predict(dat.nnet,dat,type="class")
table(dat.pred,dat$survival)

Ns = summary(dat$survival)
(Ns[1]/Ns[2])^-1

wts = rep(1,nrow(dat))
wts[which(dat$survival=="no")]=45
dat.nnet<-nnet(survival~., weights=wts, dat, size=30, maxit=1000, decay=0.01)
dat.pred<-predict(dat.nnet,dat,type="class")
table(dat.pred,dat$survival)

dat<-read.csv("http://matsuka.info/data_folder/tdkDA02.csv",header=T)
class.id<-class.ind(dat$class)
x = dat[,1:6]
dat.nnet<-nnet(x,class.id,size=30, maxit=1000, decay=0.01, softmax=TRUE)
max.id = apply(dat.nnet$fitted.values,1,which.max)
table(max.id,dat$class)

dat<-read.table("http://www.matsuka.info/data_folder/tdkPCA01.txt")
dat.nnet<-nnet(dat,dat,size=2, maxit=1000, decay=0.01, linout=TRUE)
cor(dat.nnet$fitted.values,dat)

Special Lectures on General Systems II: DL with Keras

dogs&cats_data

library(keras)

### iris data
dat = iris
x_train = dat[,1:3] %>% as.matrix()
# note: to_categorical() truncates dat[,4] (Petal.Width) to the integers 0-2,
# so this sets up a 3-class problem predicting the petal-width band
y_train <- dat[,4] %>% to_categorical()

model_iris <- keras_model_sequential() %>% 
  layer_dense(units=20, activation = "relu", input_shape = ncol(x_train)) %>%
  layer_dense(units = 10, activation = "relu") %>%
  layer_dense(units = 3, activation = "softmax")

summary(model_iris)

model_iris %>% compile(
  optimizer = "rmsprop",
  loss = "categorical_crossentropy",
  metrics = c("accuracy")
)

model_iris %>% fit(
  x_train, 
  y_train,
  epochs = 100, 
  batch_size = 5
)

model_iris %>% evaluate(x_train,y_train)

### with validation
history <- model_iris %>% fit(x_train,
                         y_train,
                         validation_split = 0.20,
                         epochs=300,
                         batch_size = 5,
                         shuffle = T)

plot(history)



normalizer <- function(x) {
  min_x = apply(x,2,min)
  max_x = apply(x,2,max)
  num <- t(x) - min_x
  denom <- max_x - min_x
  normalized = t(num/denom)
  return (normalized)
}

x_trainN = normalizer(x_train)
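
# check (added): every column should now span [0, 1]
apply(x_trainN, 2, range)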

model_iris <- keras_model_sequential() %>% 
  layer_dense(units=20, activation = "relu", input_shape = ncol(x_train)) %>%
  layer_dense(units = 10, activation = "relu") %>%
  layer_dense(units = 3, activation = "softmax")

model_iris %>% compile(
  optimizer = "rmsprop",
  loss = "categorical_crossentropy",
  metrics = c("accuracy")
)

history_N <- model_iris %>% fit(x_trainN,
                              y_train,
                              validation_split = 0.20,
                              epochs=150,
                              batch_size = 5,
                              shuffle = T)
plot(history_N)


### IMDB
imdb <- dataset_imdb(num_words = 10000)
c(c(train_data, train_labels), c(test_data, test_labels)) %<-% imdb
vectorize_sequences <- function(sequences, dimension = 10000) {
  results <- matrix(0, nrow = length(sequences), ncol = dimension)
  for (i in 1:length(sequences))
    results[i, sequences[[i]]] <- 1
  results
}

x_trainTEMP <- vectorize_sequences(train_data)
x_test <- vectorize_sequences(test_data)
y_trainTEMP <- as.numeric(train_labels)
y_test <- as.numeric(test_labels)

val_idx = 1:1000
x_val = x_trainTEMP[val_idx, ]
y_val = y_trainTEMP[val_idx]
x_train = x_trainTEMP[-val_idx, ]
y_train = y_trainTEMP[-val_idx]

original_model <- keras_model_sequential() %>% 
  layer_dense(units = 16, activation = "relu", input_shape = c(10000)) %>% 
  layer_dense(units = 16, activation = "relu") %>% 
  layer_dense(units = 1, activation = "sigmoid")

original_model %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c("accuracy")
)

history_orig <- original_model %>% fit(x_train,
                                      y_train,
                                      epochs=20,
                                      batch_size = 512,
                                      validation_data = list(x_val, y_val))
plot(history_orig)
original_model %>% evaluate(x_test, y_test)

smaller_model <- keras_model_sequential() %>% 
  layer_dense(units = 4, activation = "relu", input_shape = c(10000)) %>% 
  layer_dense(units = 4, activation = "relu") %>% 
  layer_dense(units = 1, activation = "sigmoid")
smaller_model %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c("accuracy")
)

history_small <- smaller_model %>% fit(x_train,
                                       y_train,
                                       epochs=20,
                                       batch_size = 512,
                                       validation_data = list(x_val, y_val))
plot(history_small)
smaller_model %>% evaluate(x_test, y_test)

bigger_model <- keras_model_sequential() %>% 
  layer_dense(units = 512, activation = "relu", input_shape = c(10000)) %>% 
  layer_dense(units = 512, activation = "relu") %>% 
  layer_dense(units = 1, activation = "sigmoid")
bigger_model %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c("accuracy")
)

history_big <- bigger_model %>% fit(x_train,
                                       y_train,
                                       epochs=20,
                                       batch_size = 512,
                                       validation_data = list(x_val, y_val))
plot(history_big)


model_L2 <- keras_model_sequential() %>% 
  layer_dense(units = 16, kernel_regularizer = regularizer_l2(0.0001),
              activation = "relu", input_shape = c(10000)) %>% 
  layer_dense(units = 16, kernel_regularizer = regularizer_l2(0.0001),
              activation = "relu") %>% 
  layer_dense(units = 1, activation = "sigmoid")
model_L2 %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c("accuracy")
)

history_L2 <- model_L2 %>% fit(x_train,
                               y_train,
                               epochs=20,
                               batch_size = 512,
                               validation_data = list(x_val, y_val))
plot(history_L2)
model_L2 %>% evaluate(x_test, y_test)


model_DO <- keras_model_sequential() %>% 
  layer_dense(units = 16,activation = "relu", input_shape = c(10000)) %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 16,activation = "relu") %>% 
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 1, activation = "sigmoid")
model_DO %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c("accuracy")
)

history_DO <- model_DO %>% fit(x_train,
                               y_train,
                               epochs=20,
                               batch_size = 512,
                               validation_data = list(x_val, y_val))
plot(history_DO)
model_DO %>% evaluate(x_test, y_test)


### MNIST

mnist <- dataset_mnist()
c(c(tr_img, tr_lab),c(te_img, te_lab)) %<-% mnist
tr_img <- array_reshape(tr_img, c(60000,28,28,1))
tr_img = tr_img / 255
te_img <- array_reshape(te_img, c(10000,28,28,1))
te_img = te_img / 255
tr_lab = to_categorical(tr_lab)
te_lab = to_categorical(te_lab)

model <- keras_model_sequential() %>% 
  layer_conv_2d(filter = 32, kernel_size = c(3,3), 
                activation = "relu",input_shape = c(28,28,1)) %>%
  layer_max_pooling_2d(pool_size = c(2,2)) %>%
  layer_conv_2d(filter = 64, kernel_size = c(3,3),activation = "relu") %>%
  layer_max_pooling_2d(pool_size = c(2,2)) %>%
  layer_flatten() %>%
  layer_dense(units =64, activation = "relu") %>%
  layer_dense(units =10, activation = "softmax")
  
model %>% compile(
  optimizer = "rmsprop",
  loss = "categorical_crossentropy",
  metrics = c("accuracy")
)

history <- model %>% fit(
  tr_img, tr_lab,
  epochs = 5, 
  validation_split = 0.20,
  batch_size = 64
)

model %>% evaluate(te_img, te_lab)


original_dataset_dir <- "~/Downloads/dogs-vs-cats/train"
base_dir <- "~/Downloads/cats_and_dogs_small/"
dir.create(base_dir)
train_dir <- file.path(base_dir, "train")
dir.create(train_dir)
validation_dir <- file.path(base_dir, "validation")
dir.create(validation_dir)
test_dir <- file.path(base_dir, "test")
dir.create(test_dir)
train_cats_dir <- file.path(train_dir, "cats")
dir.create(train_cats_dir)
train_dogs_dir <- file.path(train_dir, "dogs")
dir.create(train_dogs_dir)
validation_cats_dir <- file.path(validation_dir, "cats")
dir.create(validation_cats_dir)
validation_dogs_dir <- file.path(validation_dir, "dogs")
dir.create(validation_dogs_dir)
test_cats_dir <- file.path(test_dir, "cats")
dir.create(test_cats_dir)
test_dogs_dir <- file.path(test_dir, "dogs")
dir.create(test_dogs_dir)
fnames <- paste0("cat.", 1:1000, ".jpg")
file.copy(file.path(original_dataset_dir, fnames), 
          file.path(train_cats_dir)) 
fnames <- paste0("cat.", 1001:1500, ".jpg")
file.copy(file.path(original_dataset_dir, fnames), 
          file.path(validation_cats_dir))
fnames <- paste0("cat.", 1501:2000, ".jpg")
file.copy(file.path(original_dataset_dir, fnames),
          file.path(test_cats_dir))
fnames <- paste0("dog.", 1:1000, ".jpg")
file.copy(file.path(original_dataset_dir, fnames),
          file.path(train_dogs_dir))
fnames <- paste0("dog.", 1001:1500, ".jpg")
file.copy(file.path(original_dataset_dir, fnames),
          file.path(validation_dogs_dir)) 
fnames <- paste0("dog.", 1501:2000, ".jpg")
file.copy(file.path(original_dataset_dir, fnames),
          file.path(test_dogs_dir))


model <- keras_model_sequential() %>% 
  layer_conv_2d(filters = 32, kernel_size = c(3, 3), activation = "relu",
                input_shape = c(150, 150, 3)) %>% 
  layer_max_pooling_2d(pool_size = c(2, 2)) %>% 
  layer_conv_2d(filters = 64, kernel_size = c(3, 3), activation = "relu") %>% 
  layer_max_pooling_2d(pool_size = c(2, 2)) %>% 
  layer_conv_2d(filters = 128, kernel_size = c(3, 3), activation = "relu") %>% 
  layer_max_pooling_2d(pool_size = c(2, 2)) %>% 
  layer_conv_2d(filters = 128, kernel_size = c(3, 3), activation = "relu") %>% 
  layer_max_pooling_2d(pool_size = c(2, 2)) %>% 
  layer_flatten() %>% 
  layer_dense(units = 512, activation = "relu") %>% 
  layer_dense(units = 1, activation = "sigmoid")

summary(model)

model %>% compile(
  loss = "binary_crossentropy",
  optimizer = optimizer_rmsprop(lr = 1e-4),
  metrics = c("acc")
)

train_datagen <- image_data_generator(rescale = 1/255)
validation_datagen <- image_data_generator(rescale = 1/255)
train_generator <- flow_images_from_directory(
  train_dir,
  train_datagen,
  target_size = c(150, 150),
  batch_size = 20,
  class_mode = "binary"
)

validation_generator <- flow_images_from_directory(
  validation_dir,
  validation_datagen,
  target_size = c(150, 150),
  batch_size = 20,
  class_mode = "binary"
)

history <- model %>% fit_generator(
  train_generator,
  steps_per_epoch = 100,
  epochs = 10,
  validation_data = validation_generator,
  validation_steps = 50
)


model <- keras_model_sequential() %>% 
  layer_conv_2d(filters = 32, kernel_size = c(3, 3), activation = "relu",
                input_shape = c(150, 150, 3)) %>% 
  layer_max_pooling_2d(pool_size = c(2, 2)) %>% 
  layer_conv_2d(filters = 64, kernel_size = c(3, 3), activation = "relu") %>% 
  layer_max_pooling_2d(pool_size = c(2, 2)) %>% 
  layer_conv_2d(filters = 128, kernel_size = c(3, 3), activation = "relu") %>% 
  layer_max_pooling_2d(pool_size = c(2, 2)) %>% 
  layer_conv_2d(filters = 128, kernel_size = c(3, 3), activation = "relu") %>% 
  layer_max_pooling_2d(pool_size = c(2, 2)) %>% 
  layer_flatten() %>% 
  layer_dropout(rate = 0.5) %>% 
  layer_dense(units = 512, activation = "relu") %>% 
  layer_dense(units = 1, activation = "sigmoid")  

model %>% compile(
  loss = "binary_crossentropy",
  optimizer = optimizer_rmsprop(lr = 1e-4),
  metrics = c("acc")
)

datagen <- image_data_generator(
  rescale = 1/255,
  rotation_range = 40,
  width_shift_range = 0.2,
  height_shift_range = 0.2,
  shear_range = 0.2,
  zoom_range = 0.2,
  horizontal_flip = TRUE
)

test_datagen <- image_data_generator(rescale = 1/255)

train_generator <- flow_images_from_directory(
  train_dir,
  datagen,
  target_size = c(150, 150),
  batch_size = 32,
  class_mode = "binary"
)

validation_generator <- flow_images_from_directory(
  validation_dir,
  test_datagen,
  target_size = c(150, 150),
  batch_size = 32,
  class_mode = "binary"
)

history <- model %>% fit_generator(
  train_generator,
  steps_per_epoch = 100,
  epochs = 10,
  validation_data = validation_generator,
  validation_steps = 50
)

datagen <- image_data_generator(
  rotation_range = 40,
  width_shift_range = 0.2,
  height_shift_range = 0.2,
  shear_range = 0.2,
  zoom_range = 0.2,
  horizontal_flip = FALSE,
  fill_mode = "nearest"
)

conv_base <- application_vgg16(
  weights = "imagenet",
  include_top = FALSE,
  input_shape = c(150, 150, 3)
)

summary(conv_base)

model <- keras_model_sequential() %>% 
  conv_base %>% 
  layer_flatten() %>% 
  layer_dense(units = 256, activation = "relu") %>% 
  layer_dense(units = 1, activation = "sigmoid")

train_datagen = image_data_generator(
  rescale = 1/255,
  rotation_range = 40,
  width_shift_range = 0.2,
  height_shift_range = 0.2,
  shear_range = 0.2,
  zoom_range = 0.2,
  horizontal_flip = TRUE,
  fill_mode = "nearest"
)
test_datagen <- image_data_generator(rescale = 1/255)
train_generator <- flow_images_from_directory(
  train_dir,
  train_datagen,
  target_size = c(150, 150),
  batch_size = 20,
  class_mode = "binary"
)
validation_generator <- flow_images_from_directory(
  validation_dir,
  test_datagen,
  target_size = c(150, 150),
  batch_size = 20,
  class_mode = "binary"
)
model %>% compile(
  loss = "binary_crossentropy",
  optimizer = optimizer_rmsprop(lr = 2e-5),
  metrics = c("accuracy")
)
history <- model %>% fit_generator(
  train_generator,
  steps_per_epoch = 100,
  epochs = 3,
  validation_data = validation_generator,
  validation_steps = 50
)

unfreeze_weights(conv_base, from = "block5_conv1")
model %>% compile(
  loss = "binary_crossentropy",
  optimizer = optimizer_rmsprop(lr = 1e-5),
  metrics = c("accuracy")
)


history <- model %>% fit_generator(
  train_generator,
  steps_per_epoch = 100,
  epochs = 10,
  validation_data = validation_generator,
  validation_steps = 50
)

test_generator <- flow_images_from_directory(
  test_dir,
  test_datagen,
  target_size = c(150, 150),
  batch_size = 20,
  class_mode = "binary"
)
model %>% evaluate_generator(test_generator, steps = 50)
