Single-layer perceptron

Training examples using gradient descent and stochastic optimization:
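
The model in both cases is a single sigmoid unit: given an input column x (bias, X1, X2) and a weight row vector w, the output is s = 1/(1 + exp(-gamma*a)) with activation a = w.x. For the squared error E = (y - s)^2, the gradient with respect to w is -2*(y - s)*s*(1 - s)*gamma*x, so (absorbing the constant 2 into the step size) gradient descent updates w <- w + stepSize*(y - s)*s*(1 - s)*gamma*x. This is exactly the update line inside the training loop below.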

# The 16 logic functions of 2 variables
x=matrix(c(1,1,1, 1,1,0, 1,0,1, 1,0,0),ncol=4);
y=matrix(c(0,0,0,0, 0,0,0,1, 0,0,1,0, 0,1,0,0, 1,0,0,0, 0,0,1,1, 0,1,0,1, 1,0,0,1,
  0,1,1,0, 1,0,1,0, 1,1,0,0, 1,1,1,0, 1,1,0,1, 1,0,1,1, 0,1,1,1, 1,1,1,1),byrow=T,ncol=4)

> x # input matrix: one column per input pattern
     [,1] [,2] [,3] [,4]
[1,]    1    1    1    1   # X0 (bias, always 1)
[2,]    1    1    0    0   # X1
[3,]    1    0    1    0   # X2
> y # target matrix: row i is the output pattern of logic function i
      [,1] [,2] [,3] [,4]
 [1,]    0    0    0    0  # none
 [2,]    0    0    0    1  # notX1 & notX2
 [3,]    0    0    1    0  # notX1 & X2
 [4,]    0    1    0    0  # X1 & notX2
 [5,]    1    0    0    0  # X1 & X2
 [6,]    0    0    1    1  # notX1
 [7,]    0    1    0    1  # notX2
 [8,]    1    0    0    1  # {X1 & X2} or {notX1 & notX2}
 [9,]    0    1    1    0  # {X1 & notX2} or {notX1 & X2}
[10,]    1    0    1    0  # X2
[11,]    1    1    0    0  # X1
[12,]    1    1    1    0  # X1 or X2
[13,]    1    1    0    1  # X1 or notX2
[14,]    1    0    1    1  # notX1 or X2
[15,]    0    1    1    1  # notX1 or notX2
[16,]    1    1    1    1  # all
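
Row logicID of y is used as the training target below. Of these 16 functions, all but rows 8 (XNOR) and 9 (XOR) are linearly separable; a single-layer perceptron cannot represent those two, so neither training method will drive their error to zero.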

# Gradient descent
stepSize=0.1;gamma=5;logicID=2;  # learning rate, sigmoid steepness, target logic (row of y)
w=matrix(rnorm(3),nrow=1);       # random initial weights (bias, X1, X2)
for (i_tr in 1:2000) {           # training epochs
  for (i_inst in 1:4){           # loop over the 4 input patterns
    a=w%*%x[,i_inst,drop=F]      # activation a = w.x
    s=1/(1+exp(-gamma*a))        # sigmoid output
    w=w+stepSize*(y[logicID,i_inst]-s)*s*(1-s)*gamma*x[,i_inst]  # delta rule
  }
}
for (i_inst in 1:4){             # print the trained output for each pattern
  a=w%*%x[,i_inst,drop=F]
  print(1/(1+exp(-gamma*a)))
}
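
As a quick check, the four outputs can also be computed in one shot and rounded against the target row (a minimal sketch; the variable name s_hat is mine, not part of the original code):

# Vectorized forward pass over all 4 input patterns.
s_hat=1/(1+exp(-gamma*(w%*%x)))  # 1x4 row of outputs
print(round(s_hat))              # should reproduce the target row
print(y[logicID,])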

# Stochastic optimization (simulated annealing)
gamma=2;logicID=2;
w=matrix(rnorm(3),nrow=1);           # random initial weights
err=4;width=1;sTemp=matrix(0,nrow=1,ncol=4);C=0.25  # ncol=4: one output per input pattern
for (i_tr in 1:5000) {
  wTemp=w+rnorm(3,mean=0,sd=width)   # propose a perturbed weight vector
  for (i_inst in 1:4) {
    a=wTemp%*%x[,i_inst,drop=F]
    sTemp[i_inst]=1/(1+exp(-gamma*a))
  }
  errTemp=sum((sTemp-y[logicID,])^2) # squared error of the proposal
  delta=errTemp-err;
  prob=1/(1+exp(delta/(C*width)))    # acceptance probability; temperature ~ C*width
  if (runif(1) < prob) {             # accept stochastically (likely if the error improved)
    w=wTemp
    err=errTemp
  }
  width=width*0.99                   # cool down: shrink proposal width and temperature
}
for (i_inst in 1:4) {                # print the final outputs
  a=w%*%x[,i_inst,drop=F]
  print(1/(1+exp(-gamma*a)))
}
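
And as a sanity check on the annealed weights, the residual error can be recomputed directly (assuming the block above has just been run; for logicID=2 it should end up near zero):

# Recompute the squared error of the accepted weight vector.
sFin=1/(1+exp(-gamma*(w%*%x)))
print(sum((sFin-y[logicID,])^2))  # should agree with the tracked err
print(err)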
