Learning XOR

//f(x;W,c,w,b)=w*max{0, W*x+c}+b
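// Worked example with the parameters used below (a hand calculation of the
// same forward pass the loops perform), for the input x = (1, 0):
//   W*x + c       = (1, 1) + (0, -1) = (1, 0)
//   h = max{0, .} = (1, 0)
//   w*h + b       = 1*1 + (-2)*0 + 0 = 1 = XOR(1, 0)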

#include <iostream>
#include <vector>
#include <algorithm>
#include <cmath>      // for std::exp

// Two common activation functions, kept here for reference; the network
// below uses the rectified linear unit max{0, z} instead.
template <class T>
double tanh(T z) {
  return (std::exp(z) - std::exp(-z)) / (std::exp(z) + std::exp(-z));
}

template <class T>
double sigmoid(T z) {
  return 1.0 / (1.0 + std::exp(-z));
}

int main() {
  int w[][2] = {{1, 1}, {1, 1}};                  // hidden-layer weights W
  int bias[] = {0, -1};                           // hidden-layer bias c
  int weights[] = {1, -2};                        // output weights w (output bias b = 0)
  int x[][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};  // the four XOR input patterns
  int c[][2] = {{0, 0}, {0, 0}, {0, 0}, {0, 0}};  // activations (not the bias c from the formula)

  /*x[4][2] * w[2][2] = c[4][2]*/
  for(size_t i=0;i<4;++i) {
    for(size_t j=0;j<2;++j) {
      int sum = 0;
      for(size_t k=0;k<2;++k) {
        sum += x[i][k] * w[k][j];
      }
      c[i][j] = sum;
    }
  }

  for(size_t i=0;i<4;++i) {
    for(size_t j=0;j<2;++j) {
      std::cout<<c[i][j]<<" ";
    }
    std::cout<<std::endl;
  }

  std::cout<<"add bias, rectified linear unit:\n";

  for(size_t i=0;i<4;++i) {
    for(size_t j=0;j<2;++j) {
      c[i][j] = c[i][j] + bias[j];
      c[i][j] = std::max(c[i][j], 0);
      std::cout<<c[i][j]<<" ";
    }
    std::cout<<std::endl;
  }

  /* output layer: c[i][0] = h[i] . weights, where h is the rectified hidden layer */
  for(size_t i=0;i<4;++i) {
    for(size_t j=0;j<1;++j) {
      int sum=0;
      for(size_t k=0;k<2;++k) {
        sum += c[i][k] * weights[k];
      }
      c[i][j] = sum;
    }
  }

  std::cout<<"the XOR result:\n";
  for(size_t i=0; i<4; ++i) {
    for(size_t j=0;j<2;++j) {
      std::cout<<x[i][j]<<" ";
    }
    std::cout<<c[i][0]<<"\n";
  }

  return 0;
}
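
For reference, compiling and running the program should print the product x*W, the rectified hidden-layer activations, and finally the XOR result, roughly as follows:

0 0
1 1
1 1
2 2
add bias, rectified linear unit:
0 0
1 0
1 0
2 1
the XOR result:
0 0 0
0 1 1
1 0 1
1 1 0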

With the input patterns (0,0) and (1,1) located on opposite corners of the unit square, and likewise for the other two input patterns (0,1) and (1,0), it is clear that we cannot construct a straight line as a decision boundary so that (0,0) and (1,1) lie in one decision region and (0,1) and (1,0) lie in the other decision region. In other words, a single-layer perceptron cannot solve the XOR problem; the hidden layer above maps the four inputs into a space where they become linearly separable, which is why the two-layer network succeeds.
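
As a quick sanity check of that claim (not part of the original post, and only an empirical search over a small grid rather than a proof), the short program below enumerates every linear threshold unit y = (w1*x1 + w2*x2 + b > 0) with integer weights and bias in [-3, 3] and confirms that none of them reproduces XOR on all four inputs, in contrast to the two-layer network above.

#include <iostream>

int main() {
  int x[][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
  int target[] = {0, 1, 1, 0};                     // XOR truth table
  bool found = false;
  // Try every single-layer perceptron with small integer parameters.
  for (int w1 = -3; w1 <= 3 && !found; ++w1)
    for (int w2 = -3; w2 <= 3 && !found; ++w2)
      for (int b = -3; b <= 3 && !found; ++b) {
        bool ok = true;
        for (int i = 0; i < 4; ++i) {
          int y = (w1*x[i][0] + w2*x[i][1] + b > 0) ? 1 : 0;
          if (y != target[i]) { ok = false; break; }
        }
        if (ok) found = true;
      }
  std::cout << (found ? "found a single-layer solution\n"
                      : "no linear threshold unit with weights in [-3,3] reproduces XOR\n");
  return 0;
}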

Original post: https://www.cnblogs.com/donggongdechen/p/9217023.html