ramy · 2017-12-04 · Machine Learning
AlexNet on TensorFlow
 
  You can look at /models/bvlc_alexnet/train_val.prototxt in the Caffe source tree for the concrete network structure of Caffe's AlexNet. Here I'll put together an AlexNet based on TensorFlow; the code follows:
from numpy import *
import os
from pylab import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import time
from scipy.misc import imread
from scipy.misc import imresize
import matplotlib.image as mpimg
from scipy.ndimage import filters
import urllib
from numpy import random

import tensorflow as tf
from caffe_classes import class_names
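# note: scipy.misc.imread/imresize require SciPy < 1.2; on newer SciPy,
# imageio.imread plus PIL-based resizing are the usual substitutes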

# dummy input and label tensors, used only to pin down the network's shapes
train_x = zeros((1, 227, 227, 3)).astype(float32)
train_y = zeros((1, 1000))
xdim = train_x.shape[1:]
ydim = train_y.shape[1]

# pre-trained ImageNet weights, converted from the Caffe bvlc_alexnet model
net_data = load("bvlc_alexnet.npy").item()
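# The .npy file holds a dict mapping layer names to [weights, biases] pairs;
# a quick sanity check (assuming the bvlc_alexnet.npy distributed at
# http://www.cs.toronto.edu/~guerzhoy/tf_alexnet/):
#   for name in net_data:
#       print(name, net_data[name][0].shape, net_data[name][1].shape)
#   # e.g. conv1 -> (11, 11, 3, 96) (96,)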

def conv(input, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding="VALID", group=1):
    '''Grouped convolution, from http://github.com/ethereon/caffe-tensorflow
    (note: tf.split/tf.concat take the axis as their first argument here,
    i.e. the pre-1.0 TensorFlow signatures).
    '''
    c_i = input.get_shape()[-1]
    assert c_i % group == 0
    assert c_o % group == 0
    convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)

    if group == 1:
        conv = convolve(input, kernel)
    else:
        # split input and kernels along the channel axis, convolve each
        # group separately, then stitch the outputs back together
        input_groups = tf.split(3, group, input)
        kernel_groups = tf.split(3, group, kernel)
        output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
        conv = tf.concat(3, output_groups)
    return tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape().as_list())
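# Why group=2 appears below: AlexNet was trained split across two GPUs, so
# conv2, conv4 and conv5 each convolve only half of the previous layer's
# channels. E.g. for conv2 a (5, 5, 48, 256) kernel is split into two
# (5, 5, 48, 128) halves, each applied to one 48-channel half of the
# 96-channel input, and the two 128-channel outputs are concatenated.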



# input placeholder, fed with an image batch at run time
x = tf.placeholder(tf.float32, (None,) + xdim)

#conv1
#conv(11, 11, 96, 4, 4, padding='VALID', name='conv1')
k_h = 11; k_w = 11; c_o = 96; s_h = 4; s_w = 4
conv1W = tf.Variable(net_data["conv1"][0])
conv1b = tf.Variable(net_data["conv1"][1])
conv1_in = conv(x, conv1W, conv1b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=1)
conv1 = tf.nn.relu(conv1_in)

#lrn1
#lrn(2, 2e-05, 0.75, name='norm1')
radius = 2; alpha = 2e-05; beta = 0.75; bias = 1.0
lrn1 = tf.nn.local_response_normalization(conv1,
                                          depth_radius=radius,
                                          alpha=alpha,
                                          beta=beta,
                                          bias=bias)
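# LRN (Krizhevsky et al. 2012, sec. 3.3) normalizes each activation by the
# squared activations of neighbouring channels at the same spatial position:
#   b[i] = a[i] / (bias + alpha * sum_{j=i-radius..i+radius} a[j]^2) ** beta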

#maxpool1
#max_pool(3, 3, 2, 2, padding='VALID', name='pool1')
k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID'
maxpool1 = tf.nn.max_pool(lrn1, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
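# shape check for the 227x227 input above: conv1 (SAME, stride 4) gives
# 57x57x96, and this 3x3/2 VALID pool reduces it to 28x28x96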


#conv2
#conv(5, 5, 256, 1, 1, group=2, name='conv2')
k_h = 5; k_w = 5; c_o = 256; s_h = 1; s_w = 1; group = 2
conv2W = tf.Variable(net_data["conv2"][0])
conv2b = tf.Variable(net_data["conv2"][1])
conv2_in = conv(maxpool1, conv2W, conv2b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv2 = tf.nn.relu(conv2_in)


#lrn2
#lrn(2, 2e-05, 0.75, name='norm2')
radius = 2; alpha = 2e-05; beta = 0.75; bias = 1.0
lrn2 = tf.nn.local_response_normalization(conv2,
                                          depth_radius=radius,
                                          alpha=alpha,
                                          beta=beta,
                                          bias=bias)

#maxpool2
#max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID'
maxpool2 = tf.nn.max_pool(lrn2, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)

#conv3
#conv(3, 3, 384, 1, 1, name='conv3')
k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 1
conv3W = tf.Variable(net_data["conv3"][0])
conv3b = tf.Variable(net_data["conv3"][1])
conv3_in = conv(maxpool2, conv3W, conv3b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv3 = tf.nn.relu(conv3_in)

#conv4
#conv(3, 3, 384, 1, 1, group=2, name='conv4')
k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 2
conv4W = tf.Variable(net_data["conv4"][0])
conv4b = tf.Variable(net_data["conv4"][1])
conv4_in = conv(conv3, conv4W, conv4b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv4 = tf.nn.relu(conv4_in)


#conv5
#conv(3, 3, 256, 1, 1, group=2, name='conv5')
k_h = 3; k_w = 3; c_o = 256; s_h = 1; s_w = 1; group = 2
conv5W = tf.Variable(net_data["conv5"][0])
conv5b = tf.Variable(net_data["conv5"][1])
conv5_in = conv(conv4, conv5W, conv5b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv5 = tf.nn.relu(conv5_in)

#maxpool5
#max_pool(3, 3, 2, 2, padding='VALID', name='pool5')
k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID'
maxpool5 = tf.nn.max_pool(conv5, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)

#fc6
#fc(4096, name='fc6')
fc6W = tf.Variable(net_data["fc6"][0])
fc6b = tf.Variable(net_data["fc6"][1])
# flatten maxpool5 (6x6x256 = 9216 features) and apply relu(x.W + b)
fc6 = tf.nn.relu_layer(tf.reshape(maxpool5, [1, int(prod(maxpool5.get_shape()[1:]))]), fc6W, fc6b)

#fc7
#fc(4096, name='fc7')
fc7W = tf.Variable(net_data["fc7"][0])
fc7b = tf.Variable(net_data["fc7"][1])
fc7 = tf.nn.relu_layer(fc6, fc7W, fc7b)

#fc8
#fc(1000, relu=False, name='fc8')
fc8W = tf.Variable(net_data["fc8"][0])
fc8b = tf.Variable(net_data["fc8"][1])
# the 1000-way class logits; xw_plus_b applies no nonlinearity
fc8 = tf.nn.xw_plus_b(fc7, fc8W, fc8b)


#prob
#softmax(name='prob')
prob = tf.nn.softmax(fc8)

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

# forward pass; train_x is the all-zeros dummy batch defined at the top
output = sess.run(prob, feed_dict={x: train_x})
################################################################################

#Output:

inds = argsort(output)[0, :]
for i in range(5):
    print(class_names[inds[-1-i]], output[0, inds[-1-i]])
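The run above only classifies the all-zeros dummy batch, so the top-5 labels are meaningless. To classify a real photo, resize it to 227x227 and subtract a mean before feeding it, along these lines (a minimal sketch; the filename "poodle.png" is a hypothetical stand-in, and the crude per-image mean subtraction follows the upstream myalexnet example):

# classify an arbitrary RGB image (hypothetical file name "poodle.png")
im = (imread("poodle.png")[:, :, :3]).astype(float32)
im = imresize(im, (227, 227)).astype(float32)
im = im - mean(im)   # crude per-image mean subtraction
output = sess.run(prob, feed_dict={x: [im]})
inds = argsort(output)[0, :]
for i in range(5):
    print(class_names[inds[-1-i]], output[0, inds[-1-i]])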

 

