from datetime import datetime
import math
import time
import tensorflow as tf

batch_size = 32
num_batches = 100
n_output = 100

# Print a tensor's op name and shape
def print_activations(t):
    print(t.op.name, ' ', t.get_shape().as_list())

# inference: takes the image batch as input and returns the last pooling
# layer (pool5) together with the list of trainable parameters
def inference(images):
    parameters = []
    # First convolutional layer
    with tf.name_scope('conv1') as scope:
        # Weight variable
        kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], dtype=tf.float32, stddev=1e-1), name='weights')
        # Convolution with stride 4
        conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
        # Biases initialized to 0
        biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name=scope)
        print_activations(conv1)
        parameters += [kernel, biases]
    # LRN layer followed by max pooling
    lrn1 = tf.nn.lrn(conv1, 4, bias=1.0, alpha=0.001 / 9, beta=0.75, name='lrn1')
    pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool1')
    print_activations(pool1)

    # Second convolutional layer
    with tf.name_scope('conv2') as scope:
        kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[192]), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activations(conv2)
    # LRN layer followed by max pooling
    lrn2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9, beta=0.75, name='lrn2')
    pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool2')
    print_activations(pool2)

    # Third convolutional layer
    with tf.name_scope('conv3') as scope:
        kernel = tf.Variable(tf.truncated_normal(shape=[3, 3, 192, 384], stddev=1e-1, dtype=tf.float32), name='weights')
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[384]), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activations(conv3)

    # Fourth convolutional layer
    with tf.name_scope('conv4') as scope:
        kernel = tf.Variable(tf.truncated_normal(shape=[3, 3, 384, 256], stddev=1e-1, dtype=tf.float32), name='weights')
        conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[256]), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activations(conv4)

    # Fifth convolutional layer
    with tf.name_scope('conv5') as scope:
        kernel = tf.Variable(tf.truncated_normal(shape=[3, 3, 256, 256], stddev=1e-1, dtype=tf.float32), name='weights')
        conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[256]), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activations(conv5)
    pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool5')
    print_activations(pool5)
    return pool5, parameters

# Fully connected layers (fc1-fc3) on top of pool5
def all_contact(pool5, keep_prob):
    # Flatten pool5 to [batch, height * width * channels]
    pool_shape = pool5.get_shape().as_list()
    nodes = [-1, pool_shape[1] * pool_shape[2] * pool_shape[3]]
    dense1 = tf.reshape(pool5, nodes)
    with tf.name_scope('fc1'):
        w1 = tf.Variable(tf.truncated_normal([9216, 1024], stddev=1e-1), name='w1')
        b1 = tf.Variable(tf.constant(0.0, tf.float32, shape=[1024]), trainable=True, name='b1')
        fc1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(dense1, w1), b1))
        # Dropout layer
        fc1_drop = tf.nn.dropout(fc1, keep_prob)
        print_activations(fc1_drop)

    with tf.name_scope('fc2'):
        w2 = tf.Variable(tf.truncated_normal([1024, 1024], stddev=1e-1), name='w2')
        b2 = tf.Variable(tf.constant(0.0, tf.float32, shape=[1024]), trainable=True, name='b2')
        fc2 = tf.nn.relu(tf.nn.bias_add(tf.matmul(fc1_drop, w2), b2))
        # Dropout layer
        fc2_drop = tf.nn.dropout(fc2, keep_prob)
        print_activations(fc2_drop)

    # Output layer (note: the original applies ReLU here as well instead of
    # returning raw logits)
    with tf.name_scope('fc3'):
        w3 = tf.Variable(tf.truncated_normal([1024, n_output], stddev=1e-1), name='w3')
        b3 = tf.Variable(tf.constant(0.0, tf.float32, shape=[n_output]), trainable=True, name='b3')
        fc3 = tf.nn.relu(tf.nn.bias_add(tf.matmul(fc2_drop, w3), b3))
        print_activations(fc3)
    return fc3

# Run `target` for num_batches timed iterations, discarding the first
# num_steps_burn_in warm-up steps, and report mean +/- std per batch
def time_tensorflow_run(session, target, info_string):
    num_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' % (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
    # One-pass statistics: Var[x] = E[x^2] - (E[x])^2
    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' % (datetime.now(), info_string, num_batches, mn, sd))

def run_benchmark():
    with tf.Graph().as_default():
        # Random images instead of a real dataset: this benchmarks raw
        # compute speed, not data loading or accuracy
        image_size = 224
        images = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3], dtype=tf.float32, stddev=1e-1))
        pool5, parameters = inference(images)
        all_contact(pool5, 1.0)
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        # Forward pass only
        time_tensorflow_run(sess, pool5, "Forward")
        # Forward + backward: differentiate an L2 loss on pool5 w.r.t. all parameters
        objective = tf.nn.l2_loss(pool5)
        grad = tf.gradients(objective, parameters)
        time_tensorflow_run(sess, grad, "Forward-backward")

run_benchmark()
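
The hard-coded 9216 in w1 is the flattened size of pool5 for the 224x224 inputs created in run_benchmark(). A minimal sketch of the shape arithmetic (plain Python, no TensorFlow required) to verify that number:

import math

def conv_same(size, stride):
    # 'SAME' padding: output = ceil(input / stride)
    return math.ceil(size / stride)

def pool_valid(size, ksize, stride):
    # 'VALID' padding: output = (input - ksize) // stride + 1
    return (size - ksize) // stride + 1

size = conv_same(224, 4)       # conv1, stride 4 -> 56
size = pool_valid(size, 3, 2)  # pool1           -> 27
size = conv_same(size, 1)      # conv2           -> 27
size = pool_valid(size, 3, 2)  # pool2           -> 13
# conv3-conv5 are stride-1 'SAME' convolutions, so the size stays 13
size = pool_valid(size, 3, 2)  # pool5           -> 6
print(size * size * 256)       # 6 * 6 * 256 = 9216, the first dimension of w1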
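
Note that this script targets the TensorFlow 1.x graph API (tf.Session, tf.truncated_normal, tf.global_variables_initializer, and so on) and will not run as-is under TensorFlow 2.x. If only 2.x is installed, one option is the v1 compatibility shim that ships with TensorFlow 2, swapping the import at the top of the file:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores graph mode, Session, and the 1.x symbols used above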