逻辑回归 (Logistic Regression)
- import tensorflow as tf
- # Import MINST data
- from tensorflow.examples.tutorials.mnist import input_data
- mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
- # Parameters
- learning_rate = 0.01
- training_epochs = 25
- batch_size = 100
- display_step = 1
- # tf Graph Input
- x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784
- y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes
- # Set model weights
- W = tf.Variable(tf.zeros([784, 10]))
- b = tf.Variable(tf.zeros([10]))
- # Construct model
- pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
- # Minimize error using cross entropy
- cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
- # Gradient Descent
- optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
- # Initializing the variables
- init = tf.initialize_all_variables()
- # Launch the graph
- with tf.Session() as sess:
- sess.run(init)
- # Training cycle
- for epoch in range(training_epochs):
- avg_cost = 0.
- total_batch = int(mnist.train.num_examples/batch_size)
- # Loop over all batches
- for i in range(total_batch):
- batch_xs, batch_ys = mnist.train.next_batch(batch_size)
- # Run optimization op (backprop) and cost op (to get loss value)
- _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,
- y: batch_ys})
- # Compute average loss
- avg_cost += c / total_batch
- # Display logs per epoch step
- if (epoch+1) % display_step == 0:
- print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
- print "Optimization Finished!"
- # Test model
- correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
- # Calculate accuracy
- accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
- print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
- # Result (sample run output):
- Epoch: 0001 cost= 29.860467369
- Epoch: 0002 cost= 22.001451784
- Epoch: 0003 cost= 21.019925554
- Epoch: 0004 cost= 20.561320320
- Epoch: 0005 cost= 20.109135756
- Epoch: 0006 cost= 19.927862290
- Epoch: 0007 cost= 19.548687116
- Epoch: 0008 cost= 19.429119071
- Epoch: 0009 cost= 19.397068211
- Epoch: 0010 cost= 19.180813479
- Epoch: 0011 cost= 19.026808132
- Epoch: 0012 cost= 19.057875510
- Epoch: 0013 cost= 19.009575057
- Epoch: 0014 cost= 18.873240641
- Epoch: 0015 cost= 18.718575359
- Epoch: 0016 cost= 18.718761925
- Epoch: 0017 cost= 18.673640560
- Epoch: 0018 cost= 18.562128253
- Epoch: 0019 cost= 18.458205289
- Epoch: 0020 cost= 18.538211225
- Epoch: 0021 cost= 18.443384213
- Epoch: 0022 cost= 18.428727668
- Epoch: 0023 cost= 18.304270616
- Epoch: 0024 cost= 18.323529782
- Epoch: 0025 cost= 18.247192113
- Optimization Finished!
- (10000, 784)
- Accuracy: 0.9206