Emotion

mnist함수를 이용한 tensorflow

lis29188 2018. 11. 20. 08:45
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load MNIST; one_hot=True makes each label a length-10 one-hot vector.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Inputs: flattened 28x28 images (784 pixels) and one-hot digit labels.
X = tf.placeholder(tf.float32, shape=[None, 784])
Y = tf.placeholder(tf.float32, shape=[None, 10])

# Single-layer softmax regression: one weight per (pixel, class) pair.
w = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.random_normal([10]))

# Softmax over the class logits (matmul handles the matrix product).
hypothesis = tf.nn.softmax(tf.matmul(X, w) + b)

# Cross-entropy: sum over the class axis (axis=1) per example, then average
# over the batch. The original summed over the whole batch before the mean,
# which made the loss (and effective learning rate) scale with batch size.
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))
# Gradient descent minimizes the cross-entropy cost.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train = optimizer.minimize(cost)

total_epoch = 60  # full passes over the training set (outer loop count)
batch_size = 100  # examples per gradient step (inner loop granularity)

with tf.Session() as sess:
    # Initialize all tf.Variable objects (w, b) before training.
    sess.run(tf.global_variables_initializer())
    for epoch in range(total_epoch):
        sum_cost = 0
        total_batch = int(mnist.train.num_examples / batch_size)

        for step in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)

            _, cost_ = sess.run([train, cost], feed_dict={X: batch_x, Y: batch_y})
            sum_cost += cost_

        # Report the summed batch cost for this epoch.
        print("epoch:%4d, cost:%9f" % (epoch + 1, sum_cost))
    print("Learning finished")

    # Accuracy: fraction of test images whose argmax prediction matches
    # the argmax of the one-hot label. (tf.argmax replaces the deprecated
    # tf.arg_max alias.)
    is_correct = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
    print('accuracy:', sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels}))
 
 
 