Source: http://blog.csdn.net/sqh4587/article/details/74507010

The script below loads 48x48 grayscale face images and their 7-class emotion labels from a CSV file, trains a three-convolutional-layer CNN on 30000 training samples with TensorFlow 1.x, and evaluates loss and accuracy on 5000 held-out test samples after every epoch.

```python
# -*- coding: utf-8 -*-
"""
@author: sqh4587
"""
import os

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf

# Locate the CSV file inside the data folder
dir_name = 'data'
print('The folder path: ', dir_name)
files = os.listdir(dir_name)
for f in files:
    print(dir_name + os.sep + f)
file_path = dir_name + os.sep + files[0]
print(file_path)

# One row per image: an integer 'emotion' label and a 'pixels' column
# holding 48*48 space-separated grayscale values
data = pd.read_csv(file_path, dtype=str)
label = np.array(data['emotion'])
img_data = np.array(data['pixels'])
N_sample = label.size
print(N_sample)

# Parse each pixel string, scale it to [0, 1], and one-hot encode the label
Face_data = np.zeros((N_sample, 48 * 48))
Face_label = np.zeros((N_sample, 7), dtype=int)
for i in range(N_sample):
    x = img_data[i]
    x = np.fromstring(x, dtype=float, sep=' ')
    x_max = x.max()
    x = x / (x_max + 0.0001)  # per-image max normalization
    Face_data[i] = x
    Face_label[i, int(label[i])] = 1
    if i < 10:
        print('i: %d \t ' % (i), Face_label[i])

# Hold out the last 5000 samples for testing
train_num = 30000
test_num = 5000
train_x = Face_data[0:train_num, :]
train_y = Face_label[0:train_num, :]
test_x = Face_data[train_num:train_num + test_num, :]
test_y = Face_label[train_num:train_num + test_num, :]
print("All is well")
```
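The CSV layout assumed here, an integer `emotion` column in 0..6 plus a `pixels` column of 48*48 space-separated grayscale values, matches the Kaggle FER2013 dataset. Assuming that dataset, the index-to-name mapping below is handy when inspecting labels or predictions (the `emotion_names` dict is illustrative, not part of the original script):

```python
# Assumes the Kaggle FER2013 label convention; verify against your own CSV
emotion_names = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy',
                 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
print(emotion_names[int(label[0])])  # emotion name of the first sample
```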
```python
# Training parameters
batch_size = 50
train_batch_num = train_num // batch_size  # integer division: whole batches only
test_batch_num = test_num // batch_size
train_epoch = 100
learning_rate = 0.001

# Network parameters
n_input = 2304  # data input size (img shape: 48*48)
n_classes = 7   # total classes
dropout = 0.5   # dropout, probability to keep units

# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)

# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
    # Conv2D wrapper, with bias and ReLU activation
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def maxpool2d(x, k=2):
    # MaxPool2D wrapper
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='VALID')
```
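A detail worth spelling out before the model definition: each `conv2d` uses SAME padding, so convolution preserves the 48x48 spatial size, while each `maxpool2d` uses k=2 with VALID padding and halves it. Three conv/pool stages therefore shrink 48 to 24, then 12, then 6, which is exactly where the `6 * 6 * 32` input size of the fully connected weight `wd1` below comes from. A quick sanity check:

```python
# Sanity check: trace the spatial size through the three conv/pool stages
size = 48
for _ in range(3):
    # a SAME-padded 3x3 conv keeps the size; a 2x2 VALID max-pool halves it
    size //= 2
print(size, size * size * 32)  # -> 6 1152, matching weights['wd1'] below
```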
```python
# Create model
def conv_net(x, weights, biases, dropout):
    # Reshape flat input back into a 48x48 single-channel picture
    x = tf.reshape(x, shape=[-1, 48, 48, 1])

    # Convolution layer + max pooling (down-sampling)
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    conv1 = maxpool2d(conv1, k=2)

    # Convolution layer + max pooling (down-sampling)
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    conv2 = maxpool2d(conv2, k=2)

    # Convolution layer + max pooling (down-sampling)
    conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])
    conv3 = maxpool2d(conv3, k=2)

    # Fully connected layer: reshape conv3 output to fit its input
    fc1 = tf.reshape(conv3, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply dropout
    fc1 = tf.nn.dropout(fc1, dropout)

    # Output, class prediction (raw logits, no softmax here)
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out

# Store layers' weights & biases
weights = {
    # 3x3 conv, 1 input channel, 128 output channels
    'wc1': tf.Variable(tf.random_normal([3, 3, 1, 128])),
    # 3x3 conv, 128 input channels, 64 output channels
    'wc2': tf.Variable(tf.random_normal([3, 3, 128, 64])),
    # fully connected, 64 input channels, 32 output channels
    'wc3': tf.Variable(tf.random_normal([3, 3, 64, 32])),
    # fully connected, 6*6*32 inputs, 200 outputs
    'wd1': tf.Variable(tf.random_normal([6 * 6 * 32, 200])),
    # 200 inputs, 7 class-score outputs
    'out': tf.Variable(tf.random_normal([200, n_classes]))
}
biases = {
    'bc1': tf.Variable(tf.random_normal([128])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bc3': tf.Variable(tf.random_normal([32])),
    'bd1': tf.Variable(tf.random_normal([200])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
```
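One caveat on the initialization above: `tf.random_normal` defaults to a standard deviation of 1.0, which is a large scale for these layers and can make early training unstable. A smaller stddev is a common variant, sketched here as an optional tweak rather than the author's choice (`small_normal` is a hypothetical helper):

```python
def small_normal(shape, stddev=0.1):
    # Hypothetical helper: same layer shapes as above, smaller initial scale
    return tf.Variable(tf.random_normal(shape, stddev=stddev))

# usage, e.g.: 'wc1': small_normal([3, 3, 1, 128])
```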
```python
# Construct model
pred = conv_net(x, weights, biases, keep_prob)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred,
                                                              labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()

# Index arrays used to slice out mini-batches
Train_ind = np.arange(train_num)
Test_ind = np.arange(test_num)
```
```python
# Track per-epoch test metrics so they can be plotted after training
test_loss_history = []
test_acc_history = []

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(train_epoch):
        Total_test_loss = 0
        Total_test_acc = 0

        for train_batch in range(train_batch_num):
            sample_ind = Train_ind[train_batch * batch_size:(train_batch + 1) * batch_size]
            batch_x = train_x[sample_ind, :]
            batch_y = train_y[sample_ind, :]
            # Run optimization op (backprop)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
                                           keep_prob: dropout})
            if train_batch % batch_size == 0:
                # Calculate batch loss and accuracy every 50 batches
                # (batch_size doubles as the display interval here)
                loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                                  y: batch_y,
                                                                  keep_prob: 1.})
                print("Epoch: " + str(epoch + 1) + ", Batch: " + str(train_batch) +
                      ", Loss= " + "{:.3f}".format(loss) +
                      ", Training Accuracy= " + "{:.3f}".format(acc))

        # Calculate test loss and test accuracy, averaged over all test batches
        for test_batch in range(test_batch_num):
            sample_ind = Test_ind[test_batch * batch_size:(test_batch + 1) * batch_size]
            batch_x = test_x[sample_ind, :]
            batch_y = test_y[sample_ind, :]
            test_loss, test_acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                                        y: batch_y,
                                                                        keep_prob: 1.})
            Total_test_loss = Total_test_loss + test_loss
            Total_test_acc = Total_test_acc + test_acc
        Total_test_loss = Total_test_loss / test_batch_num
        Total_test_acc = Total_test_acc / test_batch_num
        test_loss_history.append(Total_test_loss)
        test_acc_history.append(Total_test_acc)

        print("Epoch: " + str(epoch + 1) +
              ", Test Loss= " + "{:.3f}".format(Total_test_loss) +
              ", Test Accuracy= " + "{:.3f}".format(Total_test_acc))
```
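The loop above slices `Train_ind` in the same fixed order every epoch; the index array is created once and never reshuffled. Reshuffling at the top of the epoch loop gives properly stochastic mini-batches; a one-line sketch of that optional change:

```python
# Place at the start of the epoch loop: reshuffle training indices so each
# epoch sees differently composed mini-batches (optional, standard practice)
np.random.shuffle(Train_ind)
```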
```python
# Plot the per-epoch test loss and accuracy curves
plt.subplot(2, 1, 1)
plt.plot(test_loss_history, 'r')
plt.ylabel('Test loss')
plt.subplot(2, 1, 2)
plt.plot(test_acc_history, 'r')
plt.ylabel('Test Accuracy')
print("All is well")
plt.show()
```
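Finally, the script targets the TensorFlow 1.x graph API (`tf.placeholder`, `tf.Session`, `tf.train.AdamOptimizer`), so it will not run unmodified on TensorFlow 2.x. The usual stopgap, assuming a TF 2.x installation, is the v1 compatibility layer; replacing the TensorFlow import with the two lines below should let the rest of the script run as written:

```python
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores placeholders, sessions, and v1 graph mode
```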