How to predict with TensorFlow?

I'm a beginner with TensorFlow and I want to test TensorFlow code for gait recognition from this GitHub repository: https://github.com/qinnzou/Gait-Recognition-Using-Smartphones. It seems to use plain TensorFlow rather than Keras.

I want to quickly test this code, and later I'll try to understand it. I don't know how to predict with this trained (optimized) model. Can you help me make predictions with TensorFlow, please? Thanks a lot. Théo

#!/usr/bin/env python
# coding: utf-8
from __future__ import division, print_function, absolute_import

import os
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

# Select which GPU to use (must be set before the session is created).
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

def read_data(data_path):
    data = []
    # Files are named like "1.txt", "2.txt", ...; sort them numerically by index.
    file_names = os.listdir(data_path)
    file_names.sort(key=lambda x: int(x[:-4]))
    for file_name in file_names:
        file_path = os.path.join(data_path, file_name)
        signal_data = np.loadtxt(file_path)
        data.append(signal_data)
    # (samples, time_steps, channels) -> (samples, channels, time_steps)
    data = np.array(data).transpose(0, 2, 1)
    d_shape = data.shape
    return data.reshape(d_shape[0], 1, d_shape[1], d_shape[2])

def read_label(data_path):
    data = []
    # Sort label files numerically by their leading index (the last 10 characters of the name are stripped).
    file_names = os.listdir(data_path)
    file_names.sort(key=lambda x: int(x[:-10]))
    for file_name in file_names:
        file_path = os.path.join(data_path, file_name)
        signal_data = np.loadtxt(file_path)
        data.append(signal_data)
    return np.array(data)

train_data_path = "../data/train_new/train_data"
train_label_path = "../data/train_new/train_label"

test_data_path = "../data/test/test_data"
test_label_path = "../data/test/test_label"

train_data = read_data(train_data_path).transpose(0, 2, 3, 1)  # 519; shape (N, 6, 1024, 1)
train_label = read_label(train_label_path)
test_data = read_data(test_data_path).transpose(0, 2, 3, 1)    # 519; shape (N, 6, 1024, 1)
test_label = read_label(test_label_path)

print(train_data.shape)
print(train_label.shape)
print(test_data.shape)
print(test_label.shape)

# Training Parameters
learning_rate = 0.00001
num_steps = 20 # NOTE: originally 150
batch_size = 32
display_step = 1
data_len = len(train_data)

# tf Graph input: 6-channel signals of length 1024, and a length-1024 label vector per sample
X = tf.placeholder(tf.float32, [None, 6, 1024, 1])
Y = tf.placeholder(tf.float32, [None, 1024])

def cross_entropy(y_, output_map):
    # Element-wise binary cross-entropy; clipping avoids log(0).
    return -tf.reduce_mean(
        y_ * tf.log(tf.clip_by_value(output_map, 1e-10, 1.0))
        + (1 - y_) * tf.log(tf.clip_by_value(1 - output_map, 1e-10, 1.0)),
        name="cross_entropy")

# Create model: a U-Net-style encoder/decoder convolving along the time axis
def conv_net(x):
    # Reflection padding keeps the time dimension at 1024 for the [1, 16] convolutions.
    x = tf.pad(x, paddings=[[0, 0], [0, 0], [7, 8], [0, 0]], mode="REFLECT")
    conv1_1 = tf.layers.conv2d(x, 64, kernel_size=[1, 16], padding='valid', activation=tf.nn.relu)
    conv1_1 = tf.pad(conv1_1, paddings=[[0, 0], [0, 0], [7, 8], [0, 0]], mode="REFLECT")
    conv1_2 = tf.layers.conv2d(conv1_1, 64, kernel_size=[1, 16], padding='valid', activation=tf.nn.relu)

    
    # Encoder block 2: halve the time dimension, then two more convolutions
    conv2_1 = tf.layers.max_pooling2d(conv1_2, pool_size=[1, 2], strides=[1, 2], padding='valid')
    conv2_1 = tf.pad(conv2_1, paddings=[[0, 0], [0, 0], [7, 8], [0, 0]], mode="REFLECT")
    conv2_2 = tf.layers.conv2d(conv2_1, 128, kernel_size=[1, 16], padding='valid', activation=tf.nn.relu)
    conv2_2 = tf.pad(conv2_2, paddings=[[0, 0], [0, 0], [7, 8], [0, 0]], mode="REFLECT")
    conv2_3 = tf.layers.conv2d(conv2_2, 128, kernel_size=[1, 16], padding='valid', activation=tf.nn.relu)
    
    # Encoder block 3 (bottleneck): halve the time dimension again, then three convolutions
    conv3_1 = tf.layers.max_pooling2d(conv2_3, pool_size=[1, 2], strides=[1, 2], padding='valid')
    
    conv3_1 = tf.pad(conv3_1, paddings=[[0, 0], [0, 0], [7, 8], [0, 0]], mode="REFLECT")
    conv3_2 = tf.layers.conv2d(conv3_1, 256, kernel_size=[1, 16], padding='valid', activation=tf.nn.relu)
    conv3_2 = tf.pad(conv3_2, paddings=[[0, 0], [0, 0], [7, 8], [0, 0]], mode="REFLECT")
    conv3_3 = tf.layers.conv2d(conv3_2, 256, kernel_size=[1, 16], padding='valid', activation=tf.nn.relu)
    conv3_3 = tf.pad(conv3_3, paddings=[[0, 0], [0, 0], [7, 8], [0, 0]], mode="REFLECT")
    conv3_4 = tf.layers.conv2d(conv3_3, 256, kernel_size=[1, 16], padding='valid', activation=tf.nn.relu)
   
    
    # Decoder: upsample by 2 and concatenate the skip connection from encoder block 2
    conv2_4_1 = tf.layers.conv2d_transpose(conv3_4, 128, kernel_size=[1, 2], strides=[1, 2], padding="VALID")
    conv2_4 = tf.concat([conv2_4_1, conv2_3], axis=3)
    conv2_4 = tf.pad(conv2_4, paddings=[[0, 0], [0, 0], [7, 8], [0, 0]], mode="REFLECT")
    conv2_5 = tf.layers.conv2d(conv2_4, 128, kernel_size=[1, 16], padding='valid', activation=tf.nn.relu)
    conv2_5 = tf.pad(conv2_5, paddings=[[0, 0], [0, 0], [7, 8], [0, 0]], mode="REFLECT")
    conv2_6 = tf.layers.conv2d(conv2_5, 128, kernel_size=[1, 16], padding='valid', activation=tf.nn.relu)
    
    # Decoder: upsample again and concatenate the skip connection from encoder block 1
    conv1_3_1 = tf.layers.conv2d_transpose(conv2_6, 64, kernel_size=[1, 2], strides=[1, 2], padding="VALID")
    conv1_3 = tf.concat([conv1_2, conv1_3_1], axis=3)
    conv1_3 = tf.pad(conv1_3, paddings=[[0, 0], [0, 0], [7, 8], [0, 0]], mode="REFLECT")
    conv1_4 = tf.layers.conv2d(conv1_3, 64, kernel_size=[1, 16], padding='valid', activation=tf.nn.relu)
    conv1_4 = tf.pad(conv1_4, paddings=[[0, 0], [0, 0], [7, 8], [0, 0]], mode="REFLECT")
    conv1_4 = tf.layers.conv2d(conv1_4, 64, kernel_size=[1, 16], padding='valid', activation=tf.nn.relu)
    # Collapse the 6 sensor channels, then map each time step to a probability with a sigmoid
    conv1_5 = tf.layers.conv2d(conv1_4, 256, kernel_size=[6, 1], padding='valid', activation=tf.nn.relu)
    conv1_6 = tf.layers.conv2d(conv1_5, 1, kernel_size=[1, 1], padding='valid', activation=tf.nn.sigmoid)
    
    out = tf.reshape(conv1_6, [-1, 1024])
    return out

# Construct model (note: the output already went through a sigmoid, so these are probabilities, not raw logits)
logits = conv_net(X)

# Define loss and optimizer
loss_op = cross_entropy(Y, logits)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

# Evaluate model
pred = tf.greater(logits, 0.5)
pred = tf.cast(pred, tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(pred, Y), tf.float32))

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training


sess = tf.Session()

# Run the initializer
sess.run(init)

for step in range(1, num_steps+1):
    np.random.seed(step)
    indices = np.arange(train_data.shape[0])
    np.random.shuffle(indices)
    train_data_new = train_data[indices]
    train_label_new = train_label[indices]

    start_point = 0
    end_point = start_point + batch_size
    while end_point < data_len:
        batch_x = train_data_new[start_point: end_point]
        batch_y = train_label_new[start_point: end_point]

        start_point += batch_size
        end_point += batch_size

        # Run optimization op (backprop)
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Calculate batch loss and accuracy
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                 Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " +                   "{:.4f}".format(loss) + ", Training Accuracy= " +                   "{:.3f}".format(acc))
#                 print("Testing Accuracy:", \
#                         sess.run(accuracy, feed_dict={X: test_data[:128],
#                                                       Y: test_label[:128]}))
    print('***********************')
    print("Testing Accuracy:",                     sess.run(accuracy, feed_dict={X: test_data,
                                                  Y: test_label}))
    print('***********************')

print("Optimization Finished!")
sess.close()
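
For prediction, a minimal sketch along the lines below might work with the graph above: it feeds new samples through the X placeholder and evaluates the logits tensor (per-timestep sigmoid probabilities) and the pred tensor (thresholded 0/1 decisions). It has to run before sess.close(); new_data is a hypothetical array with the same layout as test_data, i.e. shaped (num_samples, 6, 1024, 1), and the checkpoint path is just an example.

# Minimal prediction sketch (assumption: run this before sess.close()).
# `new_data` stands in for your own samples, prepared exactly like test_data,
# i.e. read_data(some_path).transpose(0, 2, 3, 1) -> shape (num_samples, 6, 1024, 1).
new_data = test_data[:5]

# Per-timestep probabilities and thresholded 0/1 predictions, both (num_samples, 1024).
probs, predictions = sess.run([logits, pred], feed_dict={X: new_data})
print(probs.shape, predictions.shape)

# To reuse the trained weights later, save them before closing the session...
saver = tf.train.Saver()
saver.save(sess, "./gait_model.ckpt")  # example path

# ...and, in a new script that rebuilds the same graph, restore and predict:
# saver.restore(sess, "./gait_model.ckpt")
# predictions = sess.run(pred, feed_dict={X: new_data})

To predict long after training, the save/restore part with tf.train.Saver is the piece you need, since the in-memory variables are lost once the session is closed.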


Read more here: https://stackoverflow.com/questions/64401108/how-to-predict-with-tensorflow

Content Attribution

This content was originally published by Theo75 at Recent Questions - Stack Overflow, and is syndicated here via their RSS feed. You can read the original post over there.
