print(session.run(task, feed_dict={a: [10,11], b: [3,4]}))
session.close()

Here we define a computation graph for the formula a*k + b, where a and b are input values; the special placeholder type is used to declare them. Now, when we evaluate the graph with session.run, we can pass in different values, for example a=2 and b=3:

print(session.run(task, feed_dict={a: 2, b: 3}))

>> 7

We can even pass arrays as parameters:

print(session.run(task, feed_dict={a: [10,11], b: [3,4]}))

>> [23, 26]
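For reference, the whole example can be reconstructed as the following minimal sketch (the definitions of a, b and k are not shown in this excerpt; they are assumptions consistent with the outputs above, with k a constant equal to 2):

import tensorflow as tf

# a and b are placeholders: values supplied at run time via feed_dict
a = tf.placeholder("float")
b = tf.placeholder("float")
k = tf.constant(2.0)  # assumed value, consistent with the outputs above
task = a * k + b

session = tf.Session()
print(session.run(task, feed_dict={a: 2, b: 3}))              # 7.0
print(session.run(task, feed_dict={a: [10, 11], b: [3, 4]}))  # [23. 26.]
session.close()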

The TensorFlow library supports many different operations, for example matrix computations (a short sketch follows below), and of course a large number of functions designed specifically for data processing and machine learning. Let's return to the previous chapter and rewrite our neural network that recognizes handwritten digits. The source code is based on a ready-made example from https://github.com/aymericdamien/TensorFlow-Examples/tree/master/examples; those interested can study the examples there in more detail.
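First, a quick illustration of the matrix operations mentioned above (a minimal sketch; the matrices and values are made up for this example):

import tensorflow as tf

m1 = tf.constant([[1.0, 2.0], [3.0, 4.0]])
m2 = tf.constant([[5.0, 6.0], [7.0, 8.0]])
product = tf.matmul(m1, m2)  # matrix multiplication

with tf.Session() as s:
    print(s.run(product))  # [[19. 22.] [43. 50.]]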

The complete code is shown below:

import tensorflow as tf
import _pickle as cPickle

# Prepare training data
training_epochs = 500
learning_rate = 0.001
inputs = []
targets = []

# Each record in digitsMnist.txt is a dict with "data" (784 pixel values
# of a 28x28 image) and "result" (the digit label, 0-9)
with open("digitsMnist.txt", 'rb') as fp:
    digits = cPickle.load(fp)
print("Digits loaded:", len(digits))

inputs = list(map(lambda x: x["data"], digits))

def arrayFromDigit(data):
    result = data["result"]
    # Convert value to a one-hot array: 3 => [0,0,0,1,0,0,0,0,0,0]
    result_flat = [0.0]*10
    result_flat[result] = 1.0
    return result_flat

targets = list(map(arrayFromDigit, digits))

# Network Parameters
n_hidden_1 = 48  # 1st layer number of neurons
n_input = 784    # MNIST data input (img shape: 28*28)
n_classes = 10   # MNIST total classes (0-9 digits)
model_path = "model_mlp.ckpt"

# tf Graph input
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("float", [None, n_classes])

# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Create model
def multilayer_perceptron(x):
    # Hidden fully connected layer with 48 neurons
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    # Output fully connected layer with a neuron for each class
    out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    return out_layer

# Construct model
logits = multilayer_perceptron(X)

# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

# Initializing the variables
init = tf.global_variables_initializer()

# Save/restore model
saver = tf.train.Saver()

session = tf.Session()
session.run(init)

# Training cycle
for p in range(training_epochs):
    _, c = session.run([train_op, loss_op], feed_dict={X: inputs, Y: targets})
    if p % 50 == 0:
        print("Step {} of {}, loss = {:.4f}".format(p, training_epochs, c))
print("Training Finished!")

# Save model (optional)
# save_path = saver.save(session, './' + model_path)
# print("Model saved in file: %s" % save_path)

# Restore model weights from previously saved model (optional)
# saver.restore(session, './' + model_path)
# print("Model restored from file: %s" % model_path)

# Use model
pred = tf.nn.softmax(logits)  # Apply softmax to logits
print("")

# 28x28 bitmap of a handwritten digit (784 values)
t = [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,0,0,0,0,0,
      0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,
      0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,
      0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,
      0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
      0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 ]

res = session.run(pred, feed_dict={X: [t]})
print("Result:", res[0])

session.close()
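The printed result is a vector of ten values, one per digit class; the index of the largest value is the network's answer. A small optional addition (not part of the original listing; it assumes numpy, which TensorFlow installs as a dependency) makes this explicit:

import numpy as np

digit = int(np.argmax(res[0]))  # index of the highest probability
print("Recognized digit:", digit)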