python - How do you use TFRecord in TensorFlow?
大家讲道理 2017-05-18 10:47:09
  1. How do I replace the mnist dataset in the code below with TFRecord data?

  2. Assume the TFRecord datasets are already prepared: both train.tfrecords and test.tfrecords sit in the same directory as the current .py file.

  3. I already have the TFRecord reading code:

def read_and_decode(filename):
    # Queue up the TFRecord file and read serialized examples one at a time
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    # Parse a single Example; the feature spec must match how the file was written
    features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string),
        })
    # Decode the raw bytes back into an image tensor
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [512, 288, 3])
    # Scale pixel values from [0, 255] to [-0.5, 0.5]
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)
    return img, label
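For point 1, the .tfrecords files must be written in exactly the format read_and_decode parses ('label' as int64, 'img_raw' as raw bytes). Below is a minimal converter sketch; it is my own assumption of how train.tfrecords could be produced from MNIST, not code from the question. Note that MNIST images are 28x28x1, so the tf.reshape(img, [512, 288, 3]) line above would have to become [28, 28, 1] (or [784]) to match.

# Hypothetical converter: write the MNIST training set to train.tfrecords
# in the {'label': int64, 'img_raw': bytes} format read_and_decode expects.
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# one_hot=False so labels come back as plain integer class indices
mnist = input_data.read_data_sets("/tmp/tensorflow/mnist/input_data")

writer = tf.python_io.TFRecordWriter("train.tfrecords")
for img, label in zip(mnist.train.images, mnist.train.labels):
    # read_and_decode calls tf.decode_raw(..., tf.uint8), so store uint8 bytes
    img_raw = (img * 255).astype(np.uint8).tobytes()
    example = tf.train.Example(features=tf.train.Features(feature={
        'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])),
        'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
    }))
    writer.write(example.SerializeToString())
writer.close()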
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

mnist = input_data.read_data_sets("/tmp/tensorflow/mnist/input_data", one_hot=True)

# Parameters
learning_rate = 0.001
training_iters = 200000
batch_size = 64
display_step = 20

# Network Parameters
n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)
dropout = 0.75  # Dropout, probability to keep units

# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

# Create custom model
def conv2d(name, l_input, w, b):
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME'), b), name=name)

def max_pool(name, l_input, k):
    return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)

def norm(name, l_input, lsize=4):
    return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)

def dnn(_x, _weights, _biases, _dropout):
    _x = tf.nn.dropout(_x, _dropout)
    d1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(_x, _weights['wd1']), _biases['bd1']), name="d1")
    d2x = tf.nn.dropout(d1, _dropout)
    d2 = tf.nn.relu(tf.nn.bias_add(tf.matmul(d2x, _weights['wd2']), _biases['bd2']), name="d2")
    dout = tf.nn.dropout(d2, _dropout)
    out = tf.matmul(dout, _weights['out']) + _biases['out']
    return out

# Store layers weight & bias
weights = {
    'wd1': tf.Variable(tf.random_normal([784, 600], stddev=0.01)),
    'wd2': tf.Variable(tf.random_normal([600, 480], stddev=0.01)),
    'out': tf.Variable(tf.random_normal([480, 10]))
}
biases = {
    'bd1': tf.Variable(tf.random_normal([600])),
    'bd2': tf.Variable(tf.random_normal([480])),
    'out': tf.Variable(tf.random_normal([10]))
}

# Construct model
pred = dnn(x, weights, biases, keep_prob)

# Define loss and optimizer
# NOTE: this positional call is what raises the ValueError quoted below;
# newer TensorFlow requires named arguments (labels=..., logits=...)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()

# tf.summary.scalar("loss", cost)
tf.summary.scalar("accuracy", accuracy)
# Merge all summaries to a single operator
merged_summary_op = tf.summary.merge_all()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    summary_writer = tf.summary.FileWriter('/tmp/logs/ex12_dnn', graph=sess.graph)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        # Fit training using batch data
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            print("Iter " + str(step * batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) +
                  ", Training Accuracy= " + "{:.5f}".format(acc))
            summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            summary_writer.add_summary(summary_str, step)
        step += 1
    print("Optimization Finished!")
    # Calculate accuracy for 256 mnist test images
    print("Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.}))  # 98%

I don't know exactly how to use it; I've tried modifying the code several times, but it always throws an error when run.

The error looks like this:

ValueError: Only call `softmax_cross_entropy_with_logits` with named arguments (labels=..., logits=..., ...)
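That error comes from the loss line in the code above: since TensorFlow 1.0, tf.nn.softmax_cross_entropy_with_logits must be called with keyword arguments, just as the message says. A minimal fix for that single line:

# Named arguments are required in TF 1.0+; note pred goes to logits=, y to labels=
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))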
All replies (1)
黄舟

I'm not sure I've understood you correctly, but the line mnist = input_data.read_data_sets("/tmp/tensorflow/mnist/input_data", one_hot=True) is what reads the mnist data. Replace it, use your TFRecord reading code to load the TFRecord data instead, and also replace every use of mnist in the training code below it. At the same time, make sure the parameters of the operations you use match the shape of the TFRecord data. A sketch of that rewiring follows.
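A minimal sketch of the rewiring, my own illustration rather than code from the answer. It assumes read_and_decode has been adapted to MNIST (the tf.reshape line returns a flat [784] vector instead of [512, 288, 3]) and that labels are stored as plain class indices, so they are one-hot encoded here to match the y placeholder. tf.train.shuffle_batch turns the single-example queue into batches, and the queue runners must be started before the loop:

# Hypothetical rewiring: feed train.tfrecords into the training loop instead of mnist.
# Assumes the placeholders (x, y, keep_prob), optimizer, batch_size, n_classes and
# training_iters from the script above are already defined.
img, label = read_and_decode("train.tfrecords")

# Build shuffled batches from the single-example queue
img_batch, label_batch = tf.train.shuffle_batch(
    [img, label], batch_size=batch_size,
    capacity=2000, min_after_dequeue=1000)

# One-hot encode the integer labels to match the [None, n_classes] placeholder
label_batch = tf.one_hot(label_batch, n_classes)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # string_input_producer/shuffle_batch queues need runner threads
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    step = 1
    while step * batch_size < training_iters:
        # Pull a batch out of the queue, then feed it exactly as before
        batch_xs, batch_ys = sess.run([img_batch, label_batch])
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
        step += 1
    coord.request_stop()
    coord.join(threads)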
