I used a TFRecord file to train a stacked autoencoder. My code is:
"""Train and evaluate a stacked autoencoder from user data serialized to a TFRecord file.

Pipeline: write_user_instances_to_tfrecord() serializes (label, matrix) pairs,
train_input_fn() reads them back through tf.data, and build_stacked_ae() trains
and evaluates the StackedAutoencoder model. Written against the TF 1.x API
(tf.python_io, tf.parse_single_example, make_one_shot_iterator).
"""
import tensorflow as tf
import numpy as np
import readers
import pre_precessing
from app_flag import FLAGS
from StackedAutoencoder import StackedAutoencoder


def write_and_encode(data_list, tfrecord_filename):
    """Serialize (label, data_matrix) pairs into a TFRecord file.

    Each record stores the int64 label and the raw bytes of the matrix.
    The matrix shape/dtype are NOT stored in the record: readers must know
    them (here: FLAGS.image_rows x FLAGS.image_cols, float64 — see parser()).

    Args:
        data_list: iterable of (label, data_matrix) where data_matrix is a
            NumPy array (float64 assumed by the readers — TODO confirm).
        tfrecord_filename: output path for the TFRecord file.
    """
    # Context manager ensures the writer is closed (and the file flushed)
    # even if serialization of one example raises — the original code
    # leaked the writer on any exception.
    with tf.python_io.TFRecordWriter(tfrecord_filename) as writer:
        for label, data_matrix in data_list:
            example = tf.train.Example(features=tf.train.Features(
                feature={
                    "label": tf.train.Feature(
                        int64_list=tf.train.Int64List(value=[label])),
                    "data_raw": tf.train.Feature(
                        bytes_list=tf.train.BytesList(
                            value=[data_matrix.tostring()])),
                }
            ))
            writer.write(example.SerializeToString())


def read_and_decode(tfrecord_filename):
    """Legacy queue-based reader for the TFRecord file.

    NOTE(review): this function is not used by the tf.data pipeline below
    (train_input_fn/parser supersede it); it also requires queue runners to
    be started before the returned tensors can be evaluated. Kept for
    reference / backward compatibility.

    Returns:
        (data, label) tensors; data is reshaped to
        [FLAGS.image_rows, FLAGS.image_cols] float64.
    """
    reader = tf.TFRecordReader()
    filename_queue = tf.train.string_input_producer([tfrecord_filename])
    _, serialized_example = reader.read(filename_queue)
    feature = tf.parse_single_example(
        serialized_example,
        features={
            "label": tf.FixedLenFeature([], tf.int64),
            "data_raw": tf.FixedLenFeature([], tf.string),
        })
    data = tf.decode_raw(feature["data_raw"], tf.float64)
    data = tf.reshape(data, [FLAGS.image_rows, FLAGS.image_cols])
    return data, feature["label"]


def train_input_fn():
    """Build the (features, labels) input tensors from the training TFRecord.

    Uses the tf.data API: parse each record, repeat for FLAGS.num_epochs,
    and batch by FLAGS.batch_size. Returns one-shot-iterator output tensors.
    """
    tfrecord_file = "../resources/train_tfrecord"
    dataset = tf.data.TFRecordDataset(tfrecord_file)
    dataset = dataset.map(parser)
    train_dataset = dataset.repeat(FLAGS.num_epochs).batch(FLAGS.batch_size)
    train_iterator = train_dataset.make_one_shot_iterator()
    features, labels = train_iterator.get_next()
    return features, labels


def parser(record_line):
    """Parse one serialized tf.train.Example into (data, label).

    NOTE(review): the decode_raw dtype (tf.float64) must match the dtype of
    the matrices passed to write_and_encode — a mismatch makes the reshape
    to [image_rows, image_cols] fail at runtime because the element count
    changes. Verify the arrays produced by readers/pre_precessing are float64.
    """
    features = {
        "label": tf.FixedLenFeature([], tf.int64),
        "data_raw": tf.FixedLenFeature([], tf.string),
    }
    parsed = tf.parse_single_example(record_line, features=features)
    # Labels are stored 1-based on disk; shift to 0-based for the model.
    label = tf.cast(parsed["label"], tf.int32) - 1
    data = tf.decode_raw(parsed["data_raw"], tf.float64)
    data = tf.reshape(data, [FLAGS.image_rows, FLAGS.image_cols])
    data = tf.cast(data, tf.float32)
    return data, label


def write_user_instances_to_tfrecord():
    """Read every user's files and write the first 100 instances to TFRecord."""
    # Users "01".."16" (zero-padded) plus a fixed set of additional IDs.
    users = ["%02d" % i for i in range(1, 17)]
    users += ["32", "40", "41", "42", "43", "49", "50", "51"]

    instances = []
    for user in users:
        train_data = readers.read_user_files(user)
        for label, instance in train_data.items():
            instances.append((label, instance))

    # Pad/extend all instances to a uniform size before serialization.
    formalized_instances = pre_precessing.extend_to_maxsize(instances)
    train_instances = formalized_instances[:100]
    write_and_encode(train_instances, "../resources/train_tfrecord")


def main():
    build_stacked_ae("../resources/train_tfrecord")


def build_stacked_ae(path):
    """Build, train, and evaluate the stacked autoencoder.

    Args:
        path: TFRecord path. NOTE(review): currently unused — the input
            pipeline hard-codes the path inside train_input_fn(); consider
            threading `path` through to it.

    Returns:
        Evaluation accuracy as a percentage (result[1] * 100).
    """
    features, labels = train_input_fn()
    ae = StackedAutoencoder(features, labels, 5)
    ae.create_autoencoder()
    result = ae.evaluate_autoencoder()
    # BUG FIX: the original placed this print AFTER the return statement,
    # so the accuracy was never printed. Print first, then return.
    accuracy_pct = result[1] * 100
    print("Accuracy: %.2f%%" % accuracy_pct)
    return accuracy_pct


if __name__ == "__main__":
    main()

# (Original question text:) I can't fix the error — any help, please?