karl_TUM karl_TUM - 1 year ago 189
Python Question

Tensorflow error: InvalidArgumentError: Different number of component types.

I want to input batches of shuffled images to be training, and I write the code according to the generic input images in TensorVision, but I get an error. I cannot figure it where it is wrong.
This is my code:

import os
import tensorflow as tf

def read_labeled_image_list(image_list_file):
Read a .txt file containing pathes and labeles.
image_list_file : a .txt file with one /path/to/image per line
label : optionally, if set label will be pasted after each line
List with all filenames in file image_list_file
f = open(image_list_file, 'r')
filenames = []
labels = []
for line in f:
filename, label = line[:-1].split(' ')
return filenames, labels

def read_images_from_disk(input_queue):
"""Consumes a single filename and label as a ' '-delimited string.
filename_and_label_tensor: A scalar string tensor.
Two tensors: the decoded image, and the string label.
label = input_queue[1]
file_contents = tf.read_file(input_queue[0])
example = tf.image.decode_png(file_contents, channels=3)
# example = rescale_image(example)
# processed_label = label
return example, label

def random_resize(image, lower_size, upper_size):
"""Randomly resizes an image
a randomly resized image

new_size = tf.to_int32(
tf.random_uniform([], lower_size, upper_size))

return tf.image.resize_images(image, new_size, new_size,
def _input_pipeline(filename, batch_size,
processing_image=lambda x: x,
processing_label=lambda y: y,
"""The input pipeline for reading images classification data.
The data should be stored in a single text file of using the format:
/path/to/image_0 label_0
/path/to/image_1 label_1
/path/to/image_2 label_2
filename: the path to the txt file
batch_size: size of batches produced
num_epochs: optionally limited the amount of epochs
List with all filenames in file image_list_file

# Reads pfathes of images together with there labels
image_list, label_list = read_labeled_image_list(filename)

images = tf.convert_to_tensor(image_list, dtype=tf.string)
labels = tf.convert_to_tensor(label_list, dtype=tf.int32)

# Makes an input queue
input_queue = tf.train.slice_input_producer([images, labels],

# Reads the actual images from
image, label = read_images_from_disk(input_queue)
pr_image = processing_image(image)
pr_label = processing_label(label)

image_batch, label_batch = tf.train.batch([pr_image, pr_label],
shapes = [256,256,3])

# Display the training images in the visualizer.
tensor_name = image.op.name
tf.image_summary(tensor_name + 'images', image_batch)
return image_batch, label_batch
def test_pipeline():
data_folder = '/home/kang/Documents/work_code_PC1/data/UCLandUsedImages/'
data_file = 'UCImage_Labels.txt'

filename = os.path.join(data_folder, data_file)

image_batch, label_batch = _input_pipeline(filename, 75)

# Create the graph, etc.
init_op = tf.initialize_all_variables()
sess = tf.InteractiveSession()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

a = sess.run([image_batch, label_batch])

print("Finish Test")
return a

if __name__ == '__main__':
# aa = test_preprocc()
# matplotlib.pyplot.imshow(aa[1])
a1 = test_pipeline()
a2 = test_pipeline()

but it comes out an error, it confuses me for a long time:

Traceback (most recent call last):

File "<ipython-input-7-e24901ce3365>", line 1, in <module>
runfile('/home/kang/Documents/work_code_PC1/VGG_tensorflow_UCMerced/readUClandUsedImagetxt1.py', wdir='/home/kang/Documents/work_code_PC1/VGG_tensorflow_UCMerced')

File "/usr/local/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 714, in runfile
execfile(filename, namespace)

File "/usr/local/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 81, in execfile
builtins.execfile(filename, *where)

File "/home/kang/Documents/work_code_PC1/VGG_tensorflow_UCMerced/readUClandUsedImagetxt1.py", line 254, in <module>
a1 = test_pipeline()

File "/home/kang/Documents/work_code_PC1/VGG_tensorflow_UCMerced/readUClandUsedImagetxt1.py", line 244, in test_pipeline
a = sess.run([image_batch, label_batch])

File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 340, in run

File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 564, in _run
feed_dict_string, options, run_metadata)

File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 637, in _do_run
target_list, options, run_metadata)

File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 659, in _do_call

InvalidArgumentError: Different number of component types. Types: uint8, int32, Shapes: [[256,256,3]]
[[Node: batch_11/fifo_queue = FIFOQueue[capacity=32, component_types=[DT_UINT8, DT_INT32], container="", shapes=[[256,256,3]], shared_name="", _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Caused by op u'batch_11/fifo_queue', defined at:

Answer Source

The error is due to wrong argument shapes for function tf.train.batch. The argument shapes should be left to default, or should be:

shapes: (Optional) The shapes for each example. Defaults to the inferred shapes for tensor_list

Here you are giving shapes = [256, 256, 3], but you should give the shape for pr_image and pr_label in a list:

image_batch, label_batch = tf.train.batch(
    [pr_image, pr_label],
    shapes = [[256,256,3], pr_label.get_shape()])
Recommended from our users: Dynamic Network Monitoring from WhatsUp Gold from IPSwitch. Free Download