
Split tfrecords #3

Open · sigmoidx opened this issue Dec 12, 2018 · 1 comment
sigmoidx commented Dec 12, 2018

Hi,

I found that your script works well, but it created a 32 GB TFRecord file (I changed 224 to 299). Is there an official TensorFlow way to split it into multiple files of a smaller size each?

Thanks,

sigmoidx (Author) commented
Hi, I found out how to split it. Here is the code:

import os
import sys
import glob
import threading
import numpy as np
from PIL import Image
import tensorflow as tf
from random import shuffle
from datetime import datetime


def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def image_preprocessing(image_file, height, width):
    image = Image.open(image_file)
    image = image.resize((width, height), Image.ANTIALIAS)
    np_image = np.array(image).astype(float)

    new_image = np.zeros((np_image.shape[0], np_image.shape[1], 3), dtype=float)

    if len(np_image.shape) == 2:  # grayscale images: replicate into 3 channels
        for each_channel in range(3):
            new_image[:, :, each_channel] = np_image
    else:  # RGB/RGBA images: keep only the first three (RGB) channels
        for each_channel in range(3):
            new_image[:, :, each_channel] = np_image[:, :, each_channel]

    return new_image

def process_thread(thread_index, ranges, train_addrs, train_labels, num_shards, name):
    height = 299
    width = 299
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)

    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

    counter = 0
    for s in range(num_shards_per_batch):
        # Each shard gets its own file, e.g. 'train.tfrecords-00002-of-00016'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        writer = tf.python_io.TFRecordWriter(output_filename)

        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)

        for i in files_in_shard:
            img = image_preprocessing(train_addrs[i], height, width)
            label = train_labels[i]

            # Create a feature
            feature = {'train/label': _int64_feature(label),
                       'train/image': _bytes_feature(tf.compat.as_bytes(img.tostring()))}
            # Create an example protocol buffer
            example = tf.train.Example(features=tf.train.Features(feature=feature))

            # Serialize to string and write to the shard file
            writer.write(example.SerializeToString())
            shard_counter += 1
            counter += 1

            if not counter % 1000:
                print('%s [thread %d]: Processed %d of %d images in thread batch.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread))
                sys.stdout.flush()

        writer.close()
        print('%s [thread %d]: Wrote %d images to %s' %
              (datetime.now(), thread_index, shard_counter, output_filename))
        sys.stdout.flush()
    print('%s [thread %d]: Wrote %d images to %d shards.' %
          (datetime.now(), thread_index, counter, num_shards_per_batch))
    sys.stdout.flush()

shuffle_data = True  # shuffle the addresses before saving
cat_dog_train_path = '/media/geraldofrivia/Data/Datasets/pets/train/*.jpg'
# read addresses and labels from the 'train' folder
addrs = glob.glob(cat_dog_train_path)
labels = [0 if 'cat' in addr else 1 for addr in addrs]  # 0 = Cat, 1 = Dog

# to shuffle data
if shuffle_data:
    c = list(zip(addrs, labels))
    shuffle(c)
    addrs, labels = zip(*c)

# Divide the data into 60% train, 20% validation, and 20% test
train_addrs = addrs[0:int(0.6 * len(addrs))]
train_labels = labels[0:int(0.6 * len(labels))]
val_addrs = addrs[int(0.6 * len(addrs)):int(0.8 * len(addrs))]
val_labels = labels[int(0.6 * len(labels)):int(0.8 * len(labels))]
test_addrs = addrs[int(0.8 * len(addrs)):]
test_labels = labels[int(0.8 * len(labels)):]

# split the training set into contiguous ranges, one per thread
num_shards = 16
num_threads = 8
spacing = np.linspace(0, len(train_addrs), num_threads + 1).astype(int)
ranges = []
for i in range(len(spacing) - 1):
    ranges.append([spacing[i], spacing[i + 1]])

# base path for the sharded TFRecords files
save_directory = './tf_records/'
fileName = 'train.tfrecords'
name = save_directory + fileName
if not os.path.exists(save_directory):
    os.makedirs(save_directory)

coord = tf.train.Coordinator()

threads = []
for thread_index in range(len(ranges)):
    args = (thread_index, ranges, train_addrs, train_labels, num_shards, name)
    t = threading.Thread(target=process_thread, args=args)
    t.start()
    threads.append(t)

# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' % (datetime.now(), len(train_addrs)))
sys.stdout.flush()
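
To read the shards back, you can glob for the shard pattern and hand every match to a single dataset. Below is a minimal read-side sketch using the TF 1.x tf.data API, assuming the 'train/label'/'train/image' feature keys and the 299×299×3 float64 layout written by the script above:

import glob
import tensorflow as tf

# match every shard written above, e.g. train.tfrecords-00000-of-00016
filenames = glob.glob('./tf_records/train.tfrecords-*-of-*')

def _parse(serialized):
    features = {'train/label': tf.FixedLenFeature([], tf.int64),
                'train/image': tf.FixedLenFeature([], tf.string)}
    parsed = tf.parse_single_example(serialized, features)
    # img.tostring() serialized a float64 array, so decode it as float64
    image = tf.decode_raw(parsed['train/image'], tf.float64)
    image = tf.reshape(image, [299, 299, 3])
    return image, parsed['train/label']

dataset = tf.data.TFRecordDataset(filenames).map(_parse).batch(32)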

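One caveat on size: sharding splits the output across files but does not shrink it. The script stores each image as raw float64 pixels (299 × 299 × 3 × 8 bytes, roughly 2.1 MB per image), which is what makes the total so large. A common alternative, sketched here with hypothetical helper names and the same feature keys, is to store the compressed JPEG bytes and decode/resize at read time:

def jpeg_example(image_path, label):
    # hypothetical variant of the write loop: store the compressed JPEG
    # bytes as-is, which is far smaller than raw float64 pixels
    with open(image_path, 'rb') as f:
        encoded_jpeg = f.read()
    feature = {'train/label': _int64_feature(label),
               'train/image': _bytes_feature(encoded_jpeg)}
    return tf.train.Example(features=tf.train.Features(feature=feature))

def decode_jpeg_record(parsed):
    # read-side counterpart: decode and resize inside the input pipeline
    image = tf.image.decode_jpeg(parsed['train/image'], channels=3)
    return tf.image.resize_images(image, [299, 299])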

