data_utils.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.data utility methods.
Collection of utility methods that make CNN benchmark code use tf.data easier.
"""
import tensorflow as tf

from tensorflow.contrib.data.python.ops import batching
from tensorflow.contrib.data.python.ops import interleave_ops
from tensorflow.contrib.data.python.ops import prefetching_ops
from tensorflow.contrib.data.python.ops import threadpool
from tensorflow.python.framework import function
from tensorflow.python.platform import gfile


def build_prefetch_image_processing(height, width, batch_size, num_splits,
                                    preprocess_fn, cpu_device, params,
                                    gpu_devices, dataset):
  """Returns FunctionBufferingResources that do image preprocessing."""
  with tf.device(cpu_device):
    if params.eval:
      subset = 'validation'
    else:
      subset = 'train'

    function_buffering_resources = []
    remote_fn, args = minibatch_fn(
        height=height,
        width=width,
        batch_size=batch_size,
        num_splits=num_splits,
        preprocess_fn=preprocess_fn,
        dataset=dataset,
        subset=subset,
        train=(not params.eval),
        cache_data=params.cache_data,
        num_threads=params.datasets_num_private_threads)
    # Create one buffering resource per GPU; each resource runs remote_fn on
    # the CPU device and buffers the resulting minibatches for its GPU.
    for device_num in range(len(gpu_devices)):
      with tf.device(gpu_devices[device_num]):
        buffer_resource_handle = prefetching_ops.function_buffering_resource(
            f=remote_fn,
            target_device=cpu_device,
            string_arg=args[0],
            buffer_size=params.datasets_prefetch_buffer_size,
            shared_name=None)
        function_buffering_resources.append(buffer_resource_handle)
    return function_buffering_resources


def get_images_and_labels(function_buffering_resource, data_type):
  """Given a FunctionBufferingResource obtains images and labels from it."""
  return prefetching_ops.function_buffering_resource_get_next(
      function_buffer_resource=function_buffering_resource,
      output_types=[data_type, tf.int32])
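

# A rough usage sketch (not part of this module): the benchmark driver is
# assumed to build one buffering resource per GPU on the CPU device and then
# fetch prefetched minibatches on each GPU. `params`, `preprocess_fn`,
# `dataset` and `gpu_devices` stand in for the benchmark's own flag, dataset
# and device objects.
#
#   resources = build_prefetch_image_processing(
#       height=224, width=224, batch_size=256, num_splits=len(gpu_devices),
#       preprocess_fn=preprocess_fn, cpu_device='/cpu:0', params=params,
#       gpu_devices=gpu_devices, dataset=dataset)
#   for i, gpu_device in enumerate(gpu_devices):
#     with tf.device(gpu_device):
#       images, labels = get_images_and_labels(resources[i], tf.float32)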


def create_iterator(batch_size,
                    num_splits,
                    batch_size_per_split,
                    preprocess_fn,
                    dataset,
                    subset,
                    train,
                    cache_data,
                    num_threads=None):
  """Creates a dataset iterator for the benchmark."""
  glob_pattern = dataset.tf_record_pattern(subset)
  file_names = gfile.Glob(glob_pattern)
  if not file_names:
    raise ValueError('Found no files in --data_dir matching: {}'
                     .format(glob_pattern))
  ds = tf.data.TFRecordDataset.list_files(file_names)
  ds = ds.apply(
      interleave_ops.parallel_interleave(
          tf.data.TFRecordDataset, cycle_length=10))
  if cache_data:
    # Cache a single record and repeat it, minimizing input processing cost.
    ds = ds.take(1).cache().repeat()
  # Zip each record with a counter, so preprocess_fn receives
  # (record, position-in-batch) pairs.
  counter = tf.data.Dataset.range(batch_size)
  counter = counter.repeat()
  ds = tf.data.Dataset.zip((ds, counter))
  ds = ds.prefetch(buffer_size=batch_size)
  if train:
    ds = ds.shuffle(buffer_size=10000)
  ds = ds.repeat()
  ds = ds.apply(
      batching.map_and_batch(
          map_func=preprocess_fn,
          batch_size=batch_size_per_split,
          num_parallel_batches=num_splits))
  ds = ds.prefetch(buffer_size=num_splits)
  if num_threads:
    ds = threadpool.override_threadpool(
        ds,
        threadpool.PrivateThreadPool(
            num_threads, display_name='input_pipeline_thread_pool'))
    ds_iterator = ds.make_initializable_iterator()
    tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
                         ds_iterator.initializer)
  else:
    ds_iterator = ds.make_one_shot_iterator()
  return ds_iterator
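

# A rough sketch of using create_iterator() directly, outside the buffering
# path above. `preprocess_fn` and `dataset` are assumed to come from the
# benchmark's preprocessing and datasets modules; element ordering follows
# the (labels, images) convention used in minibatch_fn() below.
#
#   ds_iterator = create_iterator(
#       batch_size=256, num_splits=4, batch_size_per_split=64,
#       preprocess_fn=preprocess_fn, dataset=dataset, subset='train',
#       train=True, cache_data=False)
#   labels, images = ds_iterator.get_next()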


def minibatch_fn(height, width, batch_size, num_splits, preprocess_fn, dataset,
                 subset, train, cache_data, num_threads):
  """Returns a function and list of args for the fn to create a minibatch."""
  batch_size_per_split = batch_size // num_splits
  with tf.name_scope('batch_processing'):
    ds_iterator = create_iterator(batch_size, num_splits, batch_size_per_split,
                                  preprocess_fn, dataset, subset, train,
                                  cache_data, num_threads)
    ds_iterator_string_handle = ds_iterator.string_handle()

    @function.Defun(tf.string)
    def _fn(h):
      depth = 3
      remote_iterator = tf.data.Iterator.from_string_handle(
          h, ds_iterator.output_types, ds_iterator.output_shapes)
      labels, images = remote_iterator.get_next()
      images = tf.reshape(
          images, shape=[batch_size_per_split, height, width, depth])
      labels = tf.reshape(labels, [batch_size_per_split])
      return images, labels

    return _fn, [ds_iterator_string_handle]
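

# A rough sketch of driving the pipeline end to end in a Session. When
# num_threads is set, create_iterator() registers the iterator initializer
# under GraphKeys.TABLE_INITIALIZERS, so tf.tables_initializer() must be run
# before the first fetch. Argument values below are illustrative only.
#
#   remote_fn, args = minibatch_fn(
#       height=224, width=224, batch_size=256, num_splits=4,
#       preprocess_fn=preprocess_fn, dataset=dataset, subset='train',
#       train=True, cache_data=False, num_threads=8)
#   with tf.Session() as sess:
#     sess.run(tf.tables_initializer())
#     # remote_fn and args are then handed to
#     # prefetching_ops.function_buffering_resource(), as in
#     # build_prefetch_image_processing() above.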