# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import tensorflow as tf

from tensorforce import TensorforceError, util
from tensorforce.core import parameter_modules
from tensorforce.core.optimizers import MetaOptimizer


class SubsamplingStep(MetaOptimizer):
"""
The subsampling-step meta optimizer randomly samples a subset of batch instances to calculate
the optimization step of another optimizer.
"""
    def __init__(self, name, optimizer, fraction, summary_labels=None):
        """
        Subsampling-step optimizer constructor.

        Args:
            name (string): Module name (internal use).
            optimizer (specification): Optimizer which is applied to the subsampled batch
                (**required**).
            fraction (parameter, 0.0 < float <= 1.0): Fraction of instances of the batch to
                subsample (**required**).
            summary_labels (iter[string]): Labels of summaries to record.
        """
        super().__init__(name=name, optimizer=optimizer, summary_labels=summary_labels)

        self.fraction = self.add_module(
            name='fraction', module=fraction, modules=parameter_modules, dtype='float'
        )
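        # Note (assumption): since `fraction` is registered via `parameter_modules`, it
        # can presumably be given not only as a constant float but also as a schedule or
        # decay specification from Tensorforce's parameter system.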
    def tf_step(self, variables, arguments, **kwargs):
        # Get some (batched) argument to determine the batch size.
        arguments_iter = iter(arguments.values())
        some_argument = next(arguments_iter)

        try:
            # Search the (possibly nested) arguments for a tensor with a batch dimension,
            # i.e. a tf.Tensor of non-zero rank.
            while not isinstance(some_argument, tf.Tensor) or util.rank(x=some_argument) == 0:
                if isinstance(some_argument, dict):
                    if some_argument:
                        arguments_iter = iter(some_argument.values())
                    some_argument = next(arguments_iter)
                elif isinstance(some_argument, list):
                    if some_argument:
                        arguments_iter = iter(some_argument)
                    some_argument = next(arguments_iter)
                elif some_argument is None or util.rank(x=some_argument) == 0:
                    # Non-batched argument, so move on to the next one.
                    some_argument = next(arguments_iter)
                else:
                    raise TensorforceError("Invalid argument type.")
        except StopIteration:
            # Ran out of arguments without finding a batched tensor.
            raise TensorforceError("Invalid argument type.")
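        # Extract the batch size from the leading dimension. `tf.shape` only supports
        # int32/int64 as `out_type`, hence the explicit cast fallback below when the
        # configured integer dtype is something else.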
        if util.tf_dtype(dtype='int') in (tf.int32, tf.int64):
            batch_size = tf.shape(input=some_argument, out_type=util.tf_dtype(dtype='int'))[0]
        else:
            batch_size = tf.dtypes.cast(
                x=tf.shape(input=some_argument)[0], dtype=util.tf_dtype(dtype='int')
            )
        # Number of instances to subsample: the configured fraction of the batch size,
        # truncated to an integer, but at least one.
        fraction = self.fraction.value()
        num_samples = fraction * tf.dtypes.cast(x=batch_size, dtype=util.tf_dtype('float'))
        num_samples = tf.dtypes.cast(x=num_samples, dtype=util.tf_dtype('int'))
        one = tf.constant(value=1, dtype=util.tf_dtype('int'))
        num_samples = tf.maximum(x=num_samples, y=one)
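        # For instance (illustrative): fraction 0.25 on a batch of 10 yields
        # int(0.25 * 10) = 2 samples, while fraction 0.05 truncates to 0 and is
        # lifted to the minimum of one sample.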
        # Sample indices uniformly at random; since draws are independent, this is
        # sampling with replacement, so duplicate indices are possible.
        indices = tf.random.uniform(
            shape=(num_samples,), maxval=batch_size, dtype=util.tf_dtype(dtype='int')
        )

        # Gather the sampled instances from every batched argument.
        function = (lambda x: tf.gather(params=x, indices=indices))
        subsampled_arguments = util.fmap(function=function, xs=arguments)

        # Delegate the actual optimization step to the inner optimizer on the subsample.
        return self.optimizer.step(variables=variables, arguments=subsampled_arguments, **kwargs)
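
# ==============================================================================
# Standalone sketch (not part of the original module): the core subsampling logic
# above, rewritten in plain TensorFlow for illustration. All names below
# (`subsample`, `arguments`) are hypothetical.
#
#     import tensorflow as tf
#
#     def subsample(arguments, fraction):
#         """Gather a random fraction of instances from a dict of batched tensors."""
#         some_argument = next(iter(arguments.values()))
#         batch_size = tf.shape(some_argument)[0]
#         num_samples = tf.cast(fraction * tf.cast(batch_size, tf.float32), tf.int32)
#         num_samples = tf.maximum(num_samples, 1)
#         indices = tf.random.uniform(shape=(num_samples,), maxval=batch_size, dtype=tf.int32)
#         return {name: tf.gather(x, indices) for name, x in arguments.items()}
#
#     arguments = dict(states=tf.random.normal((64, 8)), reward=tf.random.normal((64,)))
#     subsampled = subsample(arguments=arguments, fraction=0.25)
#     # -> tensors with leading dimension 16
# ==============================================================================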