from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from ray.rllib.models.model import Model
from ray.rllib.models.misc import get_activation_fn, flatten
from ray.rllib.utils.annotations import override


class VisionNetwork(Model):
    """Generic vision network."""

    @override(Model)
    def _build_layers_v2(self, input_dict, num_outputs, options):
        inputs = input_dict["obs"]
        filters = options.get("conv_filters")
        if not filters:
            filters = _get_filter_config(inputs)
        activation = get_activation_fn(options.get("conv_activation"))
        with tf.name_scope("vision_net"):
            # All but the last filter spec become SAME-padded conv layers.
            for i, (out_size, kernel, stride) in enumerate(filters[:-1], 1):
                inputs = slim.conv2d(
                    inputs,
                    out_size,
                    kernel,
                    stride,
                    activation_fn=activation,
                    scope="conv{}".format(i))
            # The last filter spec is applied with VALID padding so that it
            # collapses the remaining spatial dims, acting as a dense layer.
            out_size, kernel, stride = filters[-1]
            fc1 = slim.conv2d(
                inputs,
                out_size,
                kernel,
                stride,
                activation_fn=activation,
                padding="VALID",
                scope="fc1")
            # 1x1 conv producing num_outputs channels (the model outputs).
            fc2 = slim.conv2d(
                fc1,
                num_outputs, [1, 1],
                activation_fn=None,
                normalizer_fn=None,
                scope="fc2")
            # Return (outputs, last hidden layer), both flattened.
            return flatten(fc2), flatten(fc1)


def _get_filter_config(inputs):
    """Returns a default conv filter config for 84x84 or 42x42 inputs.

    Each entry is [out_channels, kernel, stride]. With SAME padding,
    84x84 -> 21x21 -> 11x11 and 42x42 -> 21x21 -> 11x11, so the final
    11x11 VALID conv in the model reduces the spatial dims to 1x1.
    """
    filters_84x84 = [
        [16, [8, 8], 4],
        [32, [4, 4], 2],
        [256, [11, 11], 1],
    ]
    filters_42x42 = [
        [16, [4, 4], 2],
        [32, [4, 4], 2],
        [256, [11, 11], 1],
    ]
    shape = inputs.shape.as_list()[1:]
    if len(shape) == 3 and shape[:2] == [84, 84]:
        return filters_84x84
    elif len(shape) == 3 and shape[:2] == [42, 42]:
        return filters_42x42
    else:
        raise ValueError(
            "No default configuration for obs input {}".format(inputs) +
            ", you must specify `conv_filters` manually as a model option. "
            "Default configurations are only available for inputs of size "
            "[?, 42, 42, K] and [?, 84, 84, K]. You may alternatively want "
            "to use a custom model or preprocessor.")
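

# Usage sketch (not part of the original file). The option names below are
# inferred from what _build_layers_v2 reads ("conv_filters" and
# "conv_activation"); treat the exact values as an illustrative assumption.
# The conv stack can be customized through the "model" section of a trainer
# config, for example:
#
#     config = {
#         "model": {
#             # Each entry is [out_channels, kernel, stride]; the last entry
#             # becomes the VALID-padded "fc1" layer and should collapse the
#             # spatial dims to 1x1 so flatten(fc2) has num_outputs units.
#             "conv_filters": [
#                 [16, [8, 8], 4],
#                 [32, [4, 4], 2],
#                 [256, [11, 11], 1],
#             ],
#             "conv_activation": "relu",
#         },
#     }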