# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=redefined-builtin
"""Keras backend API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_module
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import base as tf_base_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradients_module
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.training import moving_averages
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
py_all = all
py_sum = sum
# INTERNAL UTILS
# This is the default internal TF session used by Keras.
# It can be set manually via `set_session(sess)`.
_SESSION = None
# This dictionary holds a mapping {graph: learning_phase}.
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_GRAPH_LEARNING_PHASES = {}
# This dictionary holds a mapping {graph: UID_DICT}.
# Each UID_DICT is a dictionary mapping name prefixes to a current index,
# used for generating graph-specific string UIDs
# for various names (e.g. layer names).
_GRAPH_UID_DICTS = {}
# This boolean flag can be set to True to leave variable initialization
# up to the user.
# Change its value via `manual_variable_initialization(value)`.
_MANUAL_VAR_INIT = False
# The type of float to use throughout a session.
_FLOATX = 'float32'
# Epsilon fuzz factor used throughout the codebase.
_EPSILON = 1e-7
# Default image data format, one of "channels_last", "channels_first".
_IMAGE_DATA_FORMAT = 'channels_last'
# This list holds the available devices.
# It is populated when `_get_available_gpus()` is called for the first time.
# We assume our devices don't change henceforth.
_LOCAL_DEVICES = None
@tf_export('keras.backend.backend')
def backend():
"""Publicly accessible method for determining the current backend.
Only exists for API compatibility with multi-backend Keras.
Returns:
The string "tensorflow".
"""
return 'tensorflow'
@tf_export('keras.backend.epsilon')
def epsilon():
"""Returns the value of the fuzz factor used in numeric expressions.
Returns:
A float.
Example:
```python
>>> keras.backend.epsilon()
1e-07
```
"""
return _EPSILON
@tf_export('keras.backend.set_epsilon')
def set_epsilon(value):
"""Sets the value of the fuzz factor used in numeric expressions.
Arguments:
value: float. New value of epsilon.
Example:
```python
>>> from keras import backend as K
>>> K.epsilon()
1e-07
>>> K.set_epsilon(1e-05)
>>> K.epsilon()
1e-05
```
"""
global _EPSILON
_EPSILON = value
@tf_export('keras.backend.floatx')
def floatx():
"""Returns the default float type, as a string.
E.g. 'float16', 'float32', 'float64'.
Returns:
String, the current default float type.
Example:
```python
>>> keras.backend.floatx()
'float32'
```
"""
return _FLOATX
@tf_export('keras.backend.set_floatx')
def set_floatx(value):
"""Sets the default float type.
Arguments:
value: String; 'float16', 'float32', or 'float64'.
Example:
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> K.set_floatx('float16')
>>> K.floatx()
'float16'
```
Raises:
ValueError: In case of invalid value.
"""
global _FLOATX
if value not in {'float16', 'float32', 'float64'}:
raise ValueError('Unknown floatx type: ' + str(value))
_FLOATX = str(value)
@tf_export('keras.backend.cast_to_floatx')
def cast_to_floatx(x):
"""Cast a Numpy array to the default Keras float type.
Arguments:
x: Numpy array.
Returns:
The same Numpy array, cast to its new type.
Example:
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> arr = numpy.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = K.cast_to_floatx(arr)
>>> new_arr
array([ 1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
```
"""
return np.asarray(x, dtype=_FLOATX)
@tf_export('keras.backend.image_data_format')
def image_data_format():
"""Returns the default image data format convention.
Returns:
A string, either `'channels_first'` or `'channels_last'`
Example:
```python
>>> keras.backend.image_data_format()
'channels_first'
```
"""
return _IMAGE_DATA_FORMAT
@tf_export('keras.backend.set_image_data_format')
def set_image_data_format(data_format):
"""Sets the value of the image data format convention.
Arguments:
data_format: string. `'channels_first'` or `'channels_last'`.
Example:
```python
>>> from keras import backend as K
>>> K.image_data_format()
'channels_first'
>>> K.set_image_data_format('channels_last')
>>> K.image_data_format()
'channels_last'
```
Raises:
ValueError: In case of invalid `data_format` value.
"""
global _IMAGE_DATA_FORMAT
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('Unknown data_format: ' + str(data_format))
_IMAGE_DATA_FORMAT = str(data_format)
@tf_export('keras.backend.get_uid')
def get_uid(prefix=''):
"""Associates a string prefix with an integer counter in a TensorFlow graph.
Arguments:
prefix: String prefix to index.
Returns:
Unique integer ID.
Example:
```
>>> get_uid('dense')
1
>>> get_uid('dense')
2
```
"""
graph = ops.get_default_graph()
if graph not in tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS:
tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS[graph] = collections.defaultdict(
int)
layer_name_uids = tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS[graph]
layer_name_uids[prefix] += 1
return layer_name_uids[prefix]
@tf_export('keras.backend.reset_uids')
def reset_uids():
"""Resets graph identifiers."""
per_graph_layer_name_uids = tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS
keys = list(per_graph_layer_name_uids.keys())
for key in keys:
del per_graph_layer_name_uids[key]
@tf_export('keras.backend.clear_session')
def clear_session():
"""Destroys the current TF graph and creates a new one.
Useful to avoid clutter from old models / layers.
"""
global _SESSION
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
ops.reset_default_graph()
reset_uids()
_SESSION = None
phase = array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
_GRAPH_LEARNING_PHASES = {}
_GRAPH_LEARNING_PHASES[ops.get_default_graph()] = phase
@tf_export('keras.backend.manual_variable_initialization')
def manual_variable_initialization(value):
"""Sets the manual variable initialization flag.
This boolean flag determines whether
variables should be initialized
as they are instantiated (default), or if
the user should handle the initialization
(e.g. via `tf.initialize_all_variables()`).
Arguments:
value: Python boolean.
"""
global _MANUAL_VAR_INIT
_MANUAL_VAR_INIT = value
@tf_export('keras.backend.learning_phase')
def learning_phase():
"""Returns the learning phase flag.
The learning phase flag is a bool tensor (0 = test, 1 = train)
to be passed as input to any Keras function
that uses a different behavior at train time and test time.
Returns:
Learning phase (scalar integer tensor or Python integer).
"""
if context.executing_eagerly():
if 'eager' not in _GRAPH_LEARNING_PHASES:
# Fallback to inference mode as default.
return 0
return _GRAPH_LEARNING_PHASES['eager']
graph = ops.get_default_graph()
if graph not in _GRAPH_LEARNING_PHASES:
phase = array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
_GRAPH_LEARNING_PHASES[graph] = phase
return _GRAPH_LEARNING_PHASES[graph]
@tf_export('keras.backend.set_learning_phase')
def set_learning_phase(value):
"""Sets the learning phase to a fixed value.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
if context.executing_eagerly():
_GRAPH_LEARNING_PHASES['eager'] = value
else:
_GRAPH_LEARNING_PHASES[ops.get_default_graph()] = value
@tf_contextlib.contextmanager
def learning_phase_scope(value):
"""Provides a scope within which the learning phase is equal to `value`.
The learning phase gets restored to its original value upon exiting the scope.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Yields:
The provided value.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
previous_value = learning_phase()
try:
set_learning_phase(value)
yield value
finally:
# Restore learning phase to initial value.
if context.executing_eagerly():
_GRAPH_LEARNING_PHASES['eager'] = previous_value
else:
_GRAPH_LEARNING_PHASES[ops.get_default_graph()] = previous_value
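# Usage sketch (assumes this module is imported as `K`): the scope forces
# the learning phase and restores the previous value on exit.
#   >>> with K.learning_phase_scope(1):
#   ...   assert K.learning_phase() == 1  # train mode inside the scope
#   >>> # on exit, the previous learning phase value is restored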
@tf_export('keras.backend.get_session')
def get_session():
"""Returns the TF session to be used by the backend.
If a default TensorFlow session is available, we will return it.
Else, we will return the global Keras session.
If no global Keras session exists at this point,
we will create a new global session.
Note that you can manually set the global session
via `K.set_session(sess)`.
Returns:
A TensorFlow session.
"""
global _SESSION
default_session = ops.get_default_session()
if default_session is not None:
session = default_session
else:
if _SESSION is None:
if not os.environ.get('OMP_NUM_THREADS'):
config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
num_thread = int(os.environ.get('OMP_NUM_THREADS'))
config = config_pb2.ConfigProto(
intra_op_parallelism_threads=num_thread, allow_soft_placement=True)
_SESSION = session_module.Session(config=config)
session = _SESSION
if not _MANUAL_VAR_INIT:
with session.graph.as_default():
_initialize_variables(session)
return session
@tf_export('keras.backend.set_session')
def set_session(session):
"""Sets the global TensorFlow session.
Arguments:
session: A TF Session.
"""
global _SESSION
_SESSION = session
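# Usage sketch (assumes this module is imported as `K`): `get_session`
# lazily creates and caches a global session; `set_session` overrides it,
# e.g. to supply a custom `ConfigProto`.
#   >>> config = config_pb2.ConfigProto(allow_soft_placement=True)
#   >>> K.set_session(session_module.Session(config=config))
#   >>> sess = K.get_session()  # the session set above (if no default session is active)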
# DEVICE MANIPULATION
class _TfDeviceCaptureOp(object):
"""Class for capturing the TF device scope."""
def __init__(self):
self.device = None
def _set_device(self, device):
"""This method captures TF's explicit device scope setting."""
self.device = device
def _get_current_tf_device():
"""Return explicit device of current context, otherwise returns `None`.
Returns:
If the current device scope is explicitly set, it returns a string with
the device (`CPU` or `GPU`). If the scope is not explicitly set, it will
return `None`.
"""
g = ops.get_default_graph()
op = _TfDeviceCaptureOp()
g._apply_device_functions(op)
return op.device
def _is_current_explicit_device(device_type):
"""Check if the current device is explicitly set on the device type specified.
Arguments:
device_type: A string containing `GPU` or `CPU` (case-insensitive).
Returns:
A boolean indicating if the current device scope is explicitly set on the
device type.
Raises:
ValueError: If the `device_type` string indicates an unsupported device.
"""
device_type = device_type.upper()
if device_type not in ['CPU', 'GPU']:
raise ValueError('`device_type` should be either "CPU" or "GPU".')
device = _get_current_tf_device()
return device is not None and device.device_type == device_type.upper()
def _get_available_gpus():
"""Get a list of available gpu devices (formatted as strings).
Returns:
A list of available GPU devices.
"""
global _LOCAL_DEVICES
if _LOCAL_DEVICES is None:
_LOCAL_DEVICES = get_session().list_devices()
return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']
def _has_nchw_support():
"""Check whether the current scope supports NCHW ops.
TensorFlow does not support NCHW ops on CPU. Therefore we check whether we
are not explicitly placed on the CPU and whether GPUs are available; in that
case the ops will be soft-placed on a GPU device.
Returns:
bool: whether the current scope's device placement supports NCHW.
"""
explicitly_on_cpu = _is_current_explicit_device('CPU')
gpus_available = bool(_get_available_gpus())
return not explicitly_on_cpu and gpus_available
# VARIABLE MANIPULATION
def _to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
Arguments:
x: An object to be converted (numpy array, list, tensors).
dtype: The destination type.
Returns:
A tensor.
"""
return ops.convert_to_tensor(x, dtype=dtype)
@tf_export('keras.backend.is_sparse')
def is_sparse(tensor):
"""Returns whether a tensor is a sparse tensor.
Arguments:
tensor: A tensor instance.
Returns:
A boolean.
Example:
```python
>>> from keras import backend as K
>>> a = K.placeholder((2, 2), sparse=False)
>>> print(K.is_sparse(a))
False
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
```
"""
return isinstance(tensor, sparse_tensor.SparseTensor)
@tf_export('keras.backend.to_dense')
def to_dense(tensor):
"""Converts a sparse tensor into a dense tensor and returns it.
Arguments:
tensor: A tensor instance (potentially sparse).
Returns:
A dense tensor.
Examples:
```python
>>> from keras import backend as K
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
>>> c = K.to_dense(b)
>>> print(K.is_sparse(c))
False
```
"""
if is_sparse(tensor):
return sparse_ops.sparse_tensor_to_dense(tensor)
else:
return tensor
name_scope = ops.name_scope
@tf_export('keras.backend.variable')
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
Arguments:
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
Returns:
A variable instance (with Keras metadata included).
Examples:
```python
>>> from keras import backend as K
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val, dtype='float64', name='example_var')
>>> K.dtype(kvar)
'float64'
>>> print(kvar)
example_var
>>> kvar.eval()
array([[ 1., 2.],
[ 3., 4.]])
```
"""
if dtype is None:
dtype = floatx()
if hasattr(value, 'tocoo'):
sparse_coo = value.tocoo()
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
sparse_coo.col, 1)), 1)
v = sparse_tensor.SparseTensor(
indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
v._keras_shape = sparse_coo.shape
v._uses_learning_phase = False
return v
v = resource_variable_ops.ResourceVariable(
value,
dtype=dtypes_module.as_dtype(dtype),
name=name,
constraint=constraint)
if isinstance(value, np.ndarray):
v._keras_shape = value.shape
elif hasattr(value, 'get_shape'):
v._keras_shape = int_shape(value)
v._uses_learning_phase = False
return v
def _initialize_variables(session):
"""Utility to initialize uninitialized variables on the fly."""
variables = variables_module.global_variables()
candidate_vars = []
for v in variables:
if not getattr(v, '_keras_initialized', False):
candidate_vars.append(v)
if candidate_vars:
# This step is expensive, so we only run it on variables not already
# marked as initialized.
is_initialized = session.run(
[variables_module.is_variable_initialized(v) for v in candidate_vars])
uninitialized_vars = []
for flag, v in zip(is_initialized, candidate_vars):
if not flag:
uninitialized_vars.append(v)
v._keras_initialized = True
if uninitialized_vars:
session.run(variables_module.variables_initializer(uninitialized_vars))
@tf_export('keras.backend.constant')
def constant(value, dtype=None, shape=None, name=None):
"""Creates a constant tensor.
Arguments:
value: A constant value (or list)
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
Returns:
A Constant Tensor.
"""
if dtype is None:
dtype = floatx()
return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
def is_keras_tensor(x):
"""Returns whether `x` is a Keras tensor.
A "Keras tensor" is a tensor that was returned by a Keras layer,
(`Layer` class) or by `Input`.
Arguments:
x: A candidate tensor.
Returns:
A boolean: Whether the argument is a Keras tensor.
Raises:
ValueError: In case `x` is not a symbolic tensor.
Examples:
```python
>>> from keras import backend as K
>>> from keras.layers import Input, Dense
>>> np_var = numpy.array([1, 2])
>>> K.is_keras_tensor(np_var) # A numpy array is not a symbolic tensor.
ValueError
>>> k_var = tf.placeholder('float32', shape=(1,1))
>>> K.is_keras_tensor(k_var) # A variable indirectly created outside of
keras is not a Keras tensor.
False
>>> keras_var = K.variable(np_var)
>>> K.is_keras_tensor(keras_var) # A variable created with the keras
backend is not a Keras tensor.
False
>>> keras_placeholder = K.placeholder(shape=(2, 4, 5))
>>> K.is_keras_tensor(keras_placeholder) # A placeholder is not a Keras
tensor.
False
>>> keras_input = Input([10])
>>> K.is_keras_tensor(keras_input) # An Input is a Keras tensor.
True
>>> keras_layer_output = Dense(10)(keras_input)
>>> K.is_keras_tensor(keras_layer_output) # Any Keras layer output is a
Keras tensor.
True
```
"""
if not isinstance(x, (ops.Tensor,
variables_module.Variable,
sparse_tensor.SparseTensor)):
raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +
'`. Expected a symbolic tensor instance.')
return hasattr(x, '_keras_history')
@tf_export('keras.backend.placeholder')
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
"""Instantiates a placeholder tensor and returns it.
Arguments:
shape: Shape of the placeholder
(integer tuple, may include `None` entries).
ndim: Number of axes of the tensor.
At least one of {`shape`, `ndim`} must be specified.
If both are specified, `shape` is used.
dtype: Placeholder type.
sparse: Boolean, whether the placeholder should have a sparse type.
name: Optional name string for the placeholder.
Returns:
Tensor instance (with Keras metadata included).
Examples:
```python
>>> from keras import backend as K
>>> input_ph = K.placeholder(shape=(2, 4, 5))
>>> input_ph
<tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32>
```
"""
if dtype is None:
dtype = floatx()
if not shape:
if ndim:
shape = tuple([None for _ in range(ndim)])
if sparse:
x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)
else:
x = array_ops.placeholder(dtype, shape=shape, name=name)
x._uses_learning_phase = False
return x
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
Arguments:
x: A candidate placeholder.
Returns:
Boolean.
"""
try:
return x.op.type == 'Placeholder'
except AttributeError:
return False
@tf_export('keras.backend.shape')
def shape(x):
"""Returns the symbolic shape of a tensor or variable.
Arguments:
x: A tensor or variable.
Returns:
A symbolic shape (which is itself a tensor).
Examples:
```python
# TensorFlow example
>>> from keras import backend as K
>>> tf_session = K.get_session()
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> input = keras.backend.placeholder(shape=(2, 4, 5))
>>> K.shape(kvar)
<tf.Tensor 'Shape_8:0' shape=(2,) dtype=int32>
>>> K.shape(input)
<tf.Tensor 'Shape_9:0' shape=(3,) dtype=int32>
# To get the integer shape (alternatively, you can use K.int_shape(x))
>>> K.shape(kvar).eval(session=tf_session)
array([2, 2], dtype=int32)
>>> K.shape(input).eval(session=tf_session)
array([2, 4, 5], dtype=int32)
```
"""
return array_ops.shape(x)
@tf_export('keras.backend.int_shape')
def int_shape(x):
"""Returns the shape of tensor or variable as a tuple of int or None entries.
Arguments:
x: Tensor or variable.
Returns:
A tuple of integers (or None entries).
Examples:
```python
>>> from keras import backend as K
>>> input = K.placeholder(shape=(2, 4, 5))
>>> K.int_shape(input)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.int_shape(kvar)
(2, 2)
```
"""
try:
return tuple(x.get_shape().as_list())
except ValueError:
return None
@tf_export('keras.backend.ndim')
def ndim(x):
"""Returns the number of axes in a tensor, as an integer.
Arguments:
x: Tensor or variable.
Returns:
Integer (scalar), number of axes.
Examples:
```python
>>> from keras import backend as K
>>> input = K.placeholder(shape=(2, 4, 5))
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.ndim(input)
3
>>> K.ndim(kvar)
2
```
"""
dims = x.get_shape()._dims
if dims is not None:
return len(dims)
return None
@tf_export('keras.backend.dtype')
def dtype(x):
"""Returns the dtype of a Keras tensor or variable, as a string.
Arguments:
x: Tensor or variable.
Returns:
String, dtype of `x`.
Examples:
```python
>>> from keras import backend as K
>>> K.dtype(K.placeholder(shape=(2,4,5)))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float32'))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64'))
'float64'
# Keras variable
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]))
>>> K.dtype(kvar)
'float32_ref'
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.dtype(kvar)
'float32_ref'
```
"""
return x.dtype.base_dtype.name
@tf_export('keras.backend.eval')
def eval(x):
"""Evaluates the value of a variable.
Arguments:
x: A variable.
Returns:
A Numpy array.
Examples:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.eval(kvar)
array([[ 1., 2.],
[ 3., 4.]], dtype=float32)
```
"""
return to_dense(x).eval(session=get_session())
@tf_export('keras.backend.zeros')
def zeros(shape, dtype=None, name=None):
"""Instantiates an all-zeros variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable
dtype: String, data type of returned Keras variable
name: String, name of returned Keras variable
Returns:
A variable (including Keras metadata), filled with `0.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.zeros((3,4))
>>> K.eval(kvar)
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.get_shape().as_list()):
return variable(v, dtype=dtype, name=name)
return v
@tf_export('keras.backend.ones')
def ones(shape, dtype=None, name=None):
"""Instantiates an all-ones variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, filled with `1.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.ones((3,4))
>>> K.eval(kvar)
array([[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.get_shape().as_list()):
return variable(v, dtype=dtype, name=name)
return v
@tf_export('keras.backend.eye')
def eye(size, dtype=None, name=None):
"""Instantiate an identity matrix and returns it.
Arguments:
size: Integer, number of rows/columns.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, an identity matrix.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.eye(3)
>>> K.eval(kvar)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name)
@tf_export('keras.backend.zeros_like')
def zeros_like(x, dtype=None, name=None):
"""Instantiates an all-zeros variable of the same shape as another tensor.
Arguments:
x: Keras variable or Keras tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with zeros.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_zeros = K.zeros_like(kvar)
>>> K.eval(kvar_zeros)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
return array_ops.zeros_like(x, dtype=dtype, name=name)
@tf_export('keras.backend.ones_like')
def ones_like(x, dtype=None, name=None):
"""Instantiates an all-ones variable of the same shape as another tensor.
Arguments:
x: Keras variable or tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with ones.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_ones = K.ones_like(kvar)
>>> K.eval(kvar_ones)
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
```
"""
return array_ops.ones_like(x, dtype=dtype, name=name)
def identity(x, name=None):
"""Returns a tensor with the same content as the input tensor.
Arguments:
x: The input tensor.
name: String, name for the variable to create.
Returns:
A tensor of the same shape, type and content.
"""
return array_ops.identity(x, name=name)
@tf_export('keras.backend.random_uniform_variable')
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
"""Instantiates a variable with values drawn from a uniform distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
low: Float, lower boundary of the output interval.
high: Float, upper boundary of the output interval.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
```python
# TensorFlow example
>>> kvar = K.random_uniform_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab40b10>
>>> K.eval(kvar)
array([[ 0.10940075, 0.10047495, 0.476143 ],
[ 0.66137183, 0.00869417, 0.89220798]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_uniform_initializer(
low, high, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@tf_export('keras.backend.random_normal_variable')
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
seed=None):
"""Instantiates a variable with values drawn from a normal distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
mean: Float, mean of the normal distribution.
scale: Float, standard deviation of the normal distribution.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
```python
# TensorFlow example
>>> kvar = K.random_normal_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
>>> K.eval(kvar)
array([[ 1.19591331, 0.68685907, -0.63814116],
[ 0.92629528, 0.28055015, 1.70484698]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_normal_initializer(
mean, scale, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@tf_export('keras.backend.count_params')
def count_params(x):
"""Returns the static number of elements in a variable or tensor.
Arguments:
x: Variable or tensor.
Returns:
Integer, the number of scalars in `x`.
Example:
```python
>>> kvar = K.zeros((2,3))
>>> K.count_params(kvar)
6
>>> K.eval(kvar)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
return np.prod(x.get_shape().as_list())
@tf_export('keras.backend.cast')
def cast(x, dtype):
"""Casts a tensor to a different dtype and returns it.
You can cast a Keras variable but it still returns a Keras tensor.
Arguments:
x: Keras tensor (or variable).
dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).
Returns:
Keras tensor with dtype `dtype`.
Example:
```python
>>> from keras import backend as K
>>> input = K.placeholder((2, 3), dtype='float32')
>>> input
<tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
# The cast does not happen in-place, as shown below.
>>> K.cast(input, dtype='float16')
<tf.Tensor 'Cast_1:0' shape=(2, 3) dtype=float16>
>>> input
<tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
# you need to assign it.
>>> input = K.cast(input, dtype='float16')
>>> input
<tf.Tensor 'Cast_2:0' shape=(2, 3) dtype=float16>
```
"""
return math_ops.cast(x, dtype)
# UPDATES OPS
@tf_export('keras.backend.update')
def update(x, new_x):
"""Update the value of `x` to `new_x`.
Arguments:
x: A Variable.
new_x: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign(x, new_x)
@tf_export('keras.backend.update_add')
def update_add(x, increment):
"""Update the value of `x` by adding `increment`.
Arguments:
x: A Variable.
increment: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_add(x, increment)
@tf_export('keras.backend.update_sub')
def update_sub(x, decrement):
"""Update the value of `x` by subtracting `decrement`.
Arguments:
x: A Variable.
decrement: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_sub(x, decrement)
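# Usage sketch for the update ops above (assumes this module is imported
# as `K`): each returns an op that applies the assignment when evaluated.
#   >>> v = K.variable(0.)
#   >>> inc = K.update_add(v, K.constant(1.))
#   >>> K.get_session().run(inc)  # `v` now holds 1.0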
@tf_export('keras.backend.moving_average_update')
def moving_average_update(x, value, momentum):
"""Compute the moving average of a variable.
Arguments:
x: A Variable.
value: A tensor with the same shape as `x`.
momentum: The moving average momentum.
Returns:
An Operation to update the variable.
"""
return moving_averages.assign_moving_average(
x, value, momentum, zero_debias=True)
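# Semantics sketch: the returned op moves `x` towards `value`, roughly
# `x = x * momentum + value * (1 - momentum)` (with zero-debiasing applied).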
# LINEAR ALGEBRA
@tf_export('keras.backend.dot')
def dot(x, y):
"""Multiplies 2 tensors (and/or variables) and returns a *tensor*.
When attempting to multiply an nD tensor
with an nD tensor, it reproduces the Theano behavior.
(e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor, dot product of `x` and `y`.
Examples:
```python
# dot product between tensors
>>> x = K.placeholder(shape=(2, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(2, 4) dtype=float32>
```
```python
# dot product between tensors
>>> x = K.placeholder(shape=(32, 28, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(32, 28, 4) dtype=float32>
```
```python
# Theano-like behavior example
>>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = K.ones((4, 3, 5))
>>> xy = K.dot(x, y)
>>> K.int_shape(xy)
(2, 4, 5)
```
"""
if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
x_shape = []
for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))):
if i is not None:
x_shape.append(i)
else:
x_shape.append(s)
x_shape = tuple(x_shape)
y_shape = []
for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))):
if i is not None:
y_shape.append(i)
else:
y_shape.append(s)
y_shape = tuple(y_shape)
y_permute_dim = list(range(ndim(y)))
y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
xt = array_ops.reshape(x, [-1, x_shape[-1]])
yt = array_ops.reshape(
array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
return array_ops.reshape(
math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
if is_sparse(x):
out = sparse_ops.sparse_tensor_dense_matmul(x, y)
else:
out = math_ops.matmul(x, y)
return out
@tf_export('keras.backend.batch_dot')
def batch_dot(x, y, axes=None):
"""Batchwise dot product.
`batch_dot` is used to compute dot product of `x` and `y` when
`x` and `y` are data in batch, i.e. in a shape of
`(batch_size, :)`.
`batch_dot` results in a tensor or variable with fewer dimensions
than the input. If the number of dimensions is reduced to 1,
we use `expand_dims` to make sure that ndim is at least 2.
Arguments:
x: Keras tensor or variable with `ndim >= 2`.
y: Keras tensor or variable with `ndim >= 2`.
axes: list of (or single) int with target dimensions.
The lengths of `axes[0]` and `axes[1]` should be the same.
Returns:
A tensor with shape equal to the concatenation of `x`'s shape
(less the dimension that was summed over) and `y`'s shape
(less the batch dimension and the dimension that was summed over).
If the final rank is 1, we reshape it to `(batch_size, 1)`.
Examples:
Assume `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]`
`batch_dot(x, y, axes=1) = [[17], [53]]` which is the main diagonal
of `x.dot(y.T)`, although we never have to calculate the off-diagonal
elements.
Shape inference:
Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
If `axes` is (1, 2), to find the output shape of resultant tensor,
loop through each dimension in `x`'s shape and `y`'s shape:
* `x.shape[0]` : 100 : append to output shape
* `x.shape[1]` : 20 : do not append to output shape,
dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)
* `y.shape[0]` : 100 : do not append to output shape,
always ignore first dimension of `y`
* `y.shape[1]` : 30 : append to output shape
* `y.shape[2]` : 20 : do not append to output shape,
dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)
`output_shape` = `(100, 30)`
```python
>>> x_batch = K.ones(shape=(32, 20, 1))
>>> y_batch = K.ones(shape=(32, 30, 20))
>>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2])
>>> K.int_shape(xy_batch_dot)
(32, 1, 30)
```
"""
if isinstance(axes, int):
axes = (axes, axes)
x_ndim = ndim(x)
y_ndim = ndim(y)
if x_ndim > y_ndim:
diff = x_ndim - y_ndim
y = array_ops.reshape(y,
array_ops.concat(
[array_ops.shape(y), [1] * (diff)], axis=0))
elif y_ndim > x_ndim:
diff = y_ndim - x_ndim
x = array_ops.reshape(x,
array_ops.concat(
[array_ops.shape(x), [1] * (diff)], axis=0))
else:
diff = 0
if ndim(x) == 2 and ndim(y) == 2:
if axes[0] == axes[1]:
out = math_ops.reduce_sum(math_ops.multiply(x, y), axes[0])
else:
out = math_ops.reduce_sum(
math_ops.multiply(array_ops.transpose(x, [1, 0]), y), axes[1])
else:
if axes is not None:
adj_x = None if axes[0] == ndim(x) - 1 else True
adj_y = True if axes[1] == ndim(y) - 1 else None
else:
adj_x = None
adj_y = None
out = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
if diff:
if x_ndim > y_ndim:
idx = x_ndim + y_ndim - 3
else:
idx = x_ndim - 1
out = array_ops.squeeze(out, list(range(idx, idx + diff)))
if ndim(out) == 1:
out = expand_dims(out, 1)
return out
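# Worked sketch of the 2D case in the docstring above (assumes this module
# is imported as `K`):
#   >>> x = K.constant([[1, 2], [3, 4]])
#   >>> y = K.constant([[5, 6], [7, 8]])
#   >>> K.eval(K.batch_dot(x, y, axes=1))
#   array([[ 17.], [ 53.]], dtype=float32)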
@tf_export('keras.backend.transpose')
def transpose(x):
"""Transposes a tensor and returns it.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
Examples:
```python
>>> var = K.variable([[1, 2, 3], [4, 5, 6]])
>>> K.eval(var)
array([[ 1., 2., 3.],
[ 4., 5., 6.]], dtype=float32)
>>> var_transposed = K.transpose(var)
>>> K.eval(var_transposed)
array([[ 1., 4.],
[ 2., 5.],
[ 3., 6.]], dtype=float32)
```
```python
>>> input = K.placeholder((2, 3))
>>> input
<tf.Tensor 'Placeholder_11:0' shape=(2, 3) dtype=float32>
>>> input_transposed = K.transpose(input)
>>> input_transposed
<tf.Tensor 'transpose_4:0' shape=(3, 2) dtype=float32>
```
"""
return array_ops.transpose(x)
@tf_export('keras.backend.gather')
def gather(reference, indices):
"""Retrieves the elements of indices `indices` in the tensor `reference`.
Arguments:
reference: A tensor.
indices: An integer tensor of indices.
Returns:
A tensor of same type as `reference`.
"""
return array_ops.gather(reference, indices)
# ELEMENT-WISE OPERATIONS
@tf_export('keras.backend.max')
def max(x, axis=None, keepdims=False):
"""Maximum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find maximum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with maximum values of `x`.
"""
return math_ops.reduce_max(x, axis, keepdims)
@tf_export('keras.backend.min')
def min(x, axis=None, keepdims=False):
"""Minimum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find minimum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with minimum values of `x`.
"""
return math_ops.reduce_min(x, axis, keepdims)
@tf_export('keras.backend.sum')
def sum(x, axis=None, keepdims=False):
"""Sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to sum over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with sum of `x`.
"""
return math_ops.reduce_sum(x, axis, keepdims)
@tf_export('keras.backend.prod')
def prod(x, axis=None, keepdims=False):
"""Multiplies the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the product of elements of `x`.
"""
return math_ops.reduce_prod(x, axis, keepdims)
def cumsum(x, axis=0):
"""Cumulative sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the sum.
Returns:
A tensor of the cumulative sum of values of `x` along `axis`.
"""
return math_ops.cumsum(x, axis=axis)
def cumprod(x, axis=0):
"""Cumulative product of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
Returns:
A tensor of the cumulative product of values of `x` along `axis`.
"""
return math_ops.cumprod(x, axis=axis)
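# Quick sketch (assumes this module is imported as `K`):
#   >>> K.eval(K.cumsum(K.constant([1., 2., 3.])))   # -> [1., 3., 6.]
#   >>> K.eval(K.cumprod(K.constant([1., 2., 3.])))  # -> [1., 2., 6.]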
@tf_export('keras.backend.var')
def var(x, axis=None, keepdims=False):
"""Variance of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the variance.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the variance of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
m = math_ops.reduce_mean(x, axis, True)
devs_squared = math_ops.square(x - m)
return math_ops.reduce_mean(
devs_squared, axis, keepdims)
@tf_export('keras.backend.std')
def std(x, axis=None, keepdims=False):
"""Standard deviation of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the standard deviation.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the standard deviation of elements of `x`.
"""
return math_ops.sqrt(var(x, axis=axis, keepdims=keepdims))
@tf_export('keras.backend.mean')
def mean(x, axis=None, keepdims=False):
"""Mean of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: A list of integer. Axes to compute the mean.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1 for each entry in `axis`. If `keepdims` is `True`,
the reduced dimensions are retained with length 1.
Returns:
A tensor with the mean of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_mean(x, axis, keepdims)
@tf_export('keras.backend.any')
def any(x, axis=None, keepdims=False):
"""Bitwise reduction (logical OR).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
keepdims: whether to drop or broadcast the reduction axes.
Returns:
A bool tensor.
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_any(x, axis, keepdims)
@tf_export('keras.backend.all')
def all(x, axis=None, keepdims=False):
"""Bitwise reduction (logical AND).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
keepdims: whether to drop or broadcast the reduction axes.
Returns:
A bool tensor.
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_all(x, axis, keepdims)
@tf_export('keras.backend.argmax')
def argmax(x, axis=-1):
"""Returns the index of the maximum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmax(x, axis)
@tf_export('keras.backend.argmin')
def argmin(x, axis=-1):
"""Returns the index of the minimum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmin(x, axis)
@tf_export('keras.backend.square')
def square(x):
"""Element-wise square.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.square(x)
@tf_export('keras.backend.abs')
def abs(x):
"""Element-wise absolute value.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.abs(x)
@tf_export('keras.backend.sqrt')
def sqrt(x):
"""Element-wise square root.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
zero = _to_tensor(0., x.dtype.base_dtype)
inf = _to_tensor(np.inf, x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, inf)
return math_ops.sqrt(x)
@tf_export('keras.backend.exp')
def exp(x):
"""Element-wise exponential.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.exp(x)
@tf_export('keras.backend.log')
def log(x):
"""Element-wise log.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.log(x)
def logsumexp(x, axis=None, keepdims=False):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
This function is more numerically stable than log(sum(exp(x))).
It avoids overflows caused by taking the exp of large inputs and
underflows caused by taking the log of small inputs.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to reduce over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`, the reduced dimension is
retained with length 1.
Returns:
The reduced tensor.
"""
return math_ops.reduce_logsumexp(x, axis, keepdims)
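# Stability sketch (assumes this module is imported as `K`): for large
# inputs, a naive `log(sum(exp(x)))` overflows, while `logsumexp` does not.
#   >>> x = K.constant([1000., 1000.])
#   >>> K.eval(K.logsumexp(x))  # ~1000.693, i.e. 1000 + log(2)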
@tf_export('keras.backend.round')
def round(x):
"""Element-wise rounding to the closest integer.
In case of tie, the rounding mode used is "half to even".
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.round(x)
@tf_export('keras.backend.sign')
def sign(x):
"""Element-wise sign.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sign(x)
@tf_export('keras.backend.pow')
def pow(x, a):
"""Element-wise exponentiation.
Arguments:
x: Tensor or variable.
a: Python integer.
Returns:
A tensor.
"""
return math_ops.pow(x, a)
@tf_export('keras.backend.clip')
def clip(x, min_value, max_value):
"""Element-wise value clipping.
Arguments:
x: Tensor or variable.
min_value: Python float or integer.
max_value: Python float or integer.
Returns:
A tensor.
"""
if max_value is not None and max_value < min_value:
max_value = min_value
if max_value is None:
max_value = np.inf
min_value = _to_tensor(min_value, x.dtype.base_dtype)
max_value = _to_tensor(max_value, x.dtype.base_dtype)
return clip_ops.clip_by_value(x, min_value, max_value)
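# Usage sketch (assumes this module is imported as `K`):
#   >>> K.eval(K.clip(K.constant([-1., 0.5, 2.]), 0., 1.))
#   array([ 0. , 0.5, 1. ], dtype=float32)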
@tf_export('keras.backend.equal')
def equal(x, y):
"""Element-wise equality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.equal(x, y)
@tf_export('keras.backend.not_equal')
def not_equal(x, y):
"""Element-wise inequality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.not_equal(x, y)
@tf_export('keras.backend.greater')
def greater(x, y):
"""Element-wise truth value of (x > y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater(x, y)
@tf_export('keras.backend.greater_equal')
def greater_equal(x, y):
"""Element-wise truth value of (x >= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater_equal(x, y)
@tf_export('keras.backend.less')
def less(x, y):
"""Element-wise truth value of (x < y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less(x, y)
@tf_export('keras.backend.less_equal')
def less_equal(x, y):
"""Element-wise truth value of (x <= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less_equal(x, y)
@tf_export('keras.backend.maximum')
def maximum(x, y):
"""Element-wise maximum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.maximum(x, y)
@tf_export('keras.backend.minimum')
def minimum(x, y):
"""Element-wise minimum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.minimum(x, y)
@tf_export('keras.backend.sin')
def sin(x):
"""Computes sin of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sin(x)
@tf_export('keras.backend.cos')
def cos(x):
"""Computes cos of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.cos(x)
def _regular_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple of length 3: `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
return normed, mean, var
def _broadcast_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused, broadcast version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple of length 3: `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
target_shape = []
for axis in range(ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(array_ops.shape(x)[axis])
target_shape = array_ops.stack(target_shape)
broadcast_mean = array_ops.reshape(mean, target_shape)
broadcast_var = array_ops.reshape(var, target_shape)
if gamma is None:
broadcast_gamma = None
else:
broadcast_gamma = array_ops.reshape(gamma, target_shape)
if beta is None:
broadcast_beta = None
else:
broadcast_beta = array_ops.reshape(beta, target_shape)
normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
broadcast_beta, broadcast_gamma, epsilon)
return normed, mean, var
def _fused_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple of length 3: `(normalized_tensor, mean, variance)`.
"""
if list(reduction_axes) == [0, 1, 2]:
normalization_axis = 3
tf_data_format = 'NHWC'
else:
normalization_axis = 1
tf_data_format = 'NCHW'
if gamma is None:
gamma = constant_op.constant(
1.0, dtype=x.dtype, shape=[x.get_shape()[normalization_axis]])
if beta is None:
beta = constant_op.constant(
0.0, dtype=x.dtype, shape=[x.get_shape()[normalization_axis]])
return nn.fused_batch_norm(
x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)
@tf_export('keras.backend.normalize_batch_in_training')
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
"""Computes mean and std for batch then apply batch_normalization on batch.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple of length 3: `(normalized_tensor, mean, variance)`.
"""
if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:
if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
return _fused_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
return _regular_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
@tf_export('keras.backend.batch_normalization')
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
"""Applies batch normalization on x given mean, var, beta and gamma.
I.e. returns:
`output = (x - mean) / sqrt(var + epsilon) * gamma + beta`
Arguments:
x: Input tensor or variable.
mean: Mean of batch.
var: Variance of batch.
beta: Tensor with which to center the input.
gamma: Tensor by which to scale the input.
epsilon: Fuzz factor.
Returns:
A tensor.
"""
return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
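# Sanity sketch: with `mean=0`, `var=1`, `beta=0` and `gamma=1`, the output
# is approximately `x` itself, since `(x - 0) / sqrt(1 + epsilon) * 1 + 0 ~= x`.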
# SHAPE OPERATIONS
@tf_export('keras.backend.concatenate')
def concatenate(tensors, axis=-1):
"""Concatenates a list of tensors alongside the specified axis.
Arguments:
tensors: list of tensors to concatenate.
axis: concatenation axis.
Returns:
A tensor.
"""
if axis < 0:
rank = ndim(tensors[0])
if rank:
axis %= rank
else:
axis = 0
if py_all([is_sparse(x) for x in tensors]):
return sparse_ops.sparse_concat(axis, tensors)
else:
return array_ops.concat([to_dense(x) for x in tensors], axis)
@tf_export('keras.backend.reshape')
def reshape(x, shape):
"""Reshapes a tensor to the specified shape.
Arguments:
x: Tensor or variable.
shape: Target shape tuple.
Returns:
A tensor.
"""
return array_ops.reshape(x, shape)
@tf_export('keras.backend.permute_dimensions')
def permute_dimensions(x, pattern):
"""Permutes axes in a tensor.
Arguments:
x: Tensor or variable.
pattern: A tuple of
dimension indices, e.g. `(0, 2, 1)`.
Returns:
A tensor.
"""
return array_ops.transpose(x, perm=pattern)
@tf_export('keras.backend.resize_images')
def resize_images(x, height_factor, width_factor, data_format):
"""Resizes the images contained in a 4D tensor.
Arguments:
x: Tensor or variable to resize.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
Returns:
A tensor.
Raises:
ValueError: if `data_format` is neither
`channels_last` or `channels_first`.
"""
if data_format == 'channels_first':
original_shape = int_shape(x)
new_shape = array_ops.shape(x)[2:]
new_shape *= constant_op.constant(
np.array([height_factor, width_factor]).astype('int32'))
x = permute_dimensions(x, [0, 2, 3, 1])
x = image_ops.resize_nearest_neighbor(x, new_shape)
x = permute_dimensions(x, [0, 3, 1, 2])
x.set_shape((None, None, original_shape[2] * height_factor
if original_shape[2] is not None else None,
original_shape[3] * width_factor
if original_shape[3] is not None else None))
return x
elif data_format == 'channels_last':
original_shape = int_shape(x)
new_shape = array_ops.shape(x)[1:3]
new_shape *= constant_op.constant(
np.array([height_factor, width_factor]).astype('int32'))
x = image_ops.resize_nearest_neighbor(x, new_shape)
x.set_shape((None, original_shape[1] * height_factor
if original_shape[1] is not None else None,
original_shape[2] * width_factor
if original_shape[2] is not None else None, None))
return x
else:
raise ValueError('Invalid data_format: ' + str(data_format))
@tf_export('keras.backend.resize_volumes')
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
"""Resizes the volume contained in a 5D tensor.
Arguments:
x: Tensor or variable to resize.
depth_factor: Positive integer.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
Returns:
A tensor.
Raises:
ValueError: if `data_format` is neither
`channels_last` or `channels_first`.
"""
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('Invalid data_format: ' + str(data_format))
@tf_export('keras.backend.repeat_elements')
def repeat_elements(x, rep, axis):
"""Repeats the elements of a tensor along an axis, like `np.repeat`.
If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
will have shape `(s1, s2 * rep, s3)`.
Arguments:
x: Tensor or variable.
rep: Python integer, number of times to repeat.
axis: Axis along which to repeat.
Returns:
A tensor.
"""
x_shape = x.get_shape().as_list()
# For static axis
if x_shape[axis] is not None:
# slices along the repeat axis
splits = array_ops.split(value=x,
num_or_size_splits=x_shape[axis],
axis=axis)
# repeat each slice the given number of reps
x_rep = [s for s in splits for _ in range(rep)]
return concatenate(x_rep, axis)
# Here we use tf.tile to mimic behavior of np.repeat so that
# we can handle dynamic shapes (that include None).
# To do that, we need an auxiliary axis to repeat elements along
# it and then merge them along the desired axis.
# Repeating
auxiliary_axis = axis + 1
x_shape = array_ops.shape(x)
x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)
reps = np.ones(len(x.get_shape()) + 1)
reps[auxiliary_axis] = rep
x_rep = array_ops.tile(x_rep, reps)
# Merging
reps = np.delete(reps, auxiliary_axis)
reps[axis] = rep
reps = array_ops.constant(reps, dtype='int32')
x_shape *= reps
x_rep = array_ops.reshape(x_rep, x_shape)
# Fix shape representation
x_shape = x.get_shape().as_list()
x_rep.set_shape(x_shape)
x_rep._keras_shape = tuple(x_shape)
return x_rep
@tf_export('keras.backend.repeat')
def repeat(x, n):
"""Repeats a 2D tensor.
If `x` has shape `(samples, dim)` and `n` is `2`,
the output will have shape `(samples, 2, dim)`.
Arguments:
x: Tensor or variable.
n: Python integer, number of times to repeat.
Returns:
A tensor.
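Example (an illustrative sketch; assumes `K` is this backend module):
```python
>>> x = K.placeholder(shape=(None, 4))
>>> K.int_shape(K.repeat(x, n=3))
(None, 3, 4)
```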
"""
assert ndim(x) == 2
x = array_ops.expand_dims(x, 1)
pattern = array_ops.stack([1, n, 1])
return array_ops.tile(x, pattern)
@tf_export('keras.backend.arange')
def arange(start, stop=None, step=1, dtype='int32'):
"""Creates a 1D tensor containing a sequence of integers.
The function arguments use the same convention as
Theano's arange: if only one argument is provided,
it is in fact the "stop" argument and "start" is 0.
The default type of the returned tensor is `'int32'` to
match TensorFlow's default.
Arguments:
start: Start value.
stop: Stop value.
step: Difference between two successive values.
dtype: Integer dtype to use.
Returns:
An integer tensor.
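Example (an illustrative sketch; assumes `K` is this backend module):
```python
>>> K.eval(K.arange(5))
array([0, 1, 2, 3, 4], dtype=int32)
>>> K.eval(K.arange(2, 10, 2))
array([2, 4, 6, 8], dtype=int32)
```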
"""
# Match the behavior of numpy and Theano: if only a negative stop value is
# given, return an empty sequence.
if stop is None and start < 0:
start = 0
result = math_ops.range(start, limit=stop, delta=step, name='arange')
if dtype != 'int32':
result = cast(result, dtype)
return result
def tile(x, n):
"""Creates a tensor by tiling `x` by `n`.
Arguments:
x: A tensor or variable
n: A list of integers. The length must be the same as the number of
dimensions in `x`.
Returns:
A tiled tensor.
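Example (an illustrative sketch; assumes `K` is this backend module):
```python
>>> x = K.ones((2, 3))
>>> K.int_shape(K.tile(x, [2, 1]))  # first axis doubled, second unchanged
(4, 3)
```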
"""
if isinstance(n, int):
n = [n]
return array_ops.tile(x, n)
@tf_export('keras.backend.flatten')
def flatten(x):
"""Flatten a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor, reshaped into 1-D.
"""
return array_ops.reshape(x, [-1])
@tf_export('keras.backend.batch_flatten')
def batch_flatten(x):
"""Turn a nD tensor into a 2D tensor with same 0th dimension.
In other words, it flattens each data samples of a batch.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
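Example (an illustrative sketch; assumes `K` is this backend module):
```python
>>> x = K.placeholder(shape=(None, 2, 3, 4))
>>> y = K.batch_flatten(x)  # each (2, 3, 4) sample becomes a vector of 24
>>> K.ndim(y)
2
```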
"""
x = array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])]))
return x
@tf_export('keras.backend.expand_dims')
def expand_dims(x, axis=-1):
"""Adds a 1-sized dimension at index "axis".
Arguments:
x: A tensor or variable.
axis: Position where to add a new axis.
Returns:
A tensor with expanded dimensions.
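Example (an illustrative sketch; assumes `K` is this backend module):
```python
>>> x = K.placeholder(shape=(2, 3))
>>> K.int_shape(K.expand_dims(x, axis=1))
(2, 1, 3)
```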
"""
return array_ops.expand_dims(x, axis)
@tf_export('keras.backend.squeeze')
def squeeze(x, axis):
"""Removes a 1-dimension from the tensor at index "axis".
Arguments:
x: A tensor or variable.
axis: Axis to drop.
Returns:
A tensor with the same data as `x` but reduced dimensions.
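Example (an illustrative sketch; assumes `K` is this backend module):
```python
>>> x = K.placeholder(shape=(2, 1, 3))
>>> K.int_shape(K.squeeze(x, axis=1))
(2, 3)
```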
"""
return array_ops.squeeze(x, [axis])
@tf_export('keras.backend.temporal_padding')
def temporal_padding(x, padding=(1, 1)):
"""Pads the middle dimension of a 3D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 integers, how many zeros to
add at the start and end of dim 1.
Returns:
A padded 3D tensor.
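Example (an illustrative sketch; assumes `K` is this backend module):
```python
>>> x = K.placeholder(shape=(None, 3, 5))
>>> K.int_shape(K.temporal_padding(x, padding=(2, 2)))
(None, 7, 5)
```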
"""
assert len(padding) == 2
pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
return array_ops.pad(x, pattern)
@tf_export('keras.backend.spatial_2d_padding')
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
"""Pads the 2nd and 3rd dimensions of a 4D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 4D tensor.
Raises:
ValueError: if `data_format` is neither
`channels_last` nor `channels_first`.
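Example (an illustrative sketch; assumes `K` is this backend module):
```python
>>> x = K.placeholder(shape=(None, 4, 4, 3))
>>> K.int_shape(K.spatial_2d_padding(x, data_format='channels_last'))
(None, 6, 6, 3)
```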
"""
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
else:
pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
return array_ops.pad(x, pattern)
@tf_export('keras.backend.spatial_3d_padding')
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
"""Pads 5D tensor with zeros along the depth, height, width dimensions.
Pads these dimensions with `padding[0]`, `padding[1]` and `padding[2]`
zeros on the left and right, respectively.
For 'channels_last' data_format,
the 2nd, 3rd and 4th dimensions will be padded.
For 'channels_first' data_format,
the 3rd, 4th and 5th dimensions will be padded.
Arguments:
x: Tensor or variable.
padding: Tuple of 3 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 5D tensor.
Raises:
ValueError: if `data_format` is neither
`channels_last` nor `channels_first`.
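Example (an illustrative sketch; assumes `K` is this backend module):
```python
>>> x = K.placeholder(shape=(None, 2, 2, 2, 3))
>>> K.int_shape(K.spatial_3d_padding(x, data_format='channels_last'))
(None, 4, 4, 4, 3)
```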
"""
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]
else:
pattern = [[0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0],
padding[2][1]], [0, 0]]
return array_ops.pad(x, pattern)
@tf_export('keras.backend.stack')
def stack(x, axis=0):
"""Stacks a list of rank `R` tensors into a rank `R+1` tensor.
Arguments:
x: List of tensors.
axis: Axis along which to perform stacking.
Returns:
A tensor.
"""
return array_ops.stack(x, axis=axis)
@tf_export('keras.backend.one_hot')
def one_hot(indices, num_classes):
"""Computes the one-hot representation of an integer tensor.
Arguments:
indices: nD integer tensor of shape
`(batch_size, dim1, dim2, ... dim(n-1))`.
num_classes: Integer, number of classes to consider.
Returns:
The (n + 1)D one-hot representation of the input,
with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`.
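Example (an illustrative sketch; assumes `K` is this backend module,
and exact array formatting may vary with the NumPy version):
```python
>>> labels = K.constant([0, 2, 1], dtype='int32')
>>> K.eval(K.one_hot(labels, num_classes=3))
array([[1., 0., 0.],
       [0., 0., 1.],
       [0., 1., 0.]], dtype=float32)
```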
"""
return array_ops.one_hot(indices, depth=num_classes, axis=-1)
@tf_export('keras.backend.reverse')
def reverse(x, axes):
"""Reverse a tensor along the specified axes.
Arguments:
x: Tensor to reverse.
axes: Integer or iterable of integers.
Axes to reverse.
Returns:
A tensor.
"""
if isinstance(axes, int):
axes = [axes]
return array_ops.reverse(x, axes)
# VALUE MANIPULATION
@tf_export('keras.backend.get_value')
def get_value(x):
"""Returns the value of a variable.
Arguments:
x: input variable.
Returns:
A Numpy array.
"""
if context.executing_eagerly():
return x.numpy()
return x.eval(session=get_session())
@tf_export('keras.backend.batch_get_value')
def batch_get_value(tensors):
"""Returns the value of more than one tensor variable.
Arguments:
tensors: list of ops to run.
Returns:
A list of Numpy arrays.
"""
if context.executing_eagerly():
return [x.numpy() for x in tensors]
if tensors:
return get_session().run(tensors)
else:
return []
@tf_export('keras.backend.set_value')
def set_value(x, value):
"""Sets the value of a variable, from a Numpy array.
Arguments:
x: Tensor to set to a new value.
value: Value to set the tensor to, as a Numpy array
(of the same shape).
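Example (an illustrative sketch; assumes `K` is this backend module and
`np` is NumPy; exact array formatting may vary with the NumPy version):
```python
>>> v = K.variable(np.zeros((2, 2)))
>>> K.set_value(v, np.ones((2, 2)))
>>> K.get_value(v)
array([[1., 1.],
       [1., 1.]], dtype=float32)
```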
"""
value = np.asarray(value, dtype=dtype(x))
if context.executing_eagerly():
x.assign(value)
else:
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
get_session().run(assign_op, feed_dict={assign_placeholder: value})
@tf_export('keras.backend.batch_set_value')
def batch_set_value(tuples):
"""Sets the values of many tensor variables at once.
Arguments:
tuples: a list of tuples `(tensor, value)`.
`value` should be a Numpy array.
"""
if context.executing_eagerly():
for x, value in tuples:
x.assign(np.asarray(value, dtype=dtype(x)))
else:
if tuples:
assign_ops = []
feed_dict = {}
for x, value in tuples:
value = np.asarray(value, dtype=dtype(x))
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
assign_placeholder = array_ops.placeholder(tf_dtype,
shape=value.shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
assign_ops.append(assign_op)
feed_dict[assign_placeholder] = value
get_session().run(assign_ops, feed_dict=feed_dict)
@tf_export('keras.backend.print_tensor')
def print_tensor(x, message=''):
"""Prints `message` and the tensor value when evaluated.
Note that `print_tensor` returns a new tensor identical to `x`,
which should be used in the subsequent code. Otherwise, the
print operation is not taken into account during evaluation.
Example:
```python
>>> x = K.print_tensor(x, message="x is: ")
```
Arguments:
x: Tensor to print.
message: Message to print jointly with the tensor.
Returns:
The same tensor `x`, unchanged.
"""
return logging_ops.Print(x, [x], message)
# GRAPH MANIPULATION
class Function(object):
"""Runs a computation graph.
It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`.
In particular, additional operations can be passed via the `fetches`
argument, and additional tensor substitutions via the `feed_dict`
argument. Note that the given substitutions are merged with the
substitutions from `inputs`. Even though `feed_dict` is passed once in the
constructor (called in `model.compile()`), we can modify the values in the
dictionary. Through this `feed_dict` we can provide additional
substitutions besides Keras inputs.
Arguments:
inputs: Feed placeholders to the computation graph.