1 change: 1 addition & 0 deletions tensorflow/lite/builtin_ops.h
@@ -186,6 +186,7 @@ typedef enum {
kTfLiteBuiltinAtan2 = 156,
kTfLiteBuiltinUnsortedSegmentMin = 157,
kTfLiteBuiltinSign = 158,
kTfLiteBuiltinBitcast = 159,
} TfLiteBuiltinOperator;

#ifdef __cplusplus
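The new kTfLiteBuiltinBitcast value registers a TensorFlow-style bitcast as a TFLite builtin: the op reinterprets a tensor's underlying bytes as another dtype rather than converting values. A minimal, self-contained C++ sketch of that reinterpretation (illustrative only, not the TFLite kernel; portable C++ has to use memcpy rather than a pointer cast):

    // Sketch of bitcast semantics: the same bytes, read as a different type.
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    int main() {
      float f = 1.0f;                         // bit pattern 0x3F800000
      uint8_t bytes[sizeof(float)];
      std::memcpy(bytes, &f, sizeof(bytes));  // "bitcast" float -> 4 x uint8
      for (unsigned b : bytes) std::cout << b << ' ';
      std::cout << '\n';                      // little-endian: 0 0 128 63
      return 0;
    }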
1 change: 1 addition & 0 deletions tensorflow/lite/core/api/flatbuffer_conversions.cc
@@ -885,6 +885,7 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
case BuiltinOperator_UNSORTED_SEGMENT_SUM:
case BuiltinOperator_ATAN2:
case BuiltinOperator_SIGN:
case BuiltinOperator_BITCAST:
case BuiltinOperator_WHERE:
return kTfLiteOk;
case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
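In ParseOpDataTfLite, BITCAST joins the builtins that carry no option table, so parsing just reports success. A tiny sketch of that fall-through pattern (demo names only, not the TFLite parser):

    // Demo only: option-less builtins share case labels and return success.
    #include <iostream>

    enum class DemoBuiltin { kAtan2, kSign, kBitcast, kConv2d };
    enum DemoStatus { kDemoOk, kDemoUnhandled };

    DemoStatus ParseDemoOpData(DemoBuiltin op) {
      switch (op) {
        case DemoBuiltin::kAtan2:
        case DemoBuiltin::kSign:
        case DemoBuiltin::kBitcast:
          return kDemoOk;  // nothing to parse for these ops
        default:
          return kDemoUnhandled;  // real option parsing would go here
      }
    }

    int main() {
      std::cout << (ParseDemoOpData(DemoBuiltin::kBitcast) == kDemoOk) << '\n';  // 1
      return 0;
    }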
26 changes: 15 additions & 11 deletions tensorflow/lite/kernels/internal/reference/mul.h
@@ -1,4 +1,4 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -56,7 +56,7 @@ inline void Mul(const ArithmeticParams& params,
const int flat_size =
MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = ActivationFunctionWithMinMax(
output_data[i] = ActivationFunctionWithMinMax<T>(
input1_data[i] * input2_data[i], output_activation_min,
output_activation_max);
}
@@ -128,14 +128,18 @@ inline void BroadcastMul4DSlow(const ArithmeticParams& params,
}
}

template <typename T>
void BroadcastMul4DSlow(const ArithmeticParams& params,
const RuntimeShape& unextended_input1_shape,
const T* input1_data,
const RuntimeShape& unextended_input2_shape,
const T* input2_data,
const RuntimeShape& unextended_output_shape,
T* output_data) {
template <typename T,
// For unquantized mul on small integers, explicitly set to true.
bool enable_for_short_integers = false>
inline typename std::enable_if<
!is_small_integer<T>::value || enable_for_short_integers, void>::type
BroadcastMul4DSlow(const ArithmeticParams& params,
const RuntimeShape& unextended_input1_shape,
const T* input1_data,
const RuntimeShape& unextended_input2_shape,
const T* input2_data,
const RuntimeShape& unextended_output_shape,
T* output_data) {
T output_activation_min;
T output_activation_max;
GetActivationParams(params, &output_activation_min, &output_activation_max);
@@ -167,7 +171,7 @@ void BroadcastMul4DSlow(const ArithmeticParams& params,
for (int x = 0; x < output_shape.Dims(2); ++x) {
for (int c = 0; c < output_shape.Dims(3); ++c) {
output_data[Offset(output_shape, b, y, x, c)] =
ActivationFunctionWithMinMax(
ActivationFunctionWithMinMax<T>(
input1_data[SubscriptToIndex(desc1, b, y, x, c)] *
input2_data[SubscriptToIndex(desc2, b, y, x, c)],
output_activation_min, output_activation_max);
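The reworked BroadcastMul4DSlow pairs std::enable_if with a defaulted bool template parameter: the overload drops out of resolution for small integer element types unless the caller opts in with enable_for_short_integers = true. A self-contained sketch of that gating pattern (is_small_integer_demo and MulDemo are stand-ins, not the TFLite definitions):

    // SFINAE gate: disabled for 8/16-bit integers unless explicitly enabled.
    #include <cstdint>
    #include <iostream>
    #include <type_traits>

    template <typename T>
    struct is_small_integer_demo
        : std::integral_constant<bool, std::is_same<T, int8_t>::value ||
                                       std::is_same<T, uint8_t>::value ||
                                       std::is_same<T, int16_t>::value ||
                                       std::is_same<T, uint16_t>::value> {};

    template <typename T, bool enable_for_short_integers = false>
    typename std::enable_if<!is_small_integer_demo<T>::value ||
                                enable_for_short_integers,
                            void>::type
    MulDemo(const T* a, const T* b, T* out, int n) {
      for (int i = 0; i < n; ++i) out[i] = a[i] * b[i];
    }

    int main() {
      float fa[2] = {1.f, 2.f}, fb[2] = {3.f, 4.f}, fo[2];
      MulDemo(fa, fb, fo, 2);                 // float is not gated
      int16_t ia[2] = {1, 2}, ib[2] = {3, 4}, io[2];
      // MulDemo(ia, ib, io, 2);              // would not compile: overload disabled
      MulDemo<int16_t, true>(ia, ib, io, 2);  // explicit opt-in compiles
      std::cout << fo[1] << ' ' << io[1] << '\n';  // prints 8 8
      return 0;
    }

With this gate, calling the unquantized template on an 8- or 16-bit type fails at overload resolution instead of silently instantiating, which appears to be the intent of the extra template parameter.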
14 changes: 13 additions & 1 deletion tensorflow/lite/kernels/internal/types.h
@@ -1,4 +1,4 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -1025,6 +1025,12 @@ inline void SetActivationParams(int32_t min, int32_t max, P* params) {
params->quantized_activation_max = max;
}

template <typename P>
inline void SetActivationParams(uint32_t min, uint32_t max, P* params) {
params->quantized_activation_min = min;
params->quantized_activation_max = max;
}

template <typename P>
inline void SetActivationParams(int16_t min, int16_t max, P* params) {
params->int16_activation_min = min;
@@ -1043,6 +1049,12 @@ inline void GetActivationParams(const P& params, int32_t* min, int32_t* max) {
*max = params.quantized_activation_max;
}

template <typename P>
inline void GetActivationParams(const P& params, uint32_t* min, uint32_t* max) {
*min = params.quantized_activation_min;
*max = params.quantized_activation_max;
}

template <typename P>
inline void GetActivationParams(const P& params, int16_t* min, int16_t* max) {
*min = params.int16_activation_min;
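The new uint32_t overloads of SetActivationParams and GetActivationParams mirror the int32_t versions and reuse the quantized_activation_min/max fields, so a caller that computes an unsigned range selects the right overload purely by argument type. A small sketch of that dispatch (DemoParams and SetActivationParamsDemo are stand-ins, not the TFLite definitions):

    // Overload-by-argument-type dispatch, mirroring the pattern above.
    #include <cstdint>
    #include <iostream>

    struct DemoParams {
      int32_t quantized_activation_min = 0;
      int32_t quantized_activation_max = 0;
    };

    template <typename P>
    void SetActivationParamsDemo(int32_t min, int32_t max, P* params) {
      params->quantized_activation_min = min;
      params->quantized_activation_max = max;
    }

    template <typename P>
    void SetActivationParamsDemo(uint32_t min, uint32_t max, P* params) {
      params->quantized_activation_min = min;  // stored in the same fields
      params->quantized_activation_max = max;
    }

    int main() {
      DemoParams p;
      SetActivationParamsDemo(0u, 255u, &p);  // unsigned args pick the uint32_t overload
      std::cout << p.quantized_activation_min << ' '
                << p.quantized_activation_max << '\n';  // 0 255
      return 0;
    }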
70 changes: 69 additions & 1 deletion tensorflow/lite/python/schema_py_generated.py
@@ -937,6 +937,70 @@ def Pack(self, builder):
from flatbuffers.compat import import_numpy
np = import_numpy()

class BitcastOptions(object):
__slots__ = ['_tab']

@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = BitcastOptions()
x.Init(buf, n + offset)
return x

@classmethod
def GetRootAsBitcastOptions(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def BitcastOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

# BitcastOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)

def BitcastOptionsStart(builder): builder.StartObject(0)
def Start(builder):
return BitcastOptionsStart(builder)
def BitcastOptionsEnd(builder): return builder.EndObject()
def End(builder):
return BitcastOptionsEnd(builder)

class BitcastOptionsT(object):

# BitcastOptionsT
def __init__(self):
pass

@classmethod
def InitFromBuf(cls, buf, pos):
bitcastOptions = BitcastOptions()
bitcastOptions.Init(buf, pos)
return cls.InitFromObj(bitcastOptions)

@classmethod
def InitFromObj(cls, bitcastOptions):
x = BitcastOptionsT()
x._UnPack(bitcastOptions)
return x

# BitcastOptionsT
def _UnPack(self, bitcastOptions):
if bitcastOptions is None:
return

# BitcastOptionsT
def Pack(self, builder):
BitcastOptionsStart(builder)
bitcastOptions = BitcastOptionsEnd(builder)
return bitcastOptions
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

from flatbuffers.compat import import_numpy
np = import_numpy()

class BroadcastToOptions(object):
__slots__ = ['_tab']

@@ -1394,6 +1458,7 @@ class BuiltinOperator(object):
ATAN2 = 156
UNSORTED_SEGMENT_MIN = 157
SIGN = 158
BITCAST = 159
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite
@@ -1523,6 +1588,7 @@ class BuiltinOptions(object):
UnsortedSegmentSumOptions = 121
ATan2Options = 122
SignOptions = 123
BitcastOptions = 124

def BuiltinOptionsCreator(unionType, table):
from flatbuffers.table import Table
@@ -1774,6 +1840,8 @@ def BuiltinOptionsCreator(unionType, table):
return ATan2OptionsT.InitFromBuf(table.Bytes, table.Pos)
if unionType == BuiltinOptions().SignOptions:
return SignOptionsT.InitFromBuf(table.Bytes, table.Pos)
if unionType == BuiltinOptions().BitcastOptions:
return BitcastOptionsT.InitFromBuf(table.Bytes, table.Pos)
return None
# automatically generated by the FlatBuffers compiler, do not modify

@@ -7454,7 +7522,7 @@ def __init__(self):
self.inputs = None # type: List[int]
self.outputs = None # type: List[int]
self.builtinOptionsType = 0 # type: int
self.builtinOptions = None # type: Union[None, Conv2DOptionsT, DepthwiseConv2DOptionsT, ConcatEmbeddingsOptionsT, LSHProjectionOptionsT, Pool2DOptionsT, SVDFOptionsT, RNNOptionsT, FullyConnectedOptionsT, SoftmaxOptionsT, ConcatenationOptionsT, AddOptionsT, L2NormOptionsT, LocalResponseNormalizationOptionsT, LSTMOptionsT, ResizeBilinearOptionsT, CallOptionsT, ReshapeOptionsT, SkipGramOptionsT, SpaceToDepthOptionsT, EmbeddingLookupSparseOptionsT, MulOptionsT, PadOptionsT, GatherOptionsT, BatchToSpaceNDOptionsT, SpaceToBatchNDOptionsT, TransposeOptionsT, ReducerOptionsT, SubOptionsT, DivOptionsT, SqueezeOptionsT, SequenceRNNOptionsT, StridedSliceOptionsT, ExpOptionsT, TopKV2OptionsT, SplitOptionsT, LogSoftmaxOptionsT, CastOptionsT, DequantizeOptionsT, MaximumMinimumOptionsT, ArgMaxOptionsT, LessOptionsT, NegOptionsT, PadV2OptionsT, GreaterOptionsT, GreaterEqualOptionsT, LessEqualOptionsT, SelectOptionsT, SliceOptionsT, TransposeConvOptionsT, SparseToDenseOptionsT, TileOptionsT, ExpandDimsOptionsT, EqualOptionsT, NotEqualOptionsT, ShapeOptionsT, PowOptionsT, ArgMinOptionsT, FakeQuantOptionsT, PackOptionsT, LogicalOrOptionsT, OneHotOptionsT, LogicalAndOptionsT, LogicalNotOptionsT, UnpackOptionsT, FloorDivOptionsT, SquareOptionsT, ZerosLikeOptionsT, FillOptionsT, BidirectionalSequenceLSTMOptionsT, BidirectionalSequenceRNNOptionsT, UnidirectionalSequenceLSTMOptionsT, FloorModOptionsT, RangeOptionsT, ResizeNearestNeighborOptionsT, LeakyReluOptionsT, SquaredDifferenceOptionsT, MirrorPadOptionsT, AbsOptionsT, SplitVOptionsT, UniqueOptionsT, ReverseV2OptionsT, AddNOptionsT, GatherNdOptionsT, CosOptionsT, WhereOptionsT, RankOptionsT, ReverseSequenceOptionsT, MatrixDiagOptionsT, QuantizeOptionsT, MatrixSetDiagOptionsT, HardSwishOptionsT, IfOptionsT, WhileOptionsT, DepthToSpaceOptionsT, NonMaxSuppressionV4OptionsT, NonMaxSuppressionV5OptionsT, ScatterNdOptionsT, SelectV2OptionsT, DensifyOptionsT, SegmentSumOptionsT, BatchMatMulOptionsT, CumsumOptionsT, CallOnceOptionsT, BroadcastToOptionsT, Rfft2dOptionsT, Conv3DOptionsT, HashtableOptionsT, HashtableFindOptionsT, HashtableImportOptionsT, HashtableSizeOptionsT, VarHandleOptionsT, ReadVariableOptionsT, AssignVariableOptionsT, RandomOptionsT, BucketizeOptionsT, GeluOptionsT, DynamicUpdateSliceOptionsT, UnsortedSegmentProdOptionsT, UnsortedSegmentMaxOptionsT, UnsortedSegmentMinOptionsT, UnsortedSegmentSumOptionsT, ATan2OptionsT, SignOptionsT]
self.builtinOptions = None # type: Union[None, Conv2DOptionsT, DepthwiseConv2DOptionsT, ConcatEmbeddingsOptionsT, LSHProjectionOptionsT, Pool2DOptionsT, SVDFOptionsT, RNNOptionsT, FullyConnectedOptionsT, SoftmaxOptionsT, ConcatenationOptionsT, AddOptionsT, L2NormOptionsT, LocalResponseNormalizationOptionsT, LSTMOptionsT, ResizeBilinearOptionsT, CallOptionsT, ReshapeOptionsT, SkipGramOptionsT, SpaceToDepthOptionsT, EmbeddingLookupSparseOptionsT, MulOptionsT, PadOptionsT, GatherOptionsT, BatchToSpaceNDOptionsT, SpaceToBatchNDOptionsT, TransposeOptionsT, ReducerOptionsT, SubOptionsT, DivOptionsT, SqueezeOptionsT, SequenceRNNOptionsT, StridedSliceOptionsT, ExpOptionsT, TopKV2OptionsT, SplitOptionsT, LogSoftmaxOptionsT, CastOptionsT, DequantizeOptionsT, MaximumMinimumOptionsT, ArgMaxOptionsT, LessOptionsT, NegOptionsT, PadV2OptionsT, GreaterOptionsT, GreaterEqualOptionsT, LessEqualOptionsT, SelectOptionsT, SliceOptionsT, TransposeConvOptionsT, SparseToDenseOptionsT, TileOptionsT, ExpandDimsOptionsT, EqualOptionsT, NotEqualOptionsT, ShapeOptionsT, PowOptionsT, ArgMinOptionsT, FakeQuantOptionsT, PackOptionsT, LogicalOrOptionsT, OneHotOptionsT, LogicalAndOptionsT, LogicalNotOptionsT, UnpackOptionsT, FloorDivOptionsT, SquareOptionsT, ZerosLikeOptionsT, FillOptionsT, BidirectionalSequenceLSTMOptionsT, BidirectionalSequenceRNNOptionsT, UnidirectionalSequenceLSTMOptionsT, FloorModOptionsT, RangeOptionsT, ResizeNearestNeighborOptionsT, LeakyReluOptionsT, SquaredDifferenceOptionsT, MirrorPadOptionsT, AbsOptionsT, SplitVOptionsT, UniqueOptionsT, ReverseV2OptionsT, AddNOptionsT, GatherNdOptionsT, CosOptionsT, WhereOptionsT, RankOptionsT, ReverseSequenceOptionsT, MatrixDiagOptionsT, QuantizeOptionsT, MatrixSetDiagOptionsT, HardSwishOptionsT, IfOptionsT, WhileOptionsT, DepthToSpaceOptionsT, NonMaxSuppressionV4OptionsT, NonMaxSuppressionV5OptionsT, ScatterNdOptionsT, SelectV2OptionsT, DensifyOptionsT, SegmentSumOptionsT, BatchMatMulOptionsT, CumsumOptionsT, CallOnceOptionsT, BroadcastToOptionsT, Rfft2dOptionsT, Conv3DOptionsT, HashtableOptionsT, HashtableFindOptionsT, HashtableImportOptionsT, HashtableSizeOptionsT, VarHandleOptionsT, ReadVariableOptionsT, AssignVariableOptionsT, RandomOptionsT, BucketizeOptionsT, GeluOptionsT, DynamicUpdateSliceOptionsT, UnsortedSegmentProdOptionsT, UnsortedSegmentMaxOptionsT, UnsortedSegmentMinOptionsT, UnsortedSegmentSumOptionsT, ATan2OptionsT, SignOptionsT, BitcastOptionsT]
self.customOptions = None # type: List[int]
self.customOptionsFormat = 0 # type: int
self.mutatingVariableInputs = None # type: List[bool]
9 changes: 7 additions & 2 deletions tensorflow/lite/schema/schema.fbs
@@ -413,7 +413,8 @@ enum BuiltinOperator : int32 {
UNSORTED_SEGMENT_SUM = 155,
ATAN2 = 156,
UNSORTED_SEGMENT_MIN = 157,
SIGN = 158
SIGN = 158,
BITCAST = 159
}
// LINT.ThenChange(nnapi_linter/linter.proto)

@@ -541,7 +542,8 @@ union BuiltinOptions {
UnsortedSegmentMinOptions,
UnsortedSegmentSumOptions,
ATan2Options,
SignOptions
SignOptions,
BitcastOptions
}

// LINT.IfChange
@@ -1178,6 +1180,9 @@ table UnsortedSegmentMinOptions{
table SignOptions {
}

table BitcastOptions {
}


// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.