7.0b2 Release (#1945)
* 7.0b2 Release

* Fix flake8 errors

* Delete comment copied from trees documentation

* Skip unit test
TobyRoseman committed Aug 15, 2023
1 parent d52d536 commit 5765495
Showing 128 changed files with 14,460 additions and 5,978 deletions.
47 changes: 26 additions & 21 deletions coremlpython/CoreMLPythonUtils.mm
@@ -444,35 +444,40 @@ static size_t sizeOfArrayElement(MLMultiArrayDataType type) {
return py::none();
}
MLMultiArrayDataType type = value.dataType;
if (type == MLMultiArrayDataTypeFloat16) {
// Cast to fp32 because py:array doesn't support fp16.
// TODO: rdar://92239209 : return np.float16 instead of np.float32 when multiarray type is Float16
value = [MLMultiArray multiArrayByConcatenatingMultiArrays:@[value] alongAxis:0 dataType:MLMultiArrayDataTypeFloat32];
type = value.dataType;
}

std::vector<size_t> shape = Utils::convertNSArrayToCpp(value.shape);
std::vector<size_t> strides = Utils::convertNSArrayToCpp(value.strides);

// convert strides to numpy (bytes) instead of mlkit (elements)
for (size_t& stride : strides) {
stride *= sizeOfArrayElement(type);
}

switch (type) {
case MLMultiArrayDataTypeInt32:
return py::array(shape, strides, static_cast<const int32_t*>(value.dataPointer));
case MLMultiArrayDataTypeFloat32:
return py::array(shape, strides, static_cast<const float*>(value.dataPointer));
case MLMultiArrayDataTypeFloat16:
{
// create a float32 array, cast float16 values and copy into it
// TODO: rdar://92239209 : return np.float16 instead of np.float32 when multiarray type is Float16
std::vector<float> value_fp32(value.count, 0.0);
for (size_t i=0; i<value.count; i++) {
value_fp32[i] = [value[i] floatValue];
}
return py::array(shape, strides, value_fp32.data());

__block py::object array;
[value getBytesWithHandler:^(const void *bytes, NSInteger size) {
switch (type) {
case MLMultiArrayDataTypeInt32:
array = py::array(shape, strides, reinterpret_cast<const int32_t *>(bytes));
break;
case MLMultiArrayDataTypeFloat32:
array = py::array(shape, strides, reinterpret_cast<const float *>(bytes));
break;
case MLMultiArrayDataTypeFloat64:
array = py::array(shape, strides, reinterpret_cast<const double *>(bytes));
break;
default:
assert(false);
array = py::object();
}
case MLMultiArrayDataTypeDouble:
return py::array(shape, strides, static_cast<const double*>(value.dataPointer));
default:
assert(false);
return py::object();
}
}];

return array;
}

py::object Utils::convertDictionaryValueToPython(NSDictionary<NSObject *,NSNumber *> * dict) {
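The change above swaps the per-element ``[value[i] floatValue]`` copy for a single ``getBytesWithHandler`` block copy when converting an ``MLMultiArray`` to ``py::array``, and keeps casting Float16 arrays to Float32 first because ``py::array`` has no fp16 support (rdar://92239209). From Python the behavior is unchanged; a minimal sketch of what callers see, assuming a hypothetical ``model.mlpackage`` with an input named ``"input"``:

```python
import numpy as np
import coremltools as ct

# Hypothetical model path and input name, for illustration only.
model = ct.models.MLModel("model.mlpackage")
x = np.random.rand(1, 3, 224, 224).astype(np.float32)

outputs = model.predict({"input": x})
for name, value in outputs.items():
    # MultiArray outputs arrive as NumPy arrays; a Float16 MultiArray is still
    # surfaced as np.float32, per the rdar://92239209 TODO above.
    print(name, value.dtype, value.shape)
```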
59 changes: 45 additions & 14 deletions coremltools/converters/_converters_entry.py
Expand Up @@ -14,6 +14,7 @@
)
from coremltools import ComputeUnit as _ComputeUnit
from coremltools import __version__ as _ct_version
from coremltools import _logger as logger
from coremltools._deps import _HAS_TF_1, _HAS_TF_2, _HAS_TORCH
from coremltools.converters._profile_utils import _profile
from coremltools.converters.mil._deployment_compatibility import (
@@ -156,6 +157,8 @@ def convert(
``ct.utils.rename_feature`` API.
- If ``dtype`` is not specified, it defaults to the ``dtype`` of the
inputs in the TF model.
- For ``minimum_deployment_target >= ct.target.macOS13`` with ``compute_precision`` in float 16 precision:
if ``inputs`` is not provided or ``dtype`` is not specified, float 32 inputs default to float 16.
* PyTorch:
- The ``inputs`` parameter is required.
@@ -166,7 +169,10 @@
- If the ``name`` argument is specified with ``TensorType`` or
``ImageType``, the converted Core ML model will have inputs with
the same name.
- If ``dtype`` is missing, it defaults to float 32.
- If ``dtype`` is missing:
* For ``minimum_deployment_target <= ct.target.macOS12``, it defaults to float 32.
* For ``minimum_deployment_target >= ct.target.macOS13`` with ``compute_precision`` in float 16 precision,
it defaults to float 16.
outputs : list of ``TensorType`` or ``ImageType`` (optional)
@@ -206,13 +212,20 @@
- If specified, the ``name`` with ``TensorType`` or ``ImageType``
must correspond to a node in the TF graph. In this case, the model
will be converted up to that node.
- For ``minimum_deployment_target >= ct.target.macOS13`` with ``compute_precision`` in float 16 precision:
if ``dtype`` is not specified, outputs inferred as float 32
default to float 16.
* PyTorch:
- If specified, the length of the list must match the number of
outputs returned by the PyTorch model.
- If ``name`` is specified, it is applied to the output names of the
converted Core ML model.
- For ``minimum_deployment_target >= ct.target.macOS13`` with ``compute_precision`` in float 16 precision:
if ``dtype`` is not specified, outputs inferred as float 32
default to float 16.
classifier_config : ClassifierConfig class (optional)
The configuration if the MLModel is intended to be a classifier.
@@ -221,7 +234,7 @@
A member of the ``coremltools.target`` enum.
The value of this parameter determines the type of the model
representation produced by the converter. To learn about the differences
between neural networks and ML programs, see
between ML programs and neural networks, see
`ML Programs <https://coremltools.readme.io/docs/ml-programs>`_.
- The converter produces a neural network (``neuralnetwork``) if:
@@ -239,33 +252,34 @@
coremltools.target.tvOS15:
- If neither the ``minimum_deployment_target`` nor the ``convert_to``
parameter is specified, the converter produces the neural network
parameter is specified, the converter produces the ML programs
model type with as minimum of a deployment target as possible.
- If this parameter is specified and ``convert_to`` is also specified,
they must be compatible. The following are examples of invalid values:
::
# Invalid:
convert_to="neuralnetwork", minimum_deployment_target=coremltools.target.iOS15
# Invalid:
convert_to="mlprogram", minimum_deployment_target=coremltools.target.iOS14
# Invalid:
convert_to="neuralnetwork", minimum_deployment_target=coremltools.target.iOS15
convert_to : str (optional)
Must be one of [``'neuralnetwork'``, ``'mlprogram'``, ``'milinternal'``].
Must be one of [``'mlprogram'``, ``'neuralnetwork'``, ``'milinternal'``].
The value of this parameter determines the type of the model
representation produced by the converter. To learn about the
differences between neural networks and ML programs, see
differences between ML programs and neural networks, see
`ML Programs <https://coremltools.readme.io/docs/ml-programs>`_.
- ``'mlprogram'`` : Returns an MLModel (``coremltools.models.MLModel``)
containing a MILSpec.Program proto, which is the Core ML program format.
The model saved from this returned object is executable on iOS15,
macOS12, watchOS8, and tvOS15.
- ``'neuralnetwork'``: Returns an MLModel (``coremltools.models.MLModel``)
containing a NeuralNetwork proto, which is the original Core ML format.
The model saved from this returned object is executable either on
iOS13/macOS10.15/watchOS6/tvOS13 and newer, or on
iOS14/macOS11/watchOS7/tvOS14 and newer, depending on the layers used
in the model.
- ``'mlprogram'`` : Returns an MLModel (``coremltools.models.MLModel``)
containing a MILSpec.Program proto, which is the Core ML program format.
The model saved from this returned object is executable on iOS15,
macOS12, watchOS8, and tvOS15.
- ``'milinternal'``: Returns an MIL program object
(``coremltools.converters.mil.Program``). An MIL program is primarily
used for debugging and inspection. It can be converted to an MLModel for
Expand All @@ -275,7 +289,7 @@ def convert(
ct.convert(mil_program, convert_to="mlprogram")
- If neither the ``minimum_deployment_target`` nor the ``convert_to``
parameter is specified, the converter produces the neural network
parameter is specified, the converter produces the ML programs
model type with as minimum of a deployment target as possible.
compute_precision : coremltools.precision enumeration or ct.transform.FP16ComputePrecision() (optional)
@@ -504,10 +518,11 @@ def skip_real_div_ops(op):
exact_target,
minimum_deployment_target,
)
need_fp16_cast_pass = _need_fp16_cast_pass(compute_precision, exact_target)

if pass_pipeline is None:
pass_pipeline = PassPipeline()
if not _need_fp16_cast_pass(compute_precision, exact_target):
if not need_fp16_cast_pass:
pass_pipeline.remove_passes({"common::add_fp16_cast"})
if isinstance(compute_precision, FP16ComputePrecision):
# For backward compatibility with the `op_selector` param in FP16ComputePrecision.
@@ -527,6 +542,12 @@ def skip_real_div_ops(op):
if specification_version is None:
specification_version = _set_default_specification_version(exact_target)

use_default_fp16_io = (
specification_version is not None
and specification_version >= AvailableTarget.iOS16
and need_fp16_cast_pass
)

mlmodel = mil_convert(
model,
convert_from=exact_source,
@@ -540,6 +561,7 @@ def skip_real_div_ops(op):
debug=debug,
specification_version=specification_version,
main_pipeline=pass_pipeline,
use_default_fp16_io=use_default_fp16_io,
)

if exact_target == "mlprogram" and mlmodel._input_has_infinite_upper_bound():
@@ -890,6 +912,15 @@ def _determine_target(convert_to, minimum_deployment_target):
"""
Infer the precise backend target, which could be one of ``milinternal``, ``neuralnetwork`` or ``mlprogram``
"""
if minimum_deployment_target is None and convert_to is None:
logger.warning(
"When both 'convert_to' and 'minimum_deployment_target' not specified, "
"'convert_to' is set to \"mlprogram\" and 'minimum_deployment_targer' is set to "
"ct.target.iOS15 (which is same as ct.target.macOS12). "
"Note: the model will not run on systems older than iOS15/macOS12/watchOS8/tvOS15. "
"In order to make your model run on older system, please set the 'minimum_deployment_target' to iOS14/iOS13. "
"Details please see the link: https://coremltools.readme.io/docs/unified-conversion-api#target-conversion-formats"
)
if minimum_deployment_target is not None:
if convert_to == "mlprogram" and minimum_deployment_target < AvailableTarget.iOS15:
raise ValueError(
@@ -908,7 +939,7 @@ def _determine_target(convert_to, minimum_deployment_target):
return convert_to
else:
if minimum_deployment_target is None:
return "neuralnetwork"
return "mlprogram"
elif minimum_deployment_target <= AvailableTarget.iOS14:
return "neuralnetwork"
else:
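Taken together, the ``_converters_entry.py`` changes make ``mlprogram`` the default backend when neither ``convert_to`` nor ``minimum_deployment_target`` is given (with the new warning above), and thread ``use_default_fp16_io`` through ``mil_convert`` so float 32 I/O defaults to float 16 when the specification version is iOS16+ and compute precision is float 16. A hedged sketch of the resulting call patterns; the tiny PyTorch module is a stand-in, not part of this commit:

```python
import torch
import coremltools as ct

# Stand-in model, only to make the sketch self-contained.
class TinyModel(torch.nn.Module):
    def forward(self, x):
        return x * 2.0

traced = torch.jit.trace(TinyModel().eval(), torch.rand(1, 3, 8, 8))

# With neither convert_to nor minimum_deployment_target, the converter now
# produces an ML program (previously a neural network) and logs the new warning.
mlmodel = ct.convert(traced, inputs=[ct.TensorType(shape=(1, 3, 8, 8))])

# Keeping the old backend, or opting into the fp16 default I/O explicitly:
nn_model = ct.convert(
    traced,
    inputs=[ct.TensorType(shape=(1, 3, 8, 8))],
    convert_to="neuralnetwork",
)
fp16_io_model = ct.convert(
    traced,
    inputs=[ct.TensorType(shape=(1, 3, 8, 8))],
    minimum_deployment_target=ct.target.iOS16,  # specification_version >= iOS16
    compute_precision=ct.precision.FLOAT16,     # default precision for mlprogram
)
```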
42 changes: 24 additions & 18 deletions coremltools/converters/mil/backend/mil/helper.py
@@ -10,10 +10,12 @@
import coremltools.proto.FeatureTypes_pb2 as ft
import coremltools.proto.MIL_pb2 as pm
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil.types import (builtin_to_proto_types,
builtin_to_string,
numpy_type_to_builtin_type,
type_to_builtin_type)
from coremltools.converters.mil.mil.types import (
BUILTIN_TO_PROTO_TYPES,
builtin_to_string,
numpy_type_to_builtin_type,
type_to_builtin_type,
)
from coremltools.converters.mil.mil.types.type_mapping import np_val_to_py_type
from coremltools.models.utils import _WEIGHTS_DIR_NAME, _WEIGHTS_FILE_NAME

@@ -91,20 +93,30 @@ def update_tensortype(t_type, shape, data_type):
set_proto_dim(t_dim, s)

def _tensor_field_by_type(tensor_val, builtin_type):
"""
Pick the field based on the builtin_type.
The field is defined in TensorValue in ``mlmodel/format/MIL.proto``.
The picked field needs to be consistent with how it will be read by MIL.
For example, int8 is serialized to the ``bytes`` field while int16 is serialized to the ``ints`` field.
"""
if builtin_type == types.bool:
return tensor_val.bools.values
elif types.is_int(builtin_type):
if (builtin_type == types.int64 or builtin_type == types.uint64):
if builtin_type == types.int64 or builtin_type == types.uint64:
return tensor_val.longInts.values
if builtin_type in (types.int8, types.uint8, types.uint32):
return tensor_val.bytes.values
if builtin_type == types.int16 or builtin_type == types.uint16:
# TODO (rdar://111797203): Serialize to byte after MIL changes to read from byte field.
return tensor_val.ints.values
return tensor_val.ints.values
elif types.is_float(builtin_type):
if (builtin_type == types.fp64):
if builtin_type == types.fp64:
return tensor_val.doubles.values
elif (builtin_type == types.fp32):
elif builtin_type == types.fp32:
return tensor_val.floats.values
elif (builtin_type == types.fp16):
elif builtin_type == types.fp16:
return tensor_val.bytes.values
else:
raise TypeError(
@@ -177,14 +189,8 @@ def create_scalar_value(py_scalar):

# Set the tensor value
t_field = _tensor_field_by_type(t_val, builtin_type)
if builtin_type in (
types.fp16,
types.int8,
types.uint8,
types.int16,
types.uint16,
types.uint32,
):
if builtin_type in (types.fp16, types.int8, types.uint8, types.uint32):
# Serialize to bytes because MIL reads them from the "bytes" field in TensorValue.
val.immediateValue.tensor.bytes.values = np_val_to_py_type(py_scalar)
else:
if builtin_type == types.str:
@@ -243,7 +249,7 @@ def create_file_value_tensor(file_name, offset, dim, data_type):


def types_to_proto_primitive(valuetype):
if valuetype not in builtin_to_proto_types:
if valuetype not in BUILTIN_TO_PROTO_TYPES:
additional_error_msg = ""
if valuetype in (types.complex64, types.complex128):
additional_error_msg = (
@@ -253,7 +259,7 @@ def types_to_proto_primitive(valuetype):
raise ValueError(
f"Unknown map from SSA type {valuetype} to Proto type. {additional_error_msg}"
)
return builtin_to_proto_types[valuetype]
return BUILTIN_TO_PROTO_TYPES[valuetype]


def types_to_proto(valuetype):
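The helper changes rename ``builtin_to_proto_types`` to ``BUILTIN_TO_PROTO_TYPES`` and document which ``TensorValue`` field each builtin dtype is serialized into: ``bytes`` for fp16/int8/uint8/uint32, ``ints`` for int16/uint16 (until rdar://111797203), ``longInts`` for 64-bit ints, and ``floats``/``doubles`` for fp32/fp64. A small sketch poking at these internal helpers, assuming the proto layout shown above (not a public API):

```python
import numpy as np

from coremltools.converters.mil.backend.mil.helper import create_scalar_value

# fp16 scalars land in the "bytes" field of TensorValue, fp32 in "floats",
# int32 in "ints" (see _tensor_field_by_type above).
fp16_val = create_scalar_value(np.float16(1.5))
print(fp16_val.immediateValue.tensor.bytes.values)        # raw little-endian bytes

fp32_val = create_scalar_value(np.float32(1.5))
print(list(fp32_val.immediateValue.tensor.floats.values))  # [1.5]

int32_val = create_scalar_value(np.int32(7))
print(list(int32_val.immediateValue.tensor.ints.values))   # [7]
```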
7 changes: 6 additions & 1 deletion coremltools/converters/mil/backend/mil/load.py
Expand Up @@ -429,7 +429,12 @@ def load(prog, weights_dir, resume_on_errors=False, specification_version=_SPECI
# Classifier outputs are set up separately, so default to fp32 for now.
dataType = ft.ArrayFeatureType.ArrayDataType.FLOAT32

array_type = ft.ArrayFeatureType(shape=None, dataType=dataType)
output_shape = (
None
if any_symbolic(var.shape) or types.is_primitive(var.sym_type)
else var.shape
)
array_type = ft.ArrayFeatureType(shape=output_shape, dataType=dataType)
output_feature_type.multiArrayType.CopyFrom(array_type)
output_features.append(ml.FeatureDescription(name=var.name, type=output_feature_type))
elif (types.is_dict(var.sym_type)):
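With the ``load.py`` change, concrete (non-symbolic, non-scalar) output shapes are now written into the output ``ArrayFeatureType`` instead of being left empty. A quick way to check this on a converted model, assuming a hypothetical ``model.mlpackage`` path (or the ``mlmodel`` from the conversion sketch above):

```python
import coremltools as ct

mlmodel = ct.models.MLModel("model.mlpackage")  # hypothetical path

spec = mlmodel.get_spec()
for out in spec.description.output:
    # Shapes are populated for concrete multiarray outputs; symbolic shapes
    # and scalar outputs still leave this list empty.
    print(out.name, list(out.type.multiArrayType.shape))
```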
@@ -79,7 +79,7 @@ def _adjust_var_dtype_helper(var, dtype):
def _get_io_supported_types(opset_version: target) -> Set[type]:
"""Get Core ML I/O supported data types based on opset version."""
supported_types = {types.fp32, types.int32}
if opset_version >= target.iOS16:
if opset_version is not None and opset_version >= target.iOS16:
supported_types.add(types.fp16)
return supported_types

@@ -88,7 +88,7 @@ def _get_runtime_supported_types(opset_version: target) -> Set[type]:
"""Get Core ML Runtime supported data types based on opset version."""
supported_types = {types.fp16, types.fp32, types.int32, types.str, types.bool}
if opset_version >= target.iOS17:
supported_types.update({types.int16, types.uint16})
supported_types.update({types.int8, types.uint8, types.int16, types.uint16})
return supported_types


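These last hunks matter because ``minimum_deployment_target`` may now legitimately be ``None`` when the fp16 I/O pass logic runs, and because iOS17 adds int8/uint8 to the runtime-supported set. A mirror of the updated helpers, for illustration only (the real ones live in the pass-definition module changed above):

```python
from typing import Optional, Set

from coremltools import target
from coremltools.converters.mil.mil import types


def io_supported_types(opset_version: Optional[target]) -> Set[type]:
    supported = {types.fp32, types.int32}
    # The None guard matters now that convert() can be called without a
    # minimum_deployment_target while still defaulting to mlprogram.
    if opset_version is not None and opset_version >= target.iOS16:
        supported.add(types.fp16)
    return supported


def runtime_supported_types(opset_version: target) -> Set[type]:
    supported = {types.fp16, types.fp32, types.int32, types.str, types.bool}
    if opset_version >= target.iOS17:
        # iOS17 adds 8-bit integer support alongside the 16-bit types.
        supported.update({types.int8, types.uint8, types.int16, types.uint16})
    return supported
```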

1 comment on commit 5765495

@antmikinka

Love it! Thank you!
