8.2 Release (#2433)
* 8.2 Release

* bump version to 8.2

* fix flake8 error https://gitlab.com/coremltools1/coremltools/-/jobs/8889872126

* fix CI flakiness: avoid duplicate elements in input tensor so output indices are unique (see the sketch below the file summary)

---------

Co-authored-by: yifan_shen3 <[email protected]>
YifanShenSZ and yifan_shen3 authored Jan 21, 2025
1 parent babbb03 commit 1a0d051
Showing 109 changed files with 3,198 additions and 2,487 deletions.
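
The CI-flakiness bullet in the commit message refers to index-returning ops: when a test input contains duplicate values, ops such as `topk`, `sort`, or `argmax` may legally return either duplicate's index, so PyTorch and the converted Core ML model can disagree. A minimal sketch of the idea, with hypothetical test values rather than the actual test from the suite:

```python
import torch

# Flaky: 3.0 appears twice, so the "winning" index of topk is ambiguous --
# two correct backends may return different indices for the same input.
flaky_input = torch.tensor([3.0, 1.0, 3.0, 2.0])

# Deterministic: a random permutation of 0..15 contains no duplicates, so
# the index output of topk is uniquely defined across backends.
stable_input = torch.randperm(16).float()

values, indices = torch.topk(stable_input, k=4)
```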
8 changes: 4 additions & 4 deletions .gitlab-ci.yml
@@ -90,7 +90,7 @@ test_py39_pytorch_intel:
PYTHON: "3.9"
TEST_PACKAGE: coremltools.converters.mil.frontend.torch
WHEEL_PATH: build/dist/*cp39*10_15*
- REQUIREMENTS: reqs/test.pip
+ REQUIREMENTS: reqs/test_torch.pip

test_py37_tf1_intel:
<<: *test_macos_pkg
@@ -200,7 +200,7 @@ test_py310_pytorch_script:
PYTHON: "3.10"
TEST_PACKAGE: coremltools.converters.mil.frontend.torch
WHEEL_PATH: build/dist/*cp310*11*
- REQUIREMENTS: reqs/test.pip
+ REQUIREMENTS: reqs/test_torch.pip
TORCH_FRONTENDS: TORCHSCRIPT

test_py310_pytorch_export:
@@ -213,7 +213,7 @@ test_py310_pytorch_export:
PYTHON: "3.10"
TEST_PACKAGE: coremltools.converters.mil.frontend.torch
WHEEL_PATH: build/dist/*cp310*11*
- REQUIREMENTS: reqs/test.pip
+ REQUIREMENTS: reqs/test_torch.pip
TORCH_FRONTENDS: TORCHEXPORT

test_py310_pytorch_executorch:
@@ -226,7 +226,7 @@ test_py310_pytorch_executorch:
PYTHON: "3.10"
TEST_PACKAGE: coremltools.converters.mil.frontend.torch
WHEEL_PATH: build/dist/*cp310*11*
- REQUIREMENTS: reqs/test.pip
+ REQUIREMENTS: reqs/test_executorch.pip
TORCH_FRONTENDS: EXECUTORCH

test_py310_tf2-1:
26 changes: 24 additions & 2 deletions coremlpython/CoreMLPython.h
@@ -17,6 +17,7 @@
#import <Availability.h>

#import <vector>
+ #import <optional>

#ifndef BUILT_WITH_MACOS15_SDK
#define BUILT_WITH_MACOS15_SDK \
@@ -73,6 +74,9 @@ namespace CoreML {
};

struct CPUComputeDevice {
+ inline CPUComputeDevice():
+ m_impl(nil) {}
+
// MLCPUComputeDevice must be wrapped in a C++ class for PyBind.
inline CPUComputeDevice(id impl):
m_impl(impl) {}
@@ -90,6 +94,9 @@ namespace CoreML {
};

struct GPUComputeDevice {
+ inline GPUComputeDevice():
+ m_impl(nil) {}
+
// MLGPUComputeDevice must be wrapped in a C++ class for PyBind.
inline GPUComputeDevice(id impl):
m_impl(impl) {}
@@ -107,6 +114,9 @@ namespace CoreML {
};

struct NeuralEngineComputeDevice {
+ inline NeuralEngineComputeDevice():
+ m_impl(nil) {}
+
// MLNeuralEngineComputeDevice must be wrapped in a C++ class for PyBind.
inline NeuralEngineComputeDevice(id impl):
m_impl(impl) {}
@@ -160,6 +170,9 @@ namespace CoreML {
};

struct ComputePlan {
+ inline ComputePlan():
+ m_impl(nil), m_modelStructure(py::none()) {}
+
// MLComputePlan must be wrapped in a C++ class for PyBind.
inline ComputePlan(id impl, py::object modelStructure):
m_impl(impl),
@@ -191,10 +204,12 @@ namespace CoreML {
m_impl(impl),
m_datas(std::move(datas)) {}

+ #if ML_MODEL_ASSET_IS_AVAILABLE
API_AVAILABLE(macos(13.0))
inline MLModelAsset *getImpl() const {
return (MLModelAsset *)m_impl;
}
+ #endif

id m_impl = nil;
std::vector<py::bytes> m_datas;
@@ -205,6 +220,8 @@ namespace CoreML {
MLModel *m_model = nil;
NSURL *compiledUrl = nil;
bool m_deleteCompiledModelOnExit = false;
+ std::optional<uint64_t> m_loadDurationInNanoSeconds;
+ std::optional<uint64_t> m_lastPredictDurationInNanoSeconds;

public:
static py::bytes autoSetSpecificationVersion(const py::bytes& modelBytes);
@@ -229,11 +246,16 @@ namespace CoreML {

explicit Model(MLModel* m_model, NSURL* compiledUrl, bool deleteCompiledModelOnExit);

- py::list batchPredict(const py::list& batch) const;
+ py::list batchPredict(const py::list& batch);

py::str getCompiledModelPath() const;

- py::dict predict(const py::dict& input, State* state=NULL) const;
+ py::dict predict(const py::dict& input, State* state=NULL);

+ py::object getLoadDurationInNanoSeconds() const;
+
+ py::object getLastPredictDurationInNanoSeconds() const;

#if BUILT_WITH_MACOS15_SDK
static void setOptimizationHints(MLModelConfiguration *configuration, const py::dict& optimizationHints);
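
The default constructors added above exist so these proxy types can be default-constructed from Python; the matching `.def(py::init())` bindings are added in CoreMLPython.mm below. A hedged sketch of what that enables, assuming the compiled extension is importable as `coremltools.libcoremlpython` (an internal detail that may change):

```python
# Sketch only: the module path and proxy names are internal details.
from coremltools import libcoremlpython as _lib

# With the default constructors (and .def(py::init()) in the bindings),
# the device proxies can now be instantiated directly from Python:
cpu = _lib._MLCPUComputeDeviceProxy()
gpu = _lib._MLGPUComputeDeviceProxy()
ane = _lib._MLNeuralEngineComputeDeviceProxy()
```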
72 changes: 61 additions & 11 deletions coremlpython/CoreMLPython.mm
@@ -4,6 +4,7 @@
// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

#import <CoreML/CoreML.h>
+ #include <mach/mach_time.h>
#import "CoreMLPythonArray.h"
#import "CoreMLPython.h"
#import "CoreMLPythonUtils.h"
@@ -289,20 +290,34 @@
}
#endif

+ uint64_t convertMachTimeToNanoSeconds(uint64_t time) {
+     static dispatch_once_t once;
+     static mach_timebase_info_data_t timebase;
+     dispatch_once(&once, ^{
+         mach_timebase_info(&timebase);
+     });
+     uint64_t result = (time * timebase.numer) / timebase.denom;
+     return result;
+ }
+
#if ML_MODEL_ASSET_IS_AVAILABLE
API_AVAILABLE(macos(13.0))
- MLModel * createModelFromModelAsset(MLModelAsset *modelAsset,
-                                     MLModelConfiguration *configuration,
-                                     NSError * __autoreleasing *error) {
+ std::pair<MLModel *, uint64_t> createModelFromModelAsset(
+     MLModelAsset *modelAsset,
+     MLModelConfiguration *configuration,
+     NSError * __autoreleasing *error
+ ) {
dispatch_semaphore_t sem = dispatch_semaphore_create(0);
__block MLModel *result = nil;
__block NSError *lError = nil;
+ uint64_t loadStartTime = mach_absolute_time();
+ __block uint64_t loadEndTime = loadStartTime;
[MLModel loadModelAsset:modelAsset
configuration:configuration
completionHandler:^(MLModel * _Nullable model, NSError * _Nullable loadError){
result = model;
lError = loadError;
+ loadEndTime = mach_absolute_time();
dispatch_semaphore_signal(sem);
}];

@@ -312,9 +327,9 @@
*error = lError;
}

- return result;
+ uint64_t loadDurationInNanoSeconds = convertMachTimeToNanoSeconds(loadEndTime - loadStartTime);
+ return {result, loadDurationInNanoSeconds};
}
#endif
}

@@ -380,18 +395,25 @@ bool usingMacOS13OrHigher() {
configuration.functionName = [NSString stringWithUTF8String:functionName.c_str()];
}
#endif
+ uint64_t loadDurationInNanoSeconds = 0;
// Create MLModel
if (asset.is_none()) {
+     uint64_t loadStartTime = mach_absolute_time();
m_model = [MLModel modelWithContentsOfURL:compiledUrl configuration:configuration error:&error];
+     uint64_t loadEndTime = mach_absolute_time();
+     loadDurationInNanoSeconds = convertMachTimeToNanoSeconds(loadEndTime - loadStartTime);
} else {
#if ML_MODEL_ASSET_IS_AVAILABLE
-     m_model = createModelFromModelAsset(py::cast<ModelAsset>(asset).getImpl(), configuration, &error);
+     auto pair = createModelFromModelAsset(py::cast<ModelAsset>(asset).getImpl(), configuration, &error);
+     m_model = pair.first;
+     loadDurationInNanoSeconds = pair.second;
#else
throw std::runtime_error("MLModelAsset is only available on macOS >= 13.0");
#endif
}

Utils::handleError(error);
+ m_loadDurationInNanoSeconds = loadDurationInNanoSeconds;
}
}

@@ -410,13 +432,14 @@
}

- py::dict Model::predict(const py::dict& input, State* state) const {
+ py::dict Model::predict(const py::dict& input, State* state) {
@autoreleasepool {
NSError *error = nil;
MLDictionaryFeatureProvider *inFeatures = Utils::dictToFeatures(input, &error);
Utils::handleError(error);

id<MLFeatureProvider> outFeatures;
+ uint64_t predictStartTime = mach_absolute_time();
#if BUILT_WITH_MACOS15_SDK
if (state == NULL) {
outFeatures = [m_model predictionFromFeatures:static_cast<MLDictionaryFeatureProvider * _Nonnull>(inFeatures)
@@ -430,8 +453,10 @@
outFeatures = [m_model predictionFromFeatures:static_cast<MLDictionaryFeatureProvider * _Nonnull>(inFeatures)
error:&error];
#endif

+ uint64_t predictEndTime = mach_absolute_time();
Utils::handleError(error);

+ m_lastPredictDurationInNanoSeconds = convertMachTimeToNanoSeconds(predictEndTime - predictStartTime);
return Utils::featuresToDict(outFeatures);
}
}
@@ -485,7 +510,7 @@
}
#endif

- py::list Model::batchPredict(const py::list& batch) const {
+ py::list Model::batchPredict(const py::list& batch) {
@autoreleasepool {
NSError* error = nil;

@@ -498,11 +523,14 @@
}
MLArrayBatchProvider* batchProvider = [[MLArrayBatchProvider alloc] initWithFeatureProviderArray: array];

+ uint64_t predictStartTime = mach_absolute_time();
// Get predictions
MLArrayBatchProvider* predictions = (MLArrayBatchProvider*)[m_model predictionsFromBatch:batchProvider
error:&error];
+ uint64_t predictEndTime = mach_absolute_time();
Utils::handleError(error);

+ m_lastPredictDurationInNanoSeconds = convertMachTimeToNanoSeconds(predictEndTime - predictStartTime);
// Convert predictions to output
py::list ret;
for (int i = 0; i < predictions.array.count; i++) {
@@ -773,6 +801,22 @@ bool usingMacOS13OrHigher() {
return CoreML::MLMODEL_SPECIFICATION_VERSION_NEWEST;
}

+ py::object Model::getLoadDurationInNanoSeconds() const {
+     if (m_loadDurationInNanoSeconds) {
+         return py::cast(m_loadDurationInNanoSeconds.value());
+     }
+
+     return py::none();
+ }
+
+ py::object Model::getLastPredictDurationInNanoSeconds() const {
+     if (m_lastPredictDurationInNanoSeconds) {
+         return py::cast(m_lastPredictDurationInNanoSeconds.value());
+     }
+
+     return py::none();
+ }
+
/*
*
* bindings
@@ -788,6 +832,8 @@
.def("predict", &Model::predict)
.def("batchPredict", &Model::batchPredict)
.def("get_compiled_model_path", &Model::getCompiledModelPath)
+ .def("get_load_duration_in_nano_seconds", &Model::getLoadDurationInNanoSeconds)
+ .def("get_last_predict_duration_in_nano_seconds", &Model::getLastPredictDurationInNanoSeconds)
.def_static("auto_set_specification_version", &Model::autoSetSpecificationVersion)
.def_static("maximum_supported_specification_version", &Model::maximumSupportedSpecificationVersion)
#if BUILT_WITH_MACOS15_SDK
@@ -804,14 +850,18 @@
py::class_<State>(m, "_State", py::module_local());

#if ML_COMPUTE_DEVICE_IS_AVAILABLE
- py::class_<CPUComputeDevice>(m, "_MLCPUComputeDeviceProxy", py::module_local());
- py::class_<GPUComputeDevice>(m, "_MLGPUComputeDeviceProxy", py::module_local());
+ py::class_<CPUComputeDevice>(m, "_MLCPUComputeDeviceProxy", py::module_local())
+     .def(py::init());
+ py::class_<GPUComputeDevice>(m, "_MLGPUComputeDeviceProxy", py::module_local())
+     .def(py::init());
py::class_<NeuralEngineComputeDevice>(m, "_MLNeuralEngineComputeDeviceProxy", py::module_local())
+     .def(py::init())
.def("get_total_core_count", &NeuralEngineComputeDevice::getTotalCoreCount);
#endif

#if ML_COMPUTE_PLAN_IS_AVAILABLE
py::class_<ComputePlan>(m, "_MLComputePlanProxy", py::module_local())
+     .def(py::init())
.def_property_readonly("model_structure", &ComputePlan::getModelStructure)
.def("get_compute_device_usage_for_mlprogram_operation", &ComputePlan::getComputeDeviceUsageForMLProgramOperation)
.def("get_compute_device_usage_for_neuralnetwork_layer", &ComputePlan::getComputeDeviceUsageForNeuralNetworkLayer)
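
A hedged sketch of exercising the new timing getters from Python. The method names come from the `.def(...)` bindings above; reaching the pybind proxy through `MLModel.__proxy__`, the model path, and the input dict are assumptions for illustration:

```python
import numpy as np
import coremltools as ct

model = ct.models.MLModel("MyModel.mlpackage")  # hypothetical path
proxy = model.__proxy__  # internal pybind proxy (assumption)

load_ns = proxy.get_load_duration_in_nano_seconds()
if load_ns is not None:  # the binding returns py::none() until a load is recorded
    print(f"load: {load_ns / 1e6:.2f} ms")

model.predict({"input": np.zeros((1, 3, 224, 224), dtype=np.float32)})  # hypothetical input
predict_ns = proxy.get_last_predict_duration_in_nano_seconds()
if predict_ns is not None:
    print(f"last predict: {predict_ns / 1e6:.2f} ms")
```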
2 changes: 1 addition & 1 deletion coremltools/__init__.py
@@ -116,9 +116,9 @@ class SpecializationStrategy(_Enum):
_LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_NEURALNETWORK = _SPECIFICATION_VERSION_IOS_13
_LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_MILPROGRAM = _SPECIFICATION_VERSION_IOS_15


# expose sub packages as directories
from . import converters, models, optimize, proto

# expose unified converter in coremltools package level
from .converters import ClassifierConfig
from .converters import ColorLayout as colorlayout
2 changes: 1 addition & 1 deletion coremltools/_deps/__init__.py
@@ -153,7 +153,7 @@ def __get_sklearn_version(version):

# ---------------------------------------------------------------------------------------
_HAS_TORCH = True
- _TORCH_MAX_VERSION = "2.4.0"
+ _TORCH_MAX_VERSION = "2.5.0"
_HAS_TORCH_EXPORT_API = False
_CT_OPTIMIZE_TORCH_MIN_VERSION = "2.1.0"
_IMPORT_CT_OPTIMIZE_TORCH = False
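
Bumping `_TORCH_MAX_VERSION` raises the newest torch release this build is tested against. A hedged illustration of the kind of guard such a pin typically drives; the actual check lives elsewhere in `_deps` and may differ:

```python
from packaging import version

_TORCH_MAX_VERSION = "2.5.0"

def _warn_if_torch_untested(torch_version: str) -> None:
    # Illustrative only: newer-than-tested torch is allowed but unverified.
    if version.parse(torch_version) > version.parse(_TORCH_MAX_VERSION):
        print(f"torch=={torch_version} is newer than the newest tested "
              f"release ({_TORCH_MAX_VERSION})")

_warn_if_torch_untested("2.6.0")
```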
12 changes: 6 additions & 6 deletions coremltools/converters/libsvm/_libsvm_converter.py
@@ -3,8 +3,9 @@
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

+ import coremltools as ct
from coremltools import __version__ as ct_version
- from coremltools.models import _METADATA_SOURCE, _METADATA_VERSION
+ from coremltools import proto

from ... import SPECIFICATION_VERSION
from ..._deps import _HAS_LIBSVM
@@ -58,12 +59,11 @@ def convert(libsvm_model, feature_names, target, input_length, probability):
from libsvm import svm as _svm

from ...models import MLModel
- from ...proto import Model_pb2

svm_type_enum = libsvm_model.param.svm_type

# Create the spec
- export_spec = Model_pb2.Model()
+ export_spec = proto.Model_pb2.Model()
export_spec.specificationVersion = SPECIFICATION_VERSION

if svm_type_enum == _svm.EPSILON_SVR or svm_type_enum == _svm.NU_SVR:
@@ -90,7 +90,7 @@ def convert(libsvm_model, feature_names, target, input_length, probability):
input = export_spec.description.input.add()
input.name = feature_names
input.type.multiArrayType.shape.append(input_length)
- input.type.multiArrayType.dataType = Model_pb2.ArrayFeatureType.DOUBLE
+ input.type.multiArrayType.dataType = proto.Model_pb2.ArrayFeatureType.DOUBLE

else:
# input will be a series of doubles
@@ -193,7 +193,7 @@ def convert(libsvm_model, feature_names, target, input_length, probability):
from libsvm import __version__ as libsvm_version

libsvm_version = "libsvm=={0}".format(libsvm_version)
- model.user_defined_metadata[_METADATA_VERSION] = ct_version
- model.user_defined_metadata[_METADATA_SOURCE] = libsvm_version
+ model.user_defined_metadata[ct.models._METADATA_VERSION] = ct_version
+ model.user_defined_metadata[ct.models._METADATA_SOURCE] = libsvm_version

return model
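
A hedged sketch of what the metadata change preserves: converted models still record the converter versions under the same private keys, now referenced through the `ct.models` namespace (hypothetical model path; the underscore-prefixed keys are private API):

```python
import coremltools as ct

mlmodel = ct.models.MLModel("converted_svm.mlmodel")  # hypothetical path

# Same underlying keys as the previous `from coremltools.models import ...` spelling.
print(mlmodel.user_defined_metadata[ct.models._METADATA_VERSION])  # e.g. "8.2"
print(mlmodel.user_defined_metadata[ct.models._METADATA_SOURCE])   # e.g. "libsvm==3.35"
```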