Migrate backends/apple/coreml to the new namespace #6076

Open · wants to merge 1 commit into base: release/0.4
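
Summary: this PR migrates the Core ML backend off the deprecated `torch::executor` umbrella namespace and onto the new split namespaces: `executorch::runtime` for runtime types, `executorch::aten` for tensor types, and `executorch::backends::coreml` for the delegate itself. The same mechanical pattern is applied to the delegate implementation, its public header, the SDK event logger, the tests, and the example runner. A minimal sketch of the pattern (illustrative only; the concrete declarations are in the diff below):

    // Before: one blanket using-directive on the deprecated namespace.
    using namespace torch::executor;

    // After: explicit using-declarations on the new, split namespaces.
    using executorch::aten::ScalarType;
    using executorch::runtime::Error;
    using executorch::runtime::Result;

    // The delegate class itself moves from torch::executor into:
    namespace executorch {
    namespace backends {
    namespace coreml {
    class CoreMLBackendDelegate;
    } // namespace coreml
    } // namespace backends
    } // namespace executorch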
49 changes: 32 additions & 17 deletions backends/apple/coreml/runtime/delegate/coreml_backend_delegate.mm
@@ -25,9 +25,22 @@
#endif

namespace {
using namespace torch::executor;
using namespace executorchcoreml;

using executorch::aten::ScalarType;
using executorch::runtime::ArrayRef;
using executorch::runtime::Backend;
using executorch::runtime::BackendExecutionContext;
using executorch::runtime::BackendInitContext;
using executorch::runtime::CompileSpec;
using executorch::runtime::DelegateHandle;
using executorch::runtime::EValue;
using executorch::runtime::Error;
using executorch::runtime::EventTracerDebugLogLevel;
using executorch::runtime::FreeableBuffer;
using executorch::runtime::get_backend_class;
using executorch::runtime::Result;

std::optional<MultiArray::DataType> get_data_type(ScalarType scalar_type) {
switch (scalar_type) {
case ScalarType::Bool:
@@ -60,14 +73,14 @@
if (!eValue->isTensor()) {
return std::nullopt;
}

auto tensor = eValue->toTensor();
auto dataType = get_data_type(tensor.scalar_type());
if (!dataType.has_value()) {
ET_LOG(Error, "%s: DataType=%d is not supported", ETCoreMLStrings.delegateIdentifier.UTF8String, (int)tensor.scalar_type());
return std::nullopt;
}

std::vector<ssize_t> strides(tensor.strides().begin(), tensor.strides().end());
std::vector<size_t> shape(tensor.sizes().begin(), tensor.sizes().end());
MultiArray::MemoryLayout layout(dataType.value(), std::move(shape), std::move(strides));
@@ -86,29 +99,29 @@
if (!dict) {
return std::nullopt;
}

BackendDelegate::Config config;
{
NSNumber *should_prewarm_model = SAFE_CAST(dict[@"shouldPrewarmModel"], NSNumber);
if (should_prewarm_model) {
config.should_prewarm_model = static_cast<bool>(should_prewarm_model.boolValue);
}
}

{
NSNumber *should_prewarm_asset = SAFE_CAST(dict[@"shouldPrewarmAsset"], NSNumber);
if (should_prewarm_asset) {
config.should_prewarm_asset = static_cast<bool>(should_prewarm_asset.boolValue);
}
}

{
NSNumber *max_models_cache_size_in_bytes = SAFE_CAST(dict[@"maxModelsCacheSizeInBytes"], NSNumber);
if (max_models_cache_size_in_bytes) {
config.max_models_cache_size = max_models_cache_size_in_bytes.unsignedLongLongValue;
}
}

return config;
}

@@ -127,14 +140,15 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
auto debug_level = event_tracer->event_tracer_debug_level();
options.log_intermediate_tensors = (debug_level >= EventTracerDebugLogLevel::kIntermediateOutputs);
}

return options;
}

} //namespace

namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace coreml {

using namespace executorchcoreml;

@@ -154,7 +168,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
auto buffer = Buffer(spec.value.buffer, spec.value.nbytes);
specs_map.emplace(spec.key, std::move(buffer));
}

auto buffer = Buffer(processed->data(), processed->size());
std::error_code error;
auto handle = impl_->init(std::move(buffer), specs_map);
@@ -173,7 +187,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
size_t nInputs = nArgs.first;
size_t nOutputs = nArgs.second;
delegate_args.reserve(nInputs + nOutputs);

// inputs
for (size_t i = 0; i < nInputs; i++) {
auto multi_array = get_multi_array(args[i], ArgType::Input);
@@ -182,7 +196,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
"%s: Failed to create multiarray from input at args[%zu]", ETCoreMLStrings.delegateIdentifier.UTF8String, i);
delegate_args.emplace_back(std::move(multi_array.value()));
}

// outputs
for (size_t i = nInputs; i < nInputs + nOutputs; i++) {
auto multi_array = get_multi_array(args[i], ArgType::Output);
@@ -191,7 +205,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
"%s: Failed to create multiarray from output at args[%zu]", ETCoreMLStrings.delegateIdentifier.UTF8String, i);
delegate_args.emplace_back(std::move(multi_array.value()));
}

auto logging_options = get_logging_options(context);
std::error_code ec;
#ifdef ET_EVENT_TRACER_ENABLED
@@ -206,7 +220,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
"%s: Failed to run the model.",
ETCoreMLStrings.delegateIdentifier.UTF8String);
#endif

return Error::Ok;
}

@@ -235,5 +249,6 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
static auto success_with_compiler = register_backend(backend);
}

} // namespace executor
} // namespace torch
} // namespace coreml
} // namespace backends
} // namespace executorch
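
Note: registration is unchanged in substance; `register_backend(backend)` still runs at static-initialization time (see the tail of the file above), only the enclosing namespace moves. A minimal sketch of that registration pattern under the new namespace, assuming a backend name string for illustration (the real code takes it from `ETCoreMLStrings.delegateIdentifier`):

    #include <coreml_backend/delegate.h>

    using executorch::runtime::Backend;
    using executorch::runtime::register_backend;

    namespace executorch {
    namespace backends {
    namespace coreml {
    namespace {
    // A single static delegate instance, registered once at load time.
    // "CoreMLBackend" is an assumed name, not taken verbatim from this diff.
    CoreMLBackendDelegate delegate_instance;
    Backend backend_desc{"CoreMLBackend", &delegate_instance};
    auto registration_result = register_backend(backend_desc);
    } // namespace
    } // namespace coreml
    } // namespace backends
    } // namespace executorch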
23 changes: 15 additions & 8 deletions backends/apple/coreml/runtime/include/coreml_backend/delegate.h
@@ -17,8 +17,9 @@ namespace executorchcoreml {
class BackendDelegate;
}

namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace coreml {

class CoreMLBackendDelegate final : public ::executorch::runtime::BackendInterface {
public:
@@ -34,24 +35,28 @@ class CoreMLBackendDelegate final : public ::executorch::runtime::BackendInterface {
/// produce `processed`.
/// @retval On success, an opaque handle representing the loaded model
/// otherwise an `Error` case.
Result<DelegateHandle*>
init(BackendInitContext& context, FreeableBuffer* processed, ArrayRef<CompileSpec> compileSpecs) const override;
executorch::runtime::Result<executorch::runtime::DelegateHandle*>
init(executorch::runtime::BackendInitContext& context,
executorch::runtime::FreeableBuffer* processed,
executorch::runtime::ArrayRef<executorch::runtime::CompileSpec> compileSpecs) const override;

/// Executes the loaded model.
///
/// @param context An execution context specific to the CoreML backend.
/// @param handle The handle returned by an earlier call to `init`.
/// @param args The model's inputs and outputs.
/// @retval On success, `Error::Ok` otherwise any other `Error` case.
Error execute(BackendExecutionContext& context, DelegateHandle* handle, EValue** args) const override;
executorch::runtime::Error execute(executorch::runtime::BackendExecutionContext& context,
executorch::runtime::DelegateHandle* handle,
executorch::runtime::EValue** args) const override;

/// Returns `true` if the delegate is available, otherwise `false`.
bool is_available() const override;

/// Unloads the loaded CoreML model with the specified handle.
///
/// @param handle The handle returned by an earlier call to `init`.
void destroy(DelegateHandle* handle) const override;
void destroy(executorch::runtime::DelegateHandle* handle) const override;

/// Returns the registered `CoreMLBackendDelegate` instance.
static CoreMLBackendDelegate* get_registered_delegate() noexcept;
@@ -65,5 +70,7 @@ class CoreMLBackendDelegate final : public ::executorch::runtime::BackendInterface {
private:
std::shared_ptr<executorchcoreml::BackendDelegate> impl_;
};
} // namespace executor
} // namespace torch

} // namespace coreml
} // namespace backends
} // namespace executorch
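
For downstream code, the practical effect of the header change is that lookups resolve through the new namespace. A minimal usage sketch, assuming the include path implied by the header's location under `runtime/include/coreml_backend/`:

    #include <coreml_backend/delegate.h>

    using executorch::backends::coreml::CoreMLBackendDelegate;

    bool coreml_delegate_ready() {
        // get_registered_delegate() returns the instance registered at
        // static-initialization time; the null check guards the case where
        // registration has not run.
        CoreMLBackendDelegate* delegate = CoreMLBackendDelegate::get_registered_delegate();
        return delegate != nullptr && delegate->is_available();
    }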
16 changes: 10 additions & 6 deletions backends/apple/coreml/runtime/sdk/model_event_logger_impl.mm
@@ -17,15 +17,19 @@

namespace {

using namespace torch::executor;
using namespace executorch::runtime;

using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using executorch::aten::TensorImpl;

uint64_t time_units_to_nano_seconds(uint64_t time_units) {
static mach_timebase_info_data_t info;
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
NSCAssert(mach_timebase_info(&info) == KERN_SUCCESS, @"ModelEventLogger: Failed to get time base.");
});

return time_units * info.numer / info.denom;
}

@@ -100,7 +104,7 @@ bool is_packed(NSArray<NSNumber *> *shape, NSArray<NSNumber *> *strides) {
estimated_execution_end_time_in_ns,
metadata.bytes,
metadata.length);

}];
}

@@ -109,7 +113,7 @@ bool is_packed(NSArray<NSNumber *> *shape, NSArray<NSNumber *> *strides) {
[op_path_to_value_map enumerateKeysAndObjectsUsingBlock:^(ETCoreMLModelStructurePath *path,
MLMultiArray *intermediate_value,
BOOL * _Nonnull __unused stop) {
using namespace torch::executor;
using namespace executorch::runtime;

@autoreleasepool {
NSString *debug_symbol = op_path_to_debug_symbol_name_map[path];
@@ -123,15 +127,15 @@ bool is_packed(NSArray<NSNumber *> *shape, NSArray<NSNumber *> *strides) {
}

MLMultiArray *supported_value = value;
NSArray<NSNumber *> *shape = supported_value.shape;
NSError *local_error = nil;
MLMultiArrayDataType data_type = get_supported_data_type(value.dataType);

if (!is_packed(shape, value.strides) || (supported_value.dataType != data_type)) {
supported_value = [[MLMultiArray alloc] initWithShape:shape
dataType:data_type
error:&local_error];
NSCAssert(supported_value != nil,
@"ModelEventLoggerImpl: Failed to create packed multiarray with shape=%@, dataType=%ld, error=%@.",
shape,
static_cast<long>(value.dataType),
2 changes: 1 addition & 1 deletion backends/apple/coreml/runtime/test/BackendDelegateTests.mm
@@ -60,7 +60,7 @@ + (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString *)extension {
}

+ (void)setUp {
torch::executor::runtime_init();
executorch::runtime::runtime_init();
}

- (void)setUp {
10 changes: 5 additions & 5 deletions backends/apple/coreml/runtime/test/CoreMLBackendDelegateTests.mm
@@ -17,8 +17,8 @@

static constexpr size_t kRuntimeMemorySize = 50 * 1024U * 1024U; // 50 MB

using namespace torch::executor;
using torch::executor::testing::TensorFactory;
using namespace executorch::runtime;
using executorch::runtime::testing::TensorFactory;

namespace {
// TODO: Move the following methods to a utility class, so that it can be shared with `executor_runner.main.mm`
@@ -107,8 +107,8 @@
}
Buffer buffer(tensor_meta->nbytes(), 0);
auto sizes = tensor_meta->sizes();
exec_aten::TensorImpl tensor_impl(tensor_meta->scalar_type(), std::size(sizes), const_cast<int *>(sizes.data()), buffer.data());
exec_aten::Tensor tensor(&tensor_impl);
executorch::aten::TensorImpl tensor_impl(tensor_meta->scalar_type(), std::size(sizes), const_cast<int *>(sizes.data()), buffer.data());
executorch::aten::Tensor tensor(&tensor_impl);
EValue input_value(std::move(tensor));
Error err = method.set_input(input_value, i);
if (err != Error::Ok) {
@@ -129,7 +129,7 @@ @interface CoreMLBackendDelegateTests : XCTestCase
@implementation CoreMLBackendDelegateTests

+ (void)setUp {
torch::executor::runtime_init();
executorch::runtime::runtime_init();
}

+ (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString *)extension {
@@ -23,7 +23,7 @@ @interface ETCoreMLAssetManagerTests : XCTestCase
@implementation ETCoreMLAssetManagerTests

+ (void)setUp {
torch::executor::runtime_init();
executorch::runtime::runtime_init();
}

- (void)setUp {
@@ -70,7 +70,7 @@ @interface ETCoreMLModelDebuggerTests : XCTestCase
@implementation ETCoreMLModelDebuggerTests

+ (void)setUp {
torch::executor::runtime_init();
executorch::runtime::runtime_init();
}

+ (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString *)extension {
@@ -32,7 +32,7 @@ + (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString *)extension {
}

- (void)setUp {
torch::executor::runtime_init();
executorch::runtime::runtime_init();
@autoreleasepool {
NSError *localError = nil;
self.fileManager = [[NSFileManager alloc] init];
@@ -59,7 +59,7 @@ @interface ETCoreMLModelProfilerTests : XCTestCase
@implementation ETCoreMLModelProfilerTests

+ (void)setUp {
torch::executor::runtime_init();
executorch::runtime::runtime_init();
}

+ (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString *)extension {
4 changes: 2 additions & 2 deletions examples/apple/coreml/executor_runner/main.mm
@@ -24,12 +24,13 @@ static inline id check_class(id obj, Class cls) {

#define SAFE_CAST(Object, Type) ((Type *)check_class(Object, [Type class]))

using executorch::backends::coreml::CoreMLBackendDelegate;
using executorch::etdump::ETDumpGen;
using executorch::etdump::ETDumpResult;
using executorch::extension::FileDataLoader;
using executorch::runtime::DataLoader;
using executorch::runtime::EValue;
using executorch::runtime::Error;
using executorch::runtime::EValue;
using executorch::runtime::EventTracer;
using executorch::runtime::EventTracerDebugLogLevel;
using executorch::runtime::FreeableBuffer;
@@ -42,7 +43,6 @@ static inline id check_class(id obj, Class cls) {
using executorch::runtime::Result;
using executorch::runtime::Span;
using executorch::runtime::TensorInfo;
using torch::executor::CoreMLBackendDelegate;

static constexpr size_t kRuntimeMemorySize = 16 * 1024U * 1024U; // 16 MB
