Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,12 @@

public:
bool SupportsMLProgram() const override { return true; }

// Cast is shape-only data movement from CoreML's perspective: per-element
// dtype conversion that the marshaling overhead dominates for small

Check warning on line 27 in onnxruntime/core/providers/coreml/builders/impl/cast_op_builder.cc

View workflow job for this annotation

GitHub Actions / Optional Lint

[misspell] reported by reviewdog 🐶 "marshalling" is a misspelling of "marshaling" Raw Output: ./onnxruntime/core/providers/coreml/builders/impl/cast_op_builder.cc:27:31: "marshalling" is a misspelling of "marshaling"
// tensors. CoreML claims it but a partition consisting only of Casts
// doesn't earn its own marshaling cost.

Check warning on line 29 in onnxruntime/core/providers/coreml/builders/impl/cast_op_builder.cc

View workflow job for this annotation

GitHub Actions / Optional Lint

[misspell] reported by reviewdog 🐶 "marshalling" is a misspelling of "marshaling" Raw Output: ./onnxruntime/core/providers/coreml/builders/impl/cast_op_builder.cc:29:26: "marshalling" is a misspelling of "marshaling"
bool IsTrivial(const Node& /*node*/) const override { return true; }
};

Status CastOpBuilder::AddToModelBuilderImpl([[maybe_unused]] ModelBuilder& model_builder,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@ class FlattenOpBuilder : public BaseOpBuilder {

bool IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& input_params,
const logging::Logger& logger) const override;

bool IsTrivial(const Node& /*node*/) const override { return true; }
};

Status FlattenOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/coreml/builders/helper.h"
#include "core/providers/coreml/builders/impl/base_op_builder.h"
#include "core/providers/coreml/builders/impl/builder_utils.h"
#include "core/providers/coreml/builders/model_builder.h"
#include "core/providers/coreml/builders/op_builder_factory.h"

namespace onnxruntime {
namespace coreml {

// Builds a CoreML op for ONNX Identity: a native 'identity' op when emitting
// an ML Program, or a LINEAR activation when emitting a NeuralNetwork.
class IdentityOpBuilder : public BaseOpBuilder {
  // Identity is pure data movement; a partition made up only of nodes like
  // this is not worth handing to CoreML.
  bool IsTrivial(const Node& /*node*/) const override { return true; }

  bool SupportsMLProgram() const override { return true; }

  Status AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
                               const logging::Logger& logger) const override;
};

// Emits the CoreML representation of an Identity node.
// ML Program path: a native 'identity' op.
// NeuralNetwork path: there is no identity layer, so it is emulated with the
// LINEAR activation y = alpha * x + beta using alpha=1, beta=0.
Status IdentityOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
                                                const logging::Logger& /*logger*/) const {
  const auto& input_defs = node.InputDefs();
  const auto& output_def = *node.OutputDefs()[0];

  if (model_builder.CreateMLProgram()) {
    // No MILSpec types are named directly here, so no using-directive is
    // needed (cpplint build/namespaces).
    auto op = model_builder.CreateOperation(node, "identity");
    AddOperationInput(*op, "x", input_defs[0]->Name());
    AddOperationOutput(*op, output_def);
    model_builder.AddOperation(std::move(op));
  } else {
    // NeuralNetwork: emulate via activation LINEAR(alpha=1, beta=0).
    auto layer = model_builder.CreateNNLayer(node);
    auto* linear = layer->mutable_activation()->mutable_linear();
    linear->set_alpha(1.0f);
    linear->set_beta(0.0f);
    *layer->mutable_input()->Add() = input_defs[0]->Name();
    *layer->mutable_output()->Add() = output_def.Name();
    model_builder.AddLayer(std::move(layer));
  }
  return Status::OK();
}

void CreateIdentityOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations) {

Check warning on line 46 in onnxruntime/core/providers/coreml/builders/impl/identity_op_builder.cc

View workflow job for this annotation

GitHub Actions / Optional Lint C++

[cpplint] reported by reviewdog 🐶 Add #include <string> for string [build/include_what_you_use] [4] Raw Output: onnxruntime/core/providers/coreml/builders/impl/identity_op_builder.cc:46: Add #include <string> for string [build/include_what_you_use] [4]
op_registrations.builders.push_back(std::make_unique<IdentityOpBuilder>());
op_registrations.op_builder_map.emplace(op_type, op_registrations.builders.back().get());
}

} // namespace coreml
} // namespace onnxruntime
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ class ReshapeOpBuilder : public BaseOpBuilder {
int GetMinSupportedOpSet(const Node& /* node */) const override { return 5; }

bool SupportsMLProgram() const override { return true; }

bool IsTrivial(const Node& /*node*/) const override { return true; }
};

void ReshapeOpBuilder::AddInitializersToSkip(ModelBuilder& model_builder, const Node& node) const {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,9 @@ class SqueezeOpBuilder : public BaseOpBuilder {
bool IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& input_params,
const logging::Logger& logger) const override;
bool SupportsMLProgram() const override { return true; }

// SqueezeOpBuilder handles both Squeeze and Unsqueeze; both are shape-only.
bool IsTrivial(const Node& /*node*/) const override { return true; }
};

namespace {
Expand Down
156 changes: 156 additions & 0 deletions onnxruntime/core/providers/coreml/builders/impl/tile_op_builder.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,156 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/optimizer/initializer.h"
#include "core/providers/coreml/builders/helper.h"
#include "core/providers/coreml/builders/impl/base_op_builder.h"
#include "core/providers/coreml/builders/impl/builder_utils.h"
#include "core/providers/coreml/builders/model_builder.h"
#include "core/providers/coreml/builders/op_builder_factory.h"
#include "core/providers/coreml/shape_utils.h"
#include "core/providers/shared/utils/utils.h"

namespace onnxruntime {
namespace coreml {

// Builds CoreML 'tile' (ML Program) or TileLayer (NeuralNetwork) for ONNX
// Tile nodes.
class TileOpBuilder : public BaseOpBuilder {
  // Tile only rearranges data, so a partition consisting solely of nodes
  // like it is not worth the CoreML round-trip.
  bool IsTrivial(const Node& /*node*/) const override { return true; }

  bool SupportsMLProgram() const override { return true; }

  void AddInitializersToSkip(ModelBuilder& model_builder, const Node& node) const override;

  bool HasSupportedInputsImpl(const Node& node, const OpBuilderInputParams& input_params,
                              const logging::Logger& logger) const override;

  bool IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& input_params,
                         const logging::Logger& logger) const override;

  Status AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
                               const logging::Logger& logger) const override;
};

void TileOpBuilder::AddInitializersToSkip(ModelBuilder& model_builder, const Node& node) const {
  // A constant 'repeats' initializer gets baked into a MIL constant, so the
  // original tensor does not need to land in the model. A runtime 'repeats'
  // tensor is consumed directly by the dynamic-shape MIL path and is kept.
  const auto& repeats_name = node.InputDefs()[1]->Name();
  if (model_builder.GetConstantInitializer(repeats_name) != nullptr) {
    model_builder.AddInitializerToSkip(repeats_name);
  }
}

// Emits the CoreML representation of a Tile node.
// ML Program path: 'tile' op; 'reps' comes from a baked constant when the
// ONNX 'repeats' input is a constant initializer, otherwise the runtime
// tensor is passed through.
// NeuralNetwork path: TileLayer, which only supports compile-time repeats.
Status TileOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
                                            const logging::Logger& /*logger*/) const {
  const auto& input_defs = node.InputDefs();
  const auto& output_def = *node.OutputDefs()[0];
  const auto* repeats_init = model_builder.GetConstantInitializer(input_defs[1]->Name());

  if (model_builder.CreateMLProgram()) {
    // No MILSpec types are named directly here, so no using-directive is
    // needed (cpplint build/namespaces).
    auto op = model_builder.CreateOperation(node, "tile");
    AddOperationInput(*op, "x", input_defs[0]->Name());
    if (repeats_init) {
      // Constant 'repeats': bake the values into a MIL constant.
      Initializer unpacked(model_builder.GetGraphViewer().GetGraph(), *repeats_init);
      auto repeats = unpacked.DataAsSpan<int64_t>();
      AddOperationInput(*op, "reps", model_builder.AddConstant(op->type(), "reps", repeats));
    } else {
      // Runtime 'reps' (e.g. emitted by a Loop). Pass the tensor through.
      AddOperationInput(*op, "reps", input_defs[1]->Name());
    }
    AddOperationOutput(*op, output_def);
    model_builder.AddOperation(std::move(op));
  } else {
    // IsOpSupportedImpl already rejects this combination; this guard keeps
    // the invariant explicit if the builder is ever called directly.
    if (!repeats_init) {
      return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
                             "TileOpBuilder NeuralNetwork path requires constant 'repeats'");
    }
    Initializer unpacked(model_builder.GetGraphViewer().GetGraph(), *repeats_init);
    auto repeats = unpacked.DataAsSpan<int64_t>();
    auto layer = model_builder.CreateNNLayer(node);
    auto* tile_params = layer->mutable_tile();
    for (int64_t r : repeats) {
      tile_params->add_reps(r);
    }
    *layer->mutable_input()->Add() = input_defs[0]->Name();
    *layer->mutable_output()->Add() = output_def.Name();
    model_builder.AddLayer(std::move(layer));
  }
  return Status::OK();
}

bool TileOpBuilder::HasSupportedInputsImpl(const Node& node, const OpBuilderInputParams& input_params,
                                           const logging::Logger& logger) const {
  // Tile only rearranges data, so it can carry any element type CoreML can
  // represent. ONNX Tile frequently appears in graph post-processing on
  // INT32 grid-index tensors (e.g. YOLO anchor expansion), which the default
  // float-only base check would reject.
  int32_t input_type;
  if (!GetType(*node.InputDefs()[0], input_type, logger)) {
    return false;
  }

  const bool always_supported = input_type == ONNX_NAMESPACE::TensorProto_DataType_FLOAT ||
                                input_type == ONNX_NAMESPACE::TensorProto_DataType_INT32 ||
                                input_type == ONNX_NAMESPACE::TensorProto_DataType_INT64 ||
                                input_type == ONNX_NAMESPACE::TensorProto_DataType_BOOL;
  // FP16 needs the ML Program path and CoreML 6+.
  const bool fp16_supported = input_type == ONNX_NAMESPACE::TensorProto_DataType_FLOAT16 &&
                              input_params.create_mlprogram && input_params.coreml_version >= 6;

  if (always_supported || fp16_supported) {
    return true;
  }

  LOGS(logger, VERBOSE) << "[Tile] input type " << input_type << " is not supported";
  return false;
}

bool TileOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& input_params,
                                      const logging::Logger& logger) const {
  const auto& input_defs = node.InputDefs();

  // The ML Program path also accepts a runtime 'reps' tensor; the
  // NeuralNetwork emitter needs 'repeats' at model build time.
  const auto* repeats_tensor =
      input_params.graph_viewer.GetConstantInitializer(input_defs[1]->Name());
  if (repeats_tensor == nullptr && !input_params.create_mlprogram) {
    LOGS(logger, VERBOSE) << "Tile NeuralNetwork path requires 'repeats' to be a constant initializer";
    return false;
  }

  std::vector<int64_t> input_shape;
  if (!GetShape(*input_defs[0], input_shape, logger)) {
    return false;
  }

  const auto rank = input_shape.size();
  if (rank > 5) {
    LOGS(logger, VERBOSE) << "Tile does not support input rank greater than 5. Input rank: " << rank;
    return false;
  }

  // When 'repeats' is constant, validate it eagerly: one entry per input
  // dimension, every entry >= 1.
  if (repeats_tensor != nullptr) {
    Initializer unpacked(input_params.graph_viewer.GetGraph(), *repeats_tensor);
    const auto repeats = unpacked.DataAsSpan<int64_t>();
    if (repeats.size() != rank) {
      LOGS(logger, VERBOSE) << "Tile 'repeats' length (" << repeats.size()
                            << ") must match input rank (" << rank << ")";
      return false;
    }
    for (int64_t r : repeats) {
      if (r < 1) {
        LOGS(logger, VERBOSE) << "Tile 'repeats' values must be positive; got " << r;
        return false;
      }
    }
  }

  return true;
}

void CreateTileOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations) {

Check warning on line 150 in onnxruntime/core/providers/coreml/builders/impl/tile_op_builder.cc

View workflow job for this annotation

GitHub Actions / Optional Lint C++

[cpplint] reported by reviewdog 🐶 Add #include <string> for string [build/include_what_you_use] [4] Raw Output: onnxruntime/core/providers/coreml/builders/impl/tile_op_builder.cc:150: Add #include <string> for string [build/include_what_you_use] [4]
op_registrations.builders.push_back(std::make_unique<TileOpBuilder>());
op_registrations.op_builder_map.emplace(op_type, op_registrations.builders.back().get());
}

} // namespace coreml
} // namespace onnxruntime
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@ class TransposeOpBuilder : public BaseOpBuilder {
const logging::Logger& logger) const override;

bool SupportsMLProgram() const override { return true; }

bool IsTrivial(const Node& /*node*/) const override { return true; }
};

Status TransposeOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,11 @@
bool SupportsMLProgram() const override { return true; }
bool IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& input_params,
const logging::Logger& logger) const override;

// Of the unary ops this builder handles, only Ceil is cheap enough to count
// as trivial. Erf/Round/Exp/Reciprocal/Sqrt are all transcendental or
// multi-cycle ops and earn their own marshaling cost.

Check warning on line 24 in onnxruntime/core/providers/coreml/builders/impl/unary_op_builder.cc

View workflow job for this annotation

GitHub Actions / Optional Lint

[misspell] reported by reviewdog 🐶 "marshalling" is a misspelling of "marshaling" Raw Output: ./onnxruntime/core/providers/coreml/builders/impl/unary_op_builder.cc:24:40: "marshalling" is a misspelling of "marshaling"
bool IsTrivial(const Node& node) const override { return node.OpType() == "Ceil"; }
};

Status UnaryOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
Expand All @@ -39,6 +44,8 @@
coreml_op_type = "round";
} else if (op_type == "Exp") {
coreml_op_type = "exp";
} else if (op_type == "Ceil") {
coreml_op_type = "ceil";
} else {
return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
"UnaryOpBuilder::AddToModelBuilderImpl, unexpected op: ", op_type);
Expand Down Expand Up @@ -82,7 +89,8 @@
bool UnaryOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& input_params,
const logging::Logger& /*logger*/) const {
if (!input_params.create_mlprogram) {
if (node.OpType() == "Erf" || node.OpType() == "Round" || node.OpType() == "Exp") {
if (node.OpType() == "Erf" || node.OpType() == "Round" || node.OpType() == "Exp" ||
node.OpType() == "Ceil") {
return false;
}
}
Expand Down
8 changes: 8 additions & 0 deletions onnxruntime/core/providers/coreml/builders/op_builder.h
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,14 @@

// Does the builder implementation support creating an ML Program?
virtual bool SupportsMLProgram() const = 0;

// Is this op cheap enough that a CoreML partition consisting only of nodes
// like it isn't worth the marshaling cost? Used by the trivial-only

Check warning on line 49 in onnxruntime/core/providers/coreml/builders/op_builder.h

View workflow job for this annotation

GitHub Actions / Optional Lint

[misspell] reported by reviewdog 🐶 "marshalling" is a misspelling of "marshaling" Raw Output: ./onnxruntime/core/providers/coreml/builders/op_builder.h:49:29: "marshalling" is a misspelling of "marshaling"
// partition heuristic in CoreMLExecutionProvider::GetCapability. Defaults
// to false; trivial-op builders override to true. Some builders dispatch
// multiple op types (e.g. UnaryOpBuilder), so the answer can depend on
// node.OpType().
virtual bool IsTrivial(const Node& /*node*/) const { return false; }
};

} // namespace coreml
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ static OpBuilderRegistrations CreateOpBuilderRegistrations() {
CreateUnaryOpBuilder("Round", op_registrations);
CreateUnaryOpBuilder("Sqrt", op_registrations);
CreateUnaryOpBuilder("Exp", op_registrations);
CreateUnaryOpBuilder("Ceil", op_registrations);

// Binary elementwise ops
CreateBinaryOpBuilder("Add", op_registrations);
Expand Down Expand Up @@ -77,6 +78,7 @@ static OpBuilderRegistrations CreateOpBuilderRegistrations() {
CreateGatherOpBuilder("Gather", op_registrations);
CreateGemmOpBuilder("Gemm", op_registrations);
CreateGridSampleOpBuilder("GridSample", op_registrations);
CreateIdentityOpBuilder("Identity", op_registrations);
CreateLRNOpBuilder("LRN", op_registrations);
CreateGemmOpBuilder("MatMul", op_registrations);
CreatePadOpBuilder("Pad", op_registrations);
Expand All @@ -87,6 +89,7 @@ static OpBuilderRegistrations CreateOpBuilderRegistrations() {
CreateSplitOpBuilder("Split", op_registrations);
CreateSoftmaxOpBuilder("Softmax", op_registrations);
CreateSqueezeOpBuilder("Squeeze", op_registrations);
CreateTileOpBuilder("Tile", op_registrations);
CreateTransposeOpBuilder("Transpose", op_registrations);
CreateSqueezeOpBuilder("Unsqueeze", op_registrations);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ void CreateFlattenOpBuilder(const std::string& op_type, OpBuilderRegistrations&
void CreateGatherOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
void CreateGemmOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
void CreateGridSampleOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
void CreateIdentityOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
void CreateLRNOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
void CreatePadOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
void CreatePoolOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
Expand All @@ -42,6 +43,7 @@ void CreateSliceOpBuilder(const std::string& op_type, OpBuilderRegistrations& op
void CreateSoftmaxOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
void CreateSplitOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
void CreateSqueezeOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
void CreateTileOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
void CreateTransposeOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
void CreateUnaryOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
void CreateQuickGeluOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations);
Expand Down
28 changes: 26 additions & 2 deletions onnxruntime/core/providers/coreml/coreml_execution_provider.cc
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
#include "core/framework/tensorprotoutils.h"
#include "core/graph/graph_viewer.h"
#include "core/providers/coreml/builders/helper.h"
#include "core/providers/coreml/builders/op_builder_factory.h"
#include "core/providers/partitioning_utils.h"
#include "core/session/onnxruntime_cxx_api.h"

Expand Down Expand Up @@ -88,9 +89,32 @@
return MakeString(user_provided_key, "_", COREML, "_", model_hash, "_", metadef_id);
};

result = utils::CreateSupportedPartitions(graph_viewer, supported_nodes, {},
// Drop CoreML partitions that consist entirely of trivial shape / cheap-elementwise ops.
// These ops can each be claimed individually but the CPU↔CoreML round-trip cost
// (~50-100us marshaling) outweighs the saving when the partition has no compute-heavy

Check warning on line 94 in onnxruntime/core/providers/coreml/coreml_execution_provider.cc

View workflow job for this annotation

GitHub Actions / Optional Lint

[misspell] reported by reviewdog 🐶 "marshalling" is a misspelling of "marshaling" Raw Output: ./onnxruntime/core/providers/coreml/coreml_execution_provider.cc:94:16: "marshalling" is a misspelling of "marshaling"
// op to amortize it over. Per-op CoreML dispatch cost is ~10-14us on M3 Max even for

Check warning on line 95 in onnxruntime/core/providers/coreml/coreml_execution_provider.cc

View workflow job for this annotation

GitHub Actions / Optional Lint

[misspell] reported by reviewdog 🐶 "amortise" is a misspelling of "amortize" Raw Output: ./onnxruntime/core/providers/coreml/coreml_execution_provider.cc:95:11: "amortise" is a misspelling of "amortize"
// trivial ops (Identity/Ceil/Tile etc.), and CPU runs them in <1us each.
//
// The "trivial" marker lives on each op builder's IOpBuilder::IsTrivial(node)
// override rather than as a hardcoded set here, so adding a new trivial op
// builder doesn't risk drifting from a list maintained at the EP level.
const auto& op_builders = coreml::GetOpBuilders();
const auto is_node_trivial = [&](const Node* node) -> bool {
auto it = op_builders.find(node->OpType());
return it != op_builders.end() && it->second->IsTrivial(*node);
};
const auto is_node_supported = [&](const Node& node) -> bool {
return supported_nodes.find(&node) != supported_nodes.end();
};
const auto on_group_closed = [&](const std::vector<const Node*>& group) -> bool {
// Keep the partition only if at least one node is non-trivial.
return std::any_of(group.begin(), group.end(),
[&](const Node* node) { return !is_node_trivial(node); });
};

result = utils::CreateSupportedPartitions(graph_viewer, is_node_supported, on_group_closed,
gen_metadef_name, COREML, kCoreMLExecutionProvider,
nullptr,
/*node_unit_map*/ nullptr,
/*drop_constant_initializers*/ true);

const auto num_of_partitions = result.size();
Expand Down
Loading
Loading