This repository was archived by the owner on Jan 3, 2023. It is now read-only.

Commit c4cb4b1

Merge pull request #3148 from NervanaSystems/tomdol/plaidml_r0.23

Decomposition of fused ops in PlaidML backend [r0.23]

2 parents: 51f57d4 + a8b170b
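For context, nGraph's FusedOpDecomposition pass rewrites composite ("fused") ops into an equivalent subgraph of core ops, so a backend such as PlaidML that does not implement a fused op directly can still execute the graph. Below is a minimal, self-contained C++ sketch of that idea only; the types and functions (Node, FusedOp, FusedMultiplyAdd, run_decomposition) are hypothetical illustrations, not nGraph's actual class hierarchy.

// Sketch of a fused-op decomposition pass. All names are illustrative.
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Node
{
    std::string name;
    virtual ~Node() = default;
};

// Core ops that every backend is expected to implement.
struct Multiply : Node { Multiply() { name = "Multiply"; } };
struct Add : Node      { Add()      { name = "Add"; } };

// A fused op knows how to rewrite itself as a subgraph of core ops.
struct FusedOp : Node
{
    virtual std::vector<std::shared_ptr<Node>> decompose() const = 0;
};

// Example fused op: y = a * x + b, expressible as Multiply followed by Add.
struct FusedMultiplyAdd : FusedOp
{
    FusedMultiplyAdd() { name = "FusedMultiplyAdd"; }
    std::vector<std::shared_ptr<Node>> decompose() const override
    {
        return {std::make_shared<Multiply>(), std::make_shared<Add>()};
    }
};

// The "pass": walk the graph and splice each fused op's decomposition
// in place of the fused node itself.
void run_decomposition(std::vector<std::shared_ptr<Node>>& graph)
{
    std::vector<std::shared_ptr<Node>> rewritten;
    for (const auto& node : graph)
    {
        if (auto fused = std::dynamic_pointer_cast<FusedOp>(node))
        {
            for (const auto& core_op : fused->decompose())
            {
                rewritten.push_back(core_op);
            }
        }
        else
        {
            rewritten.push_back(node);
        }
    }
    graph = std::move(rewritten);
}

int main()
{
    std::vector<std::shared_ptr<Node>> graph = {std::make_shared<FusedMultiplyAdd>()};
    run_decomposition(graph);
    for (const auto& node : graph)
    {
        std::cout << node->name << "\n"; // prints "Multiply" then "Add"
    }
}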

File tree: 2 files changed (+28, -0 lines)

src/ngraph/runtime/plaidml/plaidml_compiler.cpp (2 additions, 0 deletions)

@@ -20,6 +20,7 @@
 #include "ngraph/pass/algebraic_simplification.hpp"
 #include "ngraph/pass/core_fusion.hpp"
 #include "ngraph/pass/cse.hpp"
+#include "ngraph/pass/fused_op_decomposition.hpp"
 #include "ngraph/pass/get_output_element_elimination.hpp"
 #include "ngraph/pass/like_replacement.hpp"
 #include "ngraph/pass/liveness.hpp"
@@ -87,6 +88,7 @@ std::shared_ptr<ngraph::runtime::plaidml::PlaidML_Executable>
     ngraph::pass::Manager pass_manager;

     // We apply the same general-purposes passes as the CPU backend.
+    pass_manager.register_pass<ngraph::pass::FusedOpDecomposition>();
     pass_manager.register_pass<ngraph::pass::LikeReplacement>();
     pass_manager.register_pass<ngraph::pass::NopElimination>();
     pass_manager.register_pass<ngraph::pass::ZeroDimTensorElimination>();
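The new pass is registered ahead of the other general-purpose passes so that the core ops it produces are visible to the simplification passes that follow. A hedged sketch of how such a pipeline is driven is shown below; the register_pass/run_passes calls mirror the diff above, while the decompose_for_plaidml helper and the surrounding structure are assumptions for illustration, not code from this commit.

// Hypothetical helper; only register_pass/run_passes come from the diff above.
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/fused_op_decomposition.hpp"
#include "ngraph/pass/like_replacement.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/nop_elimination.hpp"

std::shared_ptr<ngraph::Function> decompose_for_plaidml(std::shared_ptr<ngraph::Function> func)
{
    ngraph::pass::Manager pass_manager;
    // Decompose fused ops first so the later passes only ever see core ops.
    pass_manager.register_pass<ngraph::pass::FusedOpDecomposition>();
    pass_manager.register_pass<ngraph::pass::LikeReplacement>();
    pass_manager.register_pass<ngraph::pass::NopElimination>();
    pass_manager.run_passes(func);
    return func;
}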

src/ngraph/runtime/plaidml/unit_test.manifest (26 additions, 0 deletions)

@@ -253,3 +253,29 @@ group_conv_transpose
 group_conv_transpose_output_shape
 divide_python_rounding_int32
 backwards_batchmatmul_tensor2_tensor2
+
+# unsupported ops: 'QuantizedConvolution', 'QuantizedDot', 'TopK', 'Erf', 'EmbeddingLookup'
+model_quant_conv_linear
+model_conv_integer_no_zero_point
+model_matmul_integer_no_zero_point
+model_matmul_integer_4d_no_zero_point
+model_top_k
+model_erf
+model_erf_int32
+model_hardmax
+
+# node validation error: "Argument shapes are inconsistent."
+model_lstm_fwd_with_clip
+model_lstm_fwd_mixed_seq
+model_lstm_fwd_hardsigmoid_activation
+model_reduce_log_sum
+model_reduce_log_sum_exp
+model_reduce_mean
+
+# result mismatch
+model_dequantize_linear_scalar_zero_scale_int8
+model_softmax
+avg_pool_3d_uneven_strided_padded
+rnn_cell_activation_function
+gru_cell_bias_clip
+gru_cell_linear_before_reset