@@ -235,245 +235,6 @@ def export_ir(output_dir):
     openvino_tensorflow_lib.freeClusterInfo()
 
     return cluster_string
-
-def optimize_graph_with_openvino_tf1(frozen_model_file,
-                                     output_node_names,
-                                     ):
-    """
-    Rewrites the tf.Graph of the frozen model with the OpenVINOGrapplerOptimizer.
-
-    Example usage:
-
-    >>> import openvino_tensorflow as ovtf
-    >>> pb_file = "inception_v3_2016_08_28_frozen.pb"
-    >>> output_names = ['InceptionV3/Predictions/Reshape_1']
-    >>> model = ovtf.optimize_graph_with_openvino_tf1(pb_file, output_names)
-    >>> with tf.compat.v1.Session() as sess:
-    ...     prob_tensor = tf.import_graph_def(model, name='', return_elements=output_names)
-    ...     preds = sess.run(prob_tensor, tf_inputs)
-
-    Args:
-        frozen_model_file: Path to the frozen model file containing the GraphDef to optimize.
-        output_node_names: A list of output node names, used as fetch nodes when
-            creating the GrapplerItem object.
-
-    Raises:
-        AssertionError: If the frozen model path is invalid.
-        AssertionError: If a backend other than CPU is used.
-
-    Returns:
-        The optimized GraphDef.
-    """
-
-    if TF_MAJOR_VERSION < 2 or (TF_MAJOR_VERSION == 2 and TF_MINOR_VERSION < 8):
-        raise AssertionError("Only TF Versions >= 2.8.x are supported for the optimize_graph APIs")
-
-    if not os.path.exists(frozen_model_file):
-        raise AssertionError("Could not find frozen model path")
-
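-    # Disable OVTF's runtime rewrite pass; this API applies the OpenVINO
-    # Grappler optimization offline instead.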
-    openvino_tensorflow_lib.disable_rewrite_pass()
-
-    if get_backend() != "CPU":
-        raise AssertionError(("Offline TF Graph optimization with OpenVINOGrapplerOptimizer "
-                              "is only available for the CPU backend."
-                              "\nConsider removing the call to "
-                              "optimize_graph_with_openvino_tf1 to use OpenVINO "
-                              "on other backends."))
-
-    graph = tf.Graph()
-    graph_def = tf.compat.v1.GraphDef()
-
-    with tf.compat.v1.gfile.GFile(frozen_model_file, "rb") as f:
-        graph_def.ParseFromString(f.read())
-    with graph.as_default():
-        importer.import_graph_def(graph_def, name='')
-
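-    # Wrap the imported graph in a MetaGraphDef so it can be fed to Grappler.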
-    meta_graph_def = saver.export_meta_graph(
-        graph_def=graph.as_graph_def(add_shapes=True), graph=graph)
-
-    fetch_collection = meta_graph_pb2.CollectionDef()
-    for array in output_node_names:
-        fetch_collection.node_list.value.append(array)
-
-    # Grappler determines fetch ops from collection 'train_op'.
-    meta_graph_def.collection_def[ops.GraphKeys.TRAIN_OP].CopyFrom(
-        fetch_collection)
-
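-    # NOTE: 'rewriter_config' is assumed to be the module-level RewriterConfig
-    # (defined earlier in this file) that registers the OpenVINO custom Grappler pass.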
-    grappler_session_config = config_pb2.ConfigProto()
-    grappler_session_config.graph_options.rewrite_options.CopyFrom(rewriter_config)
-    optimized_graph_def = tf_optimizer.OptimizeGraph(grappler_session_config,
-                                                     meta_graph_def, graph_id=b"tf_graph")
-
-    return optimized_graph_def
-
-def optimize_graph_with_openvino_tf2(saved_model_dir,
-                                     input_tensors=None,
-                                     saved_model_signature=
-                                     signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
-                                     saved_model_tag=tag_constants.SERVING,
-                                     save_optimized_function_signature=False
-                                     ):
316- """
317- Rewrites the tf.Graph of a TF2 SavedModel Function Signature with the
318- OpenVINOGrapplerOptimizer. Expects a sample input tensor with a fully defined shape and
319- dtype, which will be used to create the input feeds of GrapplerItem used for CostAnalysis.
320-
321- Converts all Variable ops into Const ops, and inlines supported compute heavy subgraphs
322- as encapsulated OpenVINO custom ops. Returns a single ConcreteFunction specialized to
323- input shape and dtype of the provided 'input_tensor'.
324-
325- Example usage:
326-
327- >>> import openvino_tensorflow as ovtf
328- >>> model_path = "ssd_resnet101_v1_fpn_1024x1024"
329- >>> image_numpy = np.array(np.random.rand(1, 1024,1024,3)).astype(np.uint8)
330- >>> input_tensor = tf.convert_to_tensor(image_numpy, dtype=tf.uint8)
331- >>> model = ovtf.optimize_graph_with_openvino_tf2(model_path, input_tensor)
332- >>> print(model)
333- <ConcreteFunction pruned(args_0) at 0x>
334- >>> results = model(input_tensor)
335-
336- Args:
337- saved_model_dir: The SavedModel directory to load from.
338- input_tensors: A tf.Tensor, a list or a dict of tf.Tensor or numpy arrays, whose shape and
339- type will be used by OpenVINOGrapplerOptimizer for cost analysis.
340- saved_model_signature: SavedModel tag to load
341- saved_model_tag: The SavedModel function signature key, whose graph will be optimized
342- save_optimized_function_signature: Whether to save the new optimized function signature to
343- the model at 'saved_model_dir'
344-
345- Raises:
346- AssertionError: If the SavedModel path is invalid
347- AssertionError: If a backend other than CPU is used
348-
349- Returns:
350- The optimized TF ConcreteFunction object
351- """
-
-    # [TODO] Add support for taking direct tf.Graph or tf.function inputs
-
-    if TF_MAJOR_VERSION < 2 or (TF_MAJOR_VERSION == 2 and TF_MINOR_VERSION < 8):
-        raise AssertionError("Only TF Versions >= 2.8.x are supported for the optimize_graph APIs")
-
-    if not os.path.exists(saved_model_dir):
-        raise AssertionError("Could not find saved model path")
-
-    if get_backend() != "CPU":
-        raise AssertionError(("Offline TF Graph optimization with OpenVINOGrapplerOptimizer "
-                              "is only available for the CPU backend."
-                              "\nConsider removing the call to "
-                              "optimize_graph_with_openvino_tf2 to use OpenVINO "
-                              "on other backends."))
-
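-    # As in the TF1 variant, disable the runtime rewrite pass; the graph is
-    # optimized offline below.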
-    openvino_tensorflow_lib.disable_rewrite_pass()
-
-    # Prepare a tf.function from the SavedModel:
-    # load the model with the provided saved model tag.
-    try:
-        # Try the provided tag or the default tag
-        saved_model = load.load(saved_model_dir, saved_model_tag)
-    except RuntimeError as e:
-        # Loading with the tag failed; retry without a tag, which works when the
-        # SavedModel contains a single MetaGraph, as for those exported from
-        # `tf.saved_model.save`.
-        if saved_model_tag == tag_constants.SERVING:
-            saved_model = load.load(saved_model_dir)
-        else:
-            raise RuntimeError(e)
-
-    # Form a concrete function with the input tensors in it so Grappler can do
-    # shape inference. Select the desired SavedModel function signature.
-    try:
-        # Try the provided signature or the default signature
-        print("Available Saved Model Signatures: ", saved_model.signatures)
-        print("Selecting Signature: ", saved_model_signature)
-
-        func = tf.function(saved_model.signatures[saved_model_signature])
-
-    except KeyError as e:
-        # If the provided signature doesn't work, let tf.function try inferring
-        # the available signatures. With `input_signature=None`, a separate
-        # ConcreteFunction is instantiated for each inferred input signature.
-        if saved_model_signature == signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
-            func = tf.function(saved_model)
-        else:
-            raise RuntimeError(e)
401-
402- # Handle all types of possible input tensors
403- if isinstance (input_tensors , dict ):
404- tensors = {name :(ops .convert_to_tensor (v ) if not isinstance (v , tf .Tensor ) else v )
405- for name , v in input_tensors .items ()}
406- func = tf .function (func )
407- args , kwargs = [], tensors
408- elif isinstance (input_tensors , list ):
409- tensors = [ops .convert_to_tensor (v ) if not isinstance (v , tf .Tensor ) else v
410- for v in input_tensors ]
411- input_signature = [tf .TensorSpec .from_tensor (v ) for v in tensors ]
412- func = tf .function (func , input_signature = input_signature )
413- args , kwargs = [], {}
414- else :
415- if not isinstance (input_tensors , tf .Tensor ):
416- tensors = ops .convert_to_tensor (input_tensors )
417- else :
418- tensors = input_tensors
419- input_signature = [tf .TensorSpec .from_tensor (tensors )]
420- func = tf .function (func , input_signature = input_signature )
421- args , kwargs = [], {}
422-
423- func = func .get_concrete_function (* args , ** kwargs )
424-
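-    # Freeze: convert Variables to Consts and inline functions aggressively so the
-    # optimizer sees one flat graph; lower_control_flow=False keeps v2 control flow ops.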
-    # Converting var2consts for larger models might take a long time
-    frozen_func = convert_to_constants.convert_variables_to_constants_v2(
-        func, lower_control_flow=False, aggressive_inlining=True)
-
-    meta_graph_def = saver.export_meta_graph(
-        graph_def=frozen_func.graph.as_graph_def(add_shapes=True),
-        graph=frozen_func.graph)
-
-    fetch_collection = meta_graph_pb2.CollectionDef()
-    for array in frozen_func.outputs:
-        fetch_collection.node_list.value.append(array.name)
-
-    # Grappler determines fetch ops from collection 'train_op'.
-    meta_graph_def.collection_def[ops.GraphKeys.TRAIN_OP].CopyFrom(
-        fetch_collection)
-
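-    # As in the TF1 variant, 'rewriter_config' is assumed to enable the
-    # OpenVINO custom Grappler pass.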
-    grappler_session_config = config_pb2.ConfigProto()
-    grappler_session_config.graph_options.rewrite_options.CopyFrom(rewriter_config)
-    optimized_graph_def = tf_optimizer.OptimizeGraph(grappler_session_config,
-                                                     meta_graph_def, graph_id=b"tf_graph")
-
-    # Swap the original function with the optimized function in TF's context.
-    for f in optimized_graph_def.library.function:
-        while context.context().has_function(f.signature.name):
-            context.context().remove_function(f.signature.name)
-
-    optimized_func = wrap_function.function_from_graph_def(
-        optimized_graph_def,
-        [tensor.name for tensor in frozen_func.inputs],
-        [tensor.name for tensor in frozen_func.outputs])
-
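-    # Restore the original nested output structure and input signature so callers
-    # see the same interface as the source function.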
-    optimized_func.graph.structured_outputs = nest.pack_sequence_as(
-        func.graph.structured_outputs,
-        optimized_func.graph.structured_outputs)
-
-    optimized_func.graph.structured_input_signature = (
-        func.structured_input_signature)
-
-    # Rewrite the signature map using the optimized ConcreteFunction.
-    signatures = {key: value for key, value in saved_model.signatures.items()}
-    signatures["ovtf"] = optimized_func
-
-    # Optionally save the optimized function signature for later use. This is
-    # useful when the start-up overhead of this call needs to be avoided.
-    if save_optimized_function_signature:
-        save.save(saved_model, saved_model_dir, signatures=signatures)
-
-    return optimized_func
 
 __version__ = \
     "OpenVINO integration with TensorFlow version: " + str(openvino_tensorflow_lib.version()) \