Commit b6a23ca

Avoid OMZ in 106 and 108 notebooks (openvinotoolkit#865)

1 parent c835f84 commit b6a23ca
2 files changed: +121 −53 lines changed

notebooks/106-auto-device/106-auto-device.ipynb

Lines changed: 61 additions & 22 deletions
@@ -25,9 +25,8 @@
  "metadata": {},
  "source": [
   "## Download and convert the model\n",
-  "This tutorial uses the [googlenet-v1](https://docs.openvino.ai/latest/omz_models_model_googlenet_v1.html) model from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/). The googlenet-v1 model is the first of the [Inception](https://github.com/tensorflow/tpu/tree/master/models/experimental/inception) family of models designed to perform image classification. Like other Inception models, googlenet-v1 was pre-trained on the [ImageNet](https://image-net.org/) data set. For more details about this family of models, see the [research paper](https://arxiv.org/abs/1512.00567).\n",
   "\n",
-  "The following code downloads googlenet-v1 and converts it to OpenVINO IR format `(model/public/googlenet-v1/FP16/googlenet-v1.xml)`. For more information about Open Model Zoo tools, refer to the [104-model-tools](../104-model-tools/README.md) tutorial."
+  "This tutorial uses the [bvlc_googlenet](https://github.com/BVLC/caffe/tree/master/models/bvlc_googlenet) model. The bvlc_googlenet model is the first of the [Inception](https://github.com/tensorflow/tpu/tree/master/models/experimental/inception) family of models designed to perform image classification. Like other Inception models, bvlc_googlenet was pre-trained on the [ImageNet](https://image-net.org/) data set. For more details about this family of models, see the [research paper](https://arxiv.org/abs/1512.00567)."
  ]
 },
 {
@@ -37,39 +36,47 @@
  "metadata": {},
  "outputs": [],
  "source": [
+  "import sys\n",
+  "\n",
   "from pathlib import Path\n",
+  "from openvino.tools import mo\n",
+  "from openvino.runtime import serialize\n",
   "from IPython.display import Markdown, display\n",
   "\n",
-  "model_name = \"googlenet-v1\"\n",
+  "sys.path.append(\"../utils\")\n",
+  "\n",
+  "import notebook_utils as utils\n",
+  "\n",
   "base_model_dir = Path(\"./model\").expanduser()\n",
-  "precision = \"FP16\"\n",
   "\n",
-  "download_command = (\n",
-  "    f\"omz_downloader --name {model_name} --output_dir {base_model_dir}\"\n",
-  ")\n",
-  "display(Markdown(f\"Download command: `{download_command}`\"))\n",
-  "display(Markdown(f\"Downloading {model_name}...\"))\n",
+  "model_name = \"bvlc_googlenet\"\n",
+  "caffemodel_name = f'{model_name}.caffemodel'\n",
+  "prototxt_name = f'{model_name}.prototxt'\n",
   "\n",
-  "# For connections that require a proxy server\n",
-  "# uncomment the following two lines and add the correct proxy addresses (if they are required).\n",
-  "# %env https_proxy=http://proxy\n",
-  "# %env http_proxy=http://proxy\n",
+  "caffemodel_path = base_model_dir / caffemodel_name\n",
+  "prototxt_path = base_model_dir / prototxt_name\n",
   "\n",
-  "! $download_command\n",
+  "if not caffemodel_path.exists() or not prototxt_path.exists():\n",
+  "    caffemodel_url = \"http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel\"\n",
+  "    prototxt_url = \"https://raw.githubusercontent.com/BVLC/caffe/88c96189bcbf3853b93e2b65c7b5e4948f9d5f67/models/bvlc_googlenet/deploy.prototxt\"\n",
   "\n",
-  "convert_command = f\"omz_converter --name {model_name} --precisions {precision} --download_dir {base_model_dir}\"\n",
-  "display(Markdown(f\"Convert command: `{convert_command}`\"))\n",
-  "display(Markdown(f\"Converting {model_name}...\"))\n",
+  "    utils.download_file(caffemodel_url, caffemodel_name, base_model_dir)\n",
+  "    utils.download_file(prototxt_url, prototxt_name, base_model_dir)\n",
+  "else:\n",
+  "    print(f'{caffemodel_name} and {prototxt_name} already downloaded to {base_model_dir}')\n",
   "\n",
-  "! $convert_command"
+  "# postprocessing of model\n",
+  "text = prototxt_path.read_text()\n",
+  "text = text.replace('dim: 10', 'dim: 1')\n",
+  "res = prototxt_path.write_text(text)"
  ]
 },
 {
  "cell_type": "markdown",
  "id": "fcfc461c",
  "metadata": {},
  "source": [
-  "## Import modules"
+  "## Import modules and create Core"
  ]
 },
 {
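The new cell leans on the repo's `notebook_utils.download_file` helper. For readers outside the notebook environment, here is a minimal standalone sketch of the same download-and-patch flow, with plain `urllib.request` swapped in for the helper (my substitution for illustration, not the notebook's code):

    # Standalone sketch: fetch the Caffe weights/prototxt and patch the batch size,
    # mirroring the new cell; urllib.request stands in for notebook_utils.download_file.
    import urllib.request
    from pathlib import Path

    base_model_dir = Path("model")
    base_model_dir.mkdir(exist_ok=True)

    files = {
        "bvlc_googlenet.caffemodel": "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel",
        "bvlc_googlenet.prototxt": "https://raw.githubusercontent.com/BVLC/caffe/88c96189bcbf3853b93e2b65c7b5e4948f9d5f67/models/bvlc_googlenet/deploy.prototxt",
    }
    for name, url in files.items():
        if not (base_model_dir / name).exists():
            urllib.request.urlretrieve(url, base_model_dir / name)

    # deploy.prototxt declares batch size 10; the notebook patches it to 1.
    prototxt = base_model_dir / "bvlc_googlenet.prototxt"
    prototxt.write_text(prototxt.read_text().replace("dim: 10", "dim: 1"))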
@@ -92,6 +99,41 @@
   "    display(Markdown('<div class=\"alert alert-block alert-danger\"><b>Warning: </b> A GPU device is not available. This notebook requires GPU device to have meaningful results. </div>'))"
  ]
 },
+{
+ "cell_type": "markdown",
+ "id": "ee513ee2",
+ "metadata": {},
+ "source": [
+  "## Convert the model to OpenVINO IR format\n",
+  "\n",
+  "Use Model Optimizer to convert the Caffe model to OpenVINO IR with `FP16` precision. The model is saved to the `model/ir_model/` directory. For more information about Model Optimizer, see the [Model Optimizer Developer Guide](https://docs.openvino.ai/latest/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html)."
+ ]
+},
+{
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ed2789f7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+  "ir_model_path = base_model_dir / 'ir_model' / f'{model_name}.xml'\n",
+  "model = None\n",
+  "\n",
+  "if not ir_model_path.exists():\n",
+  "    model = mo.convert_model(input_model=base_model_dir / caffemodel_name,\n",
+  "                             input_proto=base_model_dir / prototxt_name,\n",
+  "                             input_shape=[1, 3, 224, 224],\n",
+  "                             layout=\"NCHW\",\n",
+  "                             mean_values=[104.0, 117.0, 123.0],\n",
+  "                             output=\"prob\",\n",
+  "                             compress_to_fp16=True)\n",
+  "    serialize(model, str(ir_model_path))\n",
+  "    print(\"IR model saved to {}\".format(ir_model_path))\n",
+  "else:\n",
+  "    print(\"Read IR model from {}\".format(ir_model_path))\n",
+  "    model = ie.read_model(ir_model_path)"
+ ]
+},
 {
  "cell_type": "markdown",
  "id": "740bfdd8",
@@ -112,9 +154,6 @@
   "# Set LOG_LEVEL to LOG_INFO.\n",
   "ie.set_property(\"AUTO\", {\"LOG_LEVEL\":\"LOG_INFO\"})\n",
   "\n",
-  "# Read the model.\n",
-  "model = ie.read_model(model=\"model/public/googlenet-v1/FP16/googlenet-v1.xml\")\n",
-  "\n",
   "# Load the model onto the target device.\n",
   "compiled_model = ie.compile_model(model=model)\n",
   "\n",

notebooks/108-gpu-device/108-gpu-device.ipynb

Lines changed: 60 additions & 31 deletions
@@ -138,7 +138,9 @@
  "metadata": {},
  "outputs": [],
  "source": [
-  "core.get_property(\"GPU\", \"FULL_DEVICE_NAME\")"
+  "device = \"GPU\"\n",
+  "\n",
+  "core.get_property(device, \"FULL_DEVICE_NAME\")"
  ]
 },
 {
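Hard-coding `device = "GPU"` assumes a GPU plugin is actually present. A small sketch for checking what is available before running the rest of the notebook:

    # Sketch: list every visible device and its human-readable name.
    from openvino.runtime import Core

    core = Core()
    for dev in core.available_devices:
        print(dev, "->", core.get_property(dev, "FULL_DEVICE_NAME"))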
@@ -156,8 +158,6 @@
  "metadata": {},
  "outputs": [],
  "source": [
-  "device = \"GPU\"\n",
-  "\n",
   "print(f\"{device} SUPPORTED_PROPERTIES:\\n\")\n",
   "supported_properties = core.get_property(device, \"SUPPORTED_PROPERTIES\")\n",
   "indent = len(max(supported_properties, key=len))\n",
@@ -230,7 +230,7 @@
  "id": "821e29d1-6021-4552-a926-cb35bd6af776",
  "metadata": {},
  "source": [
-  "In order to compile a model, we actually need one to play with. For this tutorial we will use the [ssdlite_mobilenet_v2 model](https://docs.openvino.ai/latest/omz_models_model_ssdlite_mobilenet_v2.html) from the Open Model Zoo. The following code downloads the model with [Model Downloader](https://docs.openvino.ai/latest/omz_tools_downloader.html#model-downloader-usage) and converts it to OpenVINO IR format using [Model Converter](https://docs.openvino.ai/latest/omz_tools_downloader.html#model-converter-usage)."
+  "This tutorial uses the `ssdlite_mobilenet_v2` model, which performs object detection. The model was trained on the [Common Objects in Context (COCO)](https://cocodataset.org/#home) dataset with 91 object categories. For details, see the [paper](https://arxiv.org/abs/1801.04381)."
  ]
 },
 {
@@ -240,7 +240,9 @@
  "tags": []
 },
 "source": [
-  "#### Download the Model"
+  "#### Download and unpack the Model\n",
+  "\n",
+  "Use the `download_file` function from `notebook_utils` to download the model archive. It automatically creates a directory structure and downloads the selected model. This step is skipped if the archive is already downloaded."
 ]
 },
 {
@@ -250,22 +252,31 @@
  "metadata": {},
  "outputs": [],
  "source": [
+  "import sys\n",
+  "import tarfile\n",
   "from pathlib import Path\n",
   "\n",
-  "# Name of the model from Open Model Zoo\n",
-  "model_name = \"ssdlite_mobilenet_v2\"\n",
+  "sys.path.append(\"../utils\")\n",
   "\n",
-  "# Directory where the model will be downloaded\n",
-  "base_model_dir = Path(\"model\")\n",
+  "import notebook_utils as utils\n",
   "\n",
-  "downloaded_model_path = base_model_dir / \"public\" / model_name\n",
+  "# A directory where the model will be downloaded.\n",
+  "base_model_dir = Path(\"./model\").expanduser()\n",
   "\n",
+  "model_name = \"ssdlite_mobilenet_v2\"\n",
+  "archive_name = Path(f\"{model_name}_coco_2018_05_09.tar.gz\")\n",
+  "\n",
+  "# Download the archive\n",
+  "downloaded_model_path = base_model_dir / archive_name\n",
   "if not downloaded_model_path.exists():\n",
-  "    download_command = f\"omz_downloader \" \\\n",
-  "                       f\"--name {model_name} \" \\\n",
-  "                       f\"--output_dir {base_model_dir} \" \\\n",
-  "                       f\"--cache_dir {base_model_dir}\"\n",
-  "    ! $download_command"
+  "    model_url = f\"http://download.tensorflow.org/models/object_detection/{archive_name}\"\n",
+  "    utils.download_file(model_url, downloaded_model_path.name, downloaded_model_path.parent)\n",
+  "\n",
+  "# Unpack the model\n",
+  "tf_model_path = base_model_dir / archive_name.with_suffix(\"\").stem / \"frozen_inference_graph.pb\"\n",
+  "if not tf_model_path.exists():\n",
+  "    with tarfile.open(downloaded_model_path) as file:\n",
+  "        file.extractall(base_model_dir)"
  ]
 },
 {
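One caveat on the new cell: `tarfile.extractall` trusts member paths, so a hostile archive could write outside `base_model_dir`. An optional hardening sketch (requires Python 3.9+ for `Path.is_relative_to`; the helper name is mine):

    # Sketch: refuse archive members that would escape the target directory.
    import tarfile
    from pathlib import Path

    def safe_extract(archive_path: Path, target_dir: Path) -> None:
        target = target_dir.resolve()
        with tarfile.open(archive_path) as tar:
            for member in tar.getmembers():
                if not (target / member.name).resolve().is_relative_to(target):
                    raise RuntimeError(f"blocked path traversal: {member.name}")
            tar.extractall(target)

    safe_extract(downloaded_model_path, base_model_dir)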
@@ -275,7 +286,9 @@
  "tags": []
 },
 "source": [
-  "#### Convert the Model to OpenVINO IR format"
+  "#### Convert the Model to OpenVINO IR format\n",
+  "\n",
+  "Use Model Optimizer to convert the model to OpenVINO IR with `FP16` precision. The model is saved to the `model/ir_model/` directory. For more information about Model Optimizer, see the [Model Optimizer Developer Guide](https://docs.openvino.ai/latest/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html)."
 ]
 },
 {
@@ -285,17 +298,34 @@
  "metadata": {},
  "outputs": [],
  "source": [
-  "precision = \"FP16\"\n",
+  "from openvino.tools import mo\n",
+  "from openvino.runtime import serialize\n",
+  "from openvino.tools.mo.front import tf as ov_tf_front\n",
   "\n",
-  "# Output path for the conversion\n",
-  "model_path = downloaded_model_path / precision / (model_name + \".xml\")\n",
+  "precision = 'FP16'\n",
   "\n",
+  "# The output path for the conversion.\n",
+  "model_path = base_model_dir / 'ir_model' / f'{model_name}_{precision.lower()}.xml'\n",
+  "\n",
+  "trans_config_path = Path(ov_tf_front.__file__).parent / \"ssd_v2_support.json\"\n",
+  "pipeline_config = base_model_dir / archive_name.with_suffix(\"\").stem / \"pipeline.config\"\n",
+  "\n",
+  "model = None\n",
   "if not model_path.exists():\n",
-  "    convert_command = f\"omz_converter \" \\\n",
-  "                      f\"--name {model_name} \" \\\n",
-  "                      f\"--download_dir {base_model_dir} \" \\\n",
-  "                      f\"--precisions {precision}\"\n",
-  "    ! $convert_command"
+  "    model = mo.convert_model(input_model=tf_model_path,\n",
+  "                             output_dir=base_model_dir / 'ir_model',\n",
+  "                             model_name=f'{model_name}_{precision.lower()}',\n",
+  "                             input_shape=[1, 300, 300, 3],\n",
+  "                             layout='NHWC',\n",
+  "                             compress_to_fp16=True if precision == 'FP16' else False,\n",
+  "                             transformations_config=trans_config_path,\n",
+  "                             tensorflow_object_detection_api_pipeline_config=pipeline_config,\n",
+  "                             reverse_input_channels=True)\n",
+  "    serialize(model, str(model_path))\n",
+  "    print(\"IR model saved to {}\".format(model_path))\n",
+  "else:\n",
+  "    print(\"Read IR model from {}\".format(model_path))\n",
+  "    model = core.read_model(model_path)"
  ]
 },
 {
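As with the 106 notebook, it is worth eyeballing the converted detector before compiling. A sketch using the `model` variable from the cell above (the shapes in the comments follow from the conversion arguments and the usual SSD detection output; treat them as expectations):

    # Sketch: inspect the converted SSDLite IR.
    print("inputs: ", [(i.any_name, i.shape) for i in model.inputs])   # expected [1, 300, 300, 3]
    print("outputs:", [(o.any_name, o.shape) for o in model.outputs])  # typically [1, 1, N, 7] detections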
@@ -323,8 +353,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
-  "model = core.read_model(model=model_path)\n",
-  "compiled_model = core.compile_model(model, \"GPU\")"
+  "compiled_model = core.compile_model(model, device)"
  ]
 },
 {
@@ -373,7 +402,7 @@
   "\n",
   "# Compile the model as before\n",
   "model = core.read_model(model=model_path)\n",
-  "compiled_model = core.compile_model(model, \"GPU\")\n",
+  "compiled_model = core.compile_model(model, device)\n",
   "print(f\"Cache enabled (first time) - compile time: {time.time() - start}s\")"
  ]
 },
@@ -396,13 +425,13 @@
   "core = Core()\n",
   "core.set_property({'CACHE_DIR': 'cache'})\n",
   "model = core.read_model(model=model_path)\n",
-  "compiled_model = core.compile_model(model, \"GPU\")\n",
+  "compiled_model = core.compile_model(model, device)\n",
   "print(f\"Cache enabled - compile time: {time.time() - start}s\")\n",
   "\n",
   "start = time.time()\n",
   "core = Core()\n",
   "model = core.read_model(model=model_path)\n",
-  "compiled_model = core.compile_model(model, \"GPU\")\n",
+  "compiled_model = core.compile_model(model, device)\n",
   "print(f\"Cache disabled - compile time: {time.time() - start}s\")"
  ]
 },
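To reproduce the "first time" number from the earlier cell, the cache has to be cleared between runs; otherwise every later measurement is warm. A small sketch (assumes the 'cache' directory set via CACHE_DIR above):

    # Sketch: wipe the model cache so the next compile is a cold start.
    import shutil
    from pathlib import Path

    if Path("cache").exists():
        shutil.rmtree("cache")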
@@ -447,7 +476,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
-  "compiled_model = core.compile_model(model, \"GPU\", {\"PERFORMANCE_HINT\": \"LATENCY\"})"
+  "compiled_model = core.compile_model(model, device, {\"PERFORMANCE_HINT\": \"LATENCY\"})"
  ]
 },
 {
@@ -465,7 +494,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
-  "compiled_model = core.compile_model(model, \"GPU\", {\"PERFORMANCE_HINT\": \"THROUGHPUT\"})"
+  "compiled_model = core.compile_model(model, device, {\"PERFORMANCE_HINT\": \"THROUGHPUT\"})"
  ]
 },
 {
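The two hints mostly differ in how many parallel infer requests the plugin sizes itself for, which can be observed directly. A sketch reusing `core`, `model`, and `device` from the cells above:

    # Sketch: compare how each performance hint sizes the request pool.
    for hint in ("LATENCY", "THROUGHPUT"):
        cm = core.compile_model(model, device, {"PERFORMANCE_HINT": hint})
        n = cm.get_property("OPTIMAL_NUMBER_OF_INFER_REQUESTS")
        print(f"{hint}: optimal number of infer requests = {n}")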
