|
138 | 138 | "metadata": {}, |
139 | 139 | "outputs": [], |
140 | 140 | "source": [ |
141 | | - "core.get_property(\"GPU\", \"FULL_DEVICE_NAME\")" |
| 141 | + "device = \"GPU\"\n", |
| 142 | + "\n", |
| 143 | + "core.get_property(device, \"FULL_DEVICE_NAME\")" |
142 | 144 | ] |
143 | 145 | }, |
144 | 146 | { |
|
156 | 158 | "metadata": {}, |
157 | 159 | "outputs": [], |
158 | 160 | "source": [ |
159 | | - "device = \"GPU\"\n", |
160 | | - "\n", |
161 | 161 | "print(f\"{device} SUPPORTED_PROPERTIES:\\n\")\n", |
162 | 162 | "supported_properties = core.get_property(device, \"SUPPORTED_PROPERTIES\")\n", |
163 | 163 | "indent = len(max(supported_properties, key=len))\n", |
|
230 | 230 | "id": "821e29d1-6021-4552-a926-cb35bd6af776", |
231 | 231 | "metadata": {}, |
232 | 232 | "source": [ |
233 | | - "In order to compile a model, we actually need one to play with. For this tutorial we will use the [ssdlite_mobilenet_v2 model](https://docs.openvino.ai/latest/omz_models_model_ssdlite_mobilenet_v2.html) from the Open Model Zoo. The following code downloads the model with [Model Downloader](https://docs.openvino.ai/latest/omz_tools_downloader.html#model-downloader-usage) and converts it to OpenVINO IR format using [Model Converter](https://docs.openvino.ai/latest/omz_tools_downloader.html#model-converter-usage)." |
| 233 | + "This tutorial use the model `ssdlite_mobilenet_v2 model`. The ssdlite_mobilenet_v2 model is used for object detection. The model was trained on [Common Objects in Context (COCO)](https://cocodataset.org/#home) dataset version with 91 categories of object. For details, see the [paper](https://arxiv.org/abs/1801.04381)." |
234 | 234 | ] |
235 | 235 | }, |
236 | 236 | { |
|
240 | 240 | "tags": [] |
241 | 241 | }, |
242 | 242 | "source": [ |
243 | | - "#### Download the Model" |
| 243 | + "#### Download and unpack the Model\n", |
| 244 | + "\n", |
| 245 | + "Use the function `download_file` from the `notebook_utils` to download archive with model. It automatically creates a directory structure and downloads the selected model. This step is skipped if the package is already downloaded." |
244 | 246 | ] |
245 | 247 | }, |
246 | 248 | { |
|
250 | 252 | "metadata": {}, |
251 | 253 | "outputs": [], |
252 | 254 | "source": [ |
| 255 | + "import sys\n", |
| 256 | + "import tarfile\n", |
253 | 257 | "from pathlib import Path\n", |
254 | 258 | "\n", |
255 | | - "# Name of the model from Open Model Zoo\n", |
256 | | - "model_name = \"ssdlite_mobilenet_v2\"\n", |
| 259 | + "sys.path.append(\"../utils\")\n", |
257 | 260 | "\n", |
258 | | - "# Directory where the model will be downloaded\n", |
259 | | - "base_model_dir = Path(\"model\")\n", |
| 261 | + "import notebook_utils as utils\n", |
260 | 262 | "\n", |
261 | | - "downloaded_model_path = base_model_dir / \"public\" / model_name\n", |
| 263 | + "# A directory where the model will be downloaded.\n", |
| 264 | + "base_model_dir = Path(\"./model\").expanduser()\n", |
262 | 265 | "\n", |
| 266 | + "model_name = \"ssdlite_mobilenet_v2\"\n", |
| 267 | + "archive_name = Path(f\"{model_name}_coco_2018_05_09.tar.gz\")\n", |
| 268 | + "\n", |
| 269 | + "# Download the archive\n", |
| 270 | + "downloaded_model_path = base_model_dir / archive_name\n", |
263 | 271 | "if not downloaded_model_path.exists():\n", |
264 | | - " download_command = f\"omz_downloader \" \\\n", |
265 | | - " f\"--name {model_name} \" \\\n", |
266 | | - " f\"--output_dir {base_model_dir} \" \\\n", |
267 | | - " f\"--cache_dir {base_model_dir}\"\n", |
268 | | - " ! $download_command" |
| 272 | + " model_url = f\"http://download.tensorflow.org/models/object_detection/{archive_name}\"\n", |
| 273 | + " utils.download_file(model_url, downloaded_model_path.name, downloaded_model_path.parent)\n", |
| 274 | + "\n", |
| 275 | + "# Unpack the model\n", |
| 276 | + "tf_model_path = base_model_dir / archive_name.with_suffix(\"\").stem / \"frozen_inference_graph.pb\"\n", |
| 277 | + "if not tf_model_path.exists():\n", |
| 278 | + " with tarfile.open(downloaded_model_path) as file:\n", |
| 279 | + " file.extractall(base_model_dir)" |
269 | 280 | ] |
270 | 281 | }, |
271 | 282 | { |
|
275 | 286 | "tags": [] |
276 | 287 | }, |
277 | 288 | "source": [ |
278 | | - "#### Convert the Model to OpenVINO IR format" |
| 289 | + "#### Convert the Model to OpenVINO IR format\n", |
| 290 | + "\n", |
| 291 | + "Use Model Optimizer to convert the model to OpenVINO IR with `FP16` precision. The models are saved to the `model/ir_model/` directory. For more information about Model Optimizer, see the [Model Optimizer Developer Guide](https://docs.openvino.ai/latest/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html)." |
279 | 292 | ] |
280 | 293 | }, |
281 | 294 | { |
|
285 | 298 | "metadata": {}, |
286 | 299 | "outputs": [], |
287 | 300 | "source": [ |
288 | | - "precision = \"FP16\"\n", |
| 301 | + "from openvino.tools import mo\n", |
| 302 | + "from openvino.runtime import serialize\n", |
| 303 | + "from openvino.tools.mo.front import tf as ov_tf_front\n", |
289 | 304 | "\n", |
290 | | - "# Output path for the conversion\n", |
291 | | - "model_path = downloaded_model_path / precision / (model_name + \".xml\")\n", |
| 305 | + "precision = 'FP16'\n", |
292 | 306 | "\n", |
| 307 | + "# The output path for the conversion.\n", |
| 308 | + "model_path = base_model_dir / 'ir_model' / f'{model_name}_{precision.lower()}.xml'\n", |
| 309 | + "\n", |
| 310 | + "trans_config_path = Path(ov_tf_front.__file__).parent / \"ssd_v2_support.json\"\n", |
| 311 | + "pipeline_config = base_model_dir / archive_name.with_suffix(\"\").stem / \"pipeline.config\"\n", |
| 312 | + "\n", |
| 313 | + "model = None\n", |
293 | 314 | "if not model_path.exists():\n", |
294 | | - " convert_command = f\"omz_converter \" \\\n", |
295 | | - " f\"--name {model_name} \" \\\n", |
296 | | - " f\"--download_dir {base_model_dir} \" \\\n", |
297 | | - " f\"--precisions {precision}\"\n", |
298 | | - " ! $convert_command" |
| 315 | + " model = mo.convert_model(input_model=tf_model_path,\n", |
| 316 | + " output_dir=base_model_dir / 'ir_model',\n", |
| 317 | + " model_name=f'{model_name}_{precision.lower()}',\n", |
| 318 | + " input_shape=[1, 300, 300, 3],\n", |
| 319 | + " layout='NHWC',\n", |
| 320 | + " compress_to_fp16=True if precision == 'FP16' else False,\n", |
| 321 | + " transformations_config=trans_config_path,\n", |
| 322 | + " tensorflow_object_detection_api_pipeline_config=pipeline_config,\n", |
| 323 | + " reverse_input_channels=True)\n", |
| 324 | + " serialize(model, str(model_path))\n", |
| 325 | + " print(\"IR model saved to {}\".format(model_path))\n", |
| 326 | + "else:\n", |
| 327 | + " print(\"Read IR model from {}\".format(model_path))\n", |
| 328 | + " model = core.read_model(model_path)" |
299 | 329 | ] |
300 | 330 | }, |
301 | 331 | { |
|
323 | 353 | "metadata": {}, |
324 | 354 | "outputs": [], |
325 | 355 | "source": [ |
326 | | - "model = core.read_model(model=model_path)\n", |
327 | | - "compiled_model = core.compile_model(model, \"GPU\")" |
| 356 | + "compiled_model = core.compile_model(model, device)" |
328 | 357 | ] |
329 | 358 | }, |
330 | 359 | { |
|
373 | 402 | "\n", |
374 | 403 | "# Compile the model as before\n", |
375 | 404 | "model = core.read_model(model=model_path)\n", |
376 | | - "compiled_model = core.compile_model(model, \"GPU\")\n", |
| 405 | + "compiled_model = core.compile_model(model, device)\n", |
377 | 406 | "print(f\"Cache enabled (first time) - compile time: {time.time() - start}s\")" |
378 | 407 | ] |
379 | 408 | }, |
|
396 | 425 | "core = Core()\n", |
397 | 426 | "core.set_property({'CACHE_DIR': 'cache'})\n", |
398 | 427 | "model = core.read_model(model=model_path)\n", |
399 | | - "compiled_model = core.compile_model(model, \"GPU\")\n", |
| 428 | + "compiled_model = core.compile_model(model, device)\n", |
400 | 429 | "print(f\"Cache enabled - compile time: {time.time() - start}s\")\n", |
401 | 430 | "\n", |
402 | 431 | "start = time.time()\n", |
403 | 432 | "core = Core()\n", |
404 | 433 | "model = core.read_model(model=model_path)\n", |
405 | | - "compiled_model = core.compile_model(model, \"GPU\")\n", |
| 434 | + "compiled_model = core.compile_model(model, device)\n", |
406 | 435 | "print(f\"Cache disabled - compile time: {time.time() - start}s\")" |
407 | 436 | ] |
408 | 437 | }, |
|
447 | 476 | "metadata": {}, |
448 | 477 | "outputs": [], |
449 | 478 | "source": [ |
450 | | - "compiled_model = core.compile_model(model, \"GPU\", {\"PERFORMANCE_HINT\": \"LATENCY\"})" |
| 479 | + "compiled_model = core.compile_model(model, device, {\"PERFORMANCE_HINT\": \"LATENCY\"})" |
451 | 480 | ] |
452 | 481 | }, |
453 | 482 | { |
|
465 | 494 | "metadata": {}, |
466 | 495 | "outputs": [], |
467 | 496 | "source": [ |
468 | | - "compiled_model = core.compile_model(model, \"GPU\", {\"PERFORMANCE_HINT\": \"THROUGHPUT\"})" |
| 497 | + "compiled_model = core.compile_model(model, device, {\"PERFORMANCE_HINT\": \"THROUGHPUT\"})" |
469 | 498 | ] |
470 | 499 | }, |
471 | 500 | { |
|