diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/.classpath b/Chapter1_AndroidCartoonifier/Cartoonifier_Android/.classpath
deleted file mode 100755
index 2d295fa..0000000
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/.classpath
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-
-
-
-
-
-
-
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/.cproject b/Chapter1_AndroidCartoonifier/Cartoonifier_Android/.cproject
deleted file mode 100755
index dd76b91..0000000
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/.cproject
+++ /dev/null
@@ -1,67 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/.project b/Chapter1_AndroidCartoonifier/Cartoonifier_Android/.project
deleted file mode 100755
index 84b26ff..0000000
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/.project
+++ /dev/null
@@ -1,101 +0,0 @@
-
-
- Cartoonifier
-
-
-
-
-
- org.eclipse.cdt.managedbuilder.core.genmakebuilder
- clean,full,incremental,
-
-
- ?name?
-
-
-
- org.eclipse.cdt.make.core.append_environment
- true
-
-
- org.eclipse.cdt.make.core.autoBuildTarget
- all
-
-
- org.eclipse.cdt.make.core.buildArguments
- ${NDKROOT}/ndk-build V=0
-
-
- org.eclipse.cdt.make.core.buildCommand
- sh
-
-
- org.eclipse.cdt.make.core.cleanBuildTarget
- clean
-
-
- org.eclipse.cdt.make.core.contents
- org.eclipse.cdt.make.core.activeConfigSettings
-
-
- org.eclipse.cdt.make.core.enableAutoBuild
- false
-
-
- org.eclipse.cdt.make.core.enableCleanBuild
- true
-
-
- org.eclipse.cdt.make.core.enableFullBuild
- true
-
-
- org.eclipse.cdt.make.core.fullBuildTarget
- all
-
-
- org.eclipse.cdt.make.core.stopOnError
- true
-
-
- org.eclipse.cdt.make.core.useDefaultBuildCmd
- false
-
-
-
-
- com.android.ide.eclipse.adt.ResourceManagerBuilder
-
-
-
-
- com.android.ide.eclipse.adt.PreCompilerBuilder
-
-
-
-
- org.eclipse.jdt.core.javabuilder
-
-
-
-
- com.android.ide.eclipse.adt.ApkBuilder
-
-
-
-
- org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder
- full,incremental,
-
-
-
-
-
- com.android.ide.eclipse.adt.AndroidNature
- org.eclipse.jdt.core.javanature
- org.eclipse.cdt.core.cnature
- org.eclipse.cdt.core.ccnature
- org.eclipse.cdt.managedbuilder.core.managedBuildNature
- org.eclipse.cdt.managedbuilder.core.ScannerConfigNature
-
-
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/.settings/org.eclipse.jdt.core.prefs b/Chapter1_AndroidCartoonifier/Cartoonifier_Android/.settings/org.eclipse.jdt.core.prefs
deleted file mode 100755
index a630537..0000000
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/.settings/org.eclipse.jdt.core.prefs
+++ /dev/null
@@ -1,12 +0,0 @@
-#Wed Jul 04 06:41:49 PDT 2012
-eclipse.preferences.version=1
-org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
-org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.5
-org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
-org.eclipse.jdt.core.compiler.compliance=1.5
-org.eclipse.jdt.core.compiler.debug.lineNumber=generate
-org.eclipse.jdt.core.compiler.debug.localVariable=generate
-org.eclipse.jdt.core.compiler.debug.sourceFile=generate
-org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
-org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
-org.eclipse.jdt.core.compiler.source=1.5
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/AndroidManifest.xml b/Chapter1_AndroidCartoonifier/Cartoonifier_Android/AndroidManifest.xml
deleted file mode 100755
index 8b0d494..0000000
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/AndroidManifest.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/jni/Android.mk b/Chapter1_AndroidCartoonifier/Cartoonifier_Android/jni/Android.mk
deleted file mode 100755
index d0f337e..0000000
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/jni/Android.mk
+++ /dev/null
@@ -1,41 +0,0 @@
-#/****************************************************************************
-#* Cartoonifier, for Android.
-#*****************************************************************************
-#* by Shervin Emami, 5th Dec 2012 (shervin.emami@gmail.com)
-#* http://www.shervinemami.info/
-#*****************************************************************************
-#* Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-#* Copyright Packt Publishing 2012.
-#* http://www.packtpub.com/cool-projects-with-opencv/book
-#****************************************************************************/
-
-
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-OPENCV_LIB_TYPE:=STATIC
-OPENCV_INSTALL_MODULES:=on
-
-# Path to OpenCV.mk file, which is generated when you build OpenCV for Android.
-# include C:\OpenCV\android\build\OpenCV.mk
-# include ~/OpenCV/android/build/OpenCV.mk
-include ../includeOpenCV.mk
-ifeq ("$(wildcard $(OPENCV_MK_PATH))","")
- #try to load OpenCV.mk from default install location
- include $(TOOLCHAIN_PREBUILT_ROOT)/user/share/OpenCV/OpenCV.mk
-else
- include $(OPENCV_MK_PATH)
-endif
-
-LOCAL_MODULE := cartoonifier
-LOCAL_LDLIBS += -llog -ldl
-
-# Since we have source + headers files in an external folder, we need to show where they are.
-LOCAL_SRC_FILES := jni_part.cpp
-LOCAL_SRC_FILES += ../../Cartoonifier_Desktop/cartoon.cpp
-LOCAL_SRC_FILES += ../../Cartoonifier_Desktop/ImageUtils_0.7.cpp
-LOCAL_C_INCLUDES += $(LOCAL_PATH)/../../Cartoonifier_Desktop
-
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/jni/Application.mk b/Chapter1_AndroidCartoonifier/Cartoonifier_Android/jni/Application.mk
deleted file mode 100755
index c7de9c7..0000000
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/jni/Application.mk
+++ /dev/null
@@ -1,3 +0,0 @@
-APP_STL := gnustl_static
-APP_CPPFLAGS := -frtti -fexceptions
-APP_ABI := armeabi-v7a
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/jni/jni_part.cpp b/Chapter1_AndroidCartoonifier/Cartoonifier_Android/jni/jni_part.cpp
deleted file mode 100755
index 11c0236..0000000
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/jni/jni_part.cpp
+++ /dev/null
@@ -1,120 +0,0 @@
-/*****************************************************************************
-* Cartoonifier, for Android.
-******************************************************************************
-* by Shervin Emami, 5th Dec 2012 (shervin.emami@gmail.com)
-* http://www.shervinemami.info/
-******************************************************************************
-* Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
-* http://www.packtpub.com/cool-projects-with-opencv/book
-*****************************************************************************/
-
-#include
-#include
-#include
-#include
-
-
-#include "cartoon.h"
-#include "ImageUtils.h" // Handy functions for debugging OpenCV images, by Shervin Emami.
-
-
-using namespace std;
-using namespace cv;
-
-extern "C" {
-
-
-// Just show the plain camera image without modifying it.
-JNIEXPORT void JNICALL Java_com_Cartoonifier_CartoonifierView_ShowPreview(JNIEnv* env, jobject,
- jint width, jint height, jbyteArray yuv, jintArray bgra)
-{
- // Get native access to the given Java arrays.
- jbyte* _yuv = env->GetByteArrayElements(yuv, 0);
- jint* _bgra = env->GetIntArrayElements(bgra, 0);
-
- // Prepare a cv::Mat that points to the YUV420sp data.
- Mat myuv(height + height/2, width, CV_8UC1, (uchar *)_yuv);
- // Prepare a cv::Mat that points to the BGRA output data.
- Mat mbgra(height, width, CV_8UC4, (uchar *)_bgra);
-
- // Convert the color format from the camera's
- // NV21 "YUV420sp" format to an Android BGRA color image.
- cvtColor(myuv, mbgra, CV_YUV420sp2BGRA);
-
- // OpenCV can now access/modify the BGRA image if we want ...
-
-
- // Release the native lock we placed on the Java arrays.
- env->ReleaseIntArrayElements(bgra, _bgra, 0);
- env->ReleaseByteArrayElements(yuv, _yuv, 0);
-}
-
-
-DECLARE_TIMING(CartoonifyImage);
-
-
-// Modify the camera image using the Cartoonifier filter.
-JNIEXPORT void JNICALL Java_com_Cartoonifier_CartoonifierView_CartoonifyImage(JNIEnv* env, jobject,
- jint width, jint height, jbyteArray yuv, jintArray bgra,
- jboolean sketchMode, jboolean alienMode, jboolean evilMode, jboolean debugMode)
-{
- START_TIMING(CartoonifyImage);
-
- // Get native access to the given Java arrays.
- jbyte* _yuv = env->GetByteArrayElements(yuv, 0);
- jint* _bgra = env->GetIntArrayElements(bgra, 0);
-
- // Input color format (from camera):
- // "myuv" is the color image in the camera's native NV21 YUV 420 "semi-planar" format, which means
- // the first part of the array is the grayscale pixel array, followed by a quarter-sized pixel
- // array that is the U & V color channels interleaved. So if we just want to access a grayscale
- // image, we can get it directly from the 1st part of a YUV420sp semi-planar image without any
- // conversions. But if we want a color image (eg: BGRA color format that is recommended for OpenCV
- // on Android), then we must convert the color format using cvtColor().
- Mat myuv(height + height/2, width, CV_8UC1, (unsigned char *)_yuv); // Wrapper around the _yuv data.
- Mat mgray(height, width, CV_8UC1, (unsigned char *)_yuv); // Also a wrapper around the _yuv data.
-
- // Output color format (for display):
- // "mbgra" is the color image to be displayed on the Android device, in BGRA format (ie: OpenCV's
- // default BGR which is RGB but in the opposite byte order, but with an extra 0 byte on the end
- // of each pixel, so that each pixel is stored as Blue, Green, Red, 0). You can either do all
- // your processing in OpenCV's default BGR format and then convert your final output from BGR to
- // BGRA before display on the screen, or ideally you can ensure your image processing code can
- // handle BGRA format instead of or in addition to BGR format. This is particularly important if
- // you try to access pixels directly in the image!
- Mat mbgra(height, width, CV_8UC4, (unsigned char *)_bgra);
-
- // Convert the color format from the camera's YUV420sp semi-planar format to a regular BGR color image.
- Mat mbgr(height, width, CV_8UC3); // Allocate a new image buffer.
- cvtColor(myuv, mbgr, CV_YUV420sp2BGR);
-
-
- //--- Beginning of custom C/C++ image processing with OpenCV ---//
- Mat displayedFrame(mbgra.size(), CV_8UC3);
-
- // Use debug type 1 (for mobile) if debug mode is enabled, since we can't show popup GUI windows.
- int debugType = 0;
- if (debugMode)
- debugType = 1;
-
- // Do the C/C++ image processing.
- cartoonifyImage(mbgr, displayedFrame, sketchMode, alienMode, evilMode, debugType);
-
- // Convert back from OpenCV's BGR format to Android's BGRA format, unless if we can handle BGRA in our code.
- cvtColor(displayedFrame, mbgra, CV_BGR2BGRA);
- //--- End of custom C/C++ image processing with OpenCV ---//
-
-
- // Release the native lock we placed on the Java arrays.
- env->ReleaseIntArrayElements(bgra, _bgra, 0);
- env->ReleaseByteArrayElements(yuv, _yuv, 0);
-
- STOP_TIMING(CartoonifyImage);
- // Print the timing info.
- SHOW_TIMING(CartoonifyImage, "CartoonifyImage");
-}
-
-
-
-}//end of extern "C" (global C/C++ functions that aren't part of a C++ Class)
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/project.properties b/Chapter1_AndroidCartoonifier/Cartoonifier_Android/project.properties
deleted file mode 100755
index d79abae..0000000
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/project.properties
+++ /dev/null
@@ -1,11 +0,0 @@
-# This file is automatically generated by Android Tools.
-# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
-#
-# This file must be checked in Version Control Systems.
-#
-# To customize properties used by the Ant build system use,
-# "ant.properties", and override values to adapt the script to your
-# project structure.
-
-# Project target.
-target=android-11
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/res/drawable/icon.png b/Chapter1_AndroidCartoonifier/Cartoonifier_Android/res/drawable/icon.png
deleted file mode 100755
index 4e828ba..0000000
Binary files a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/res/drawable/icon.png and /dev/null differ
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/res/values/strings.xml b/Chapter1_AndroidCartoonifier/Cartoonifier_Android/res/values/strings.xml
deleted file mode 100755
index 93b6b17..0000000
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/res/values/strings.xml
+++ /dev/null
@@ -1,4 +0,0 @@
-
-
- Cartoonifier
-
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/src/com/Cartoonifier/CartoonifierApp.java b/Chapter1_AndroidCartoonifier/Cartoonifier_Android/src/com/Cartoonifier/CartoonifierApp.java
deleted file mode 100755
index a8c12a9..0000000
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/src/com/Cartoonifier/CartoonifierApp.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*****************************************************************************
- * Cartoonifier, for Desktop or Android.
- * by Shervin Emami, 2012 (shervin.emami@gmail.com)
- * http://www.shervinemami.info/
- * Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects"
- * Copyright Packt Publishing 2012.
- *****************************************************************************/
-
-package com.Cartoonifier;
-
-import android.app.Activity;
-import android.app.AlertDialog;
-import android.content.DialogInterface;
-import android.os.Bundle;
-import android.util.Log;
-import android.view.Menu;
-import android.view.MenuItem;
-import android.view.Window;
-import android.view.View;
-import android.view.View.OnTouchListener;
-import android.view.MotionEvent;
-
-public class CartoonifierApp extends Activity implements OnTouchListener {
- private static final String TAG = "CartoonifierApp";
- private CartoonifierView mView;
-
- // Items for the Android menubar:
- private MenuItem mMenuSketch;
- private MenuItem mMenuAlien;
- private MenuItem mMenuEvil;
- private MenuItem mMenuDebug;
-
- public CartoonifierApp() {
- Log.i(TAG, "Instantiated new " + this.getClass());
- }
-
- @Override
- protected void onPause() {
- Log.i(TAG, "onPause");
- super.onPause();
- mView.releaseCamera();
- }
-
- @Override
- protected void onResume() {
- Log.i(TAG, "onResume");
- super.onResume();
- if( !mView.openCamera() ) {
- AlertDialog ad = new AlertDialog.Builder(this).create();
- ad.setCancelable(false); // This blocks the 'BACK' button
- ad.setMessage("Fatal error: can't open camera!");
- ad.setButton("OK", new DialogInterface.OnClickListener() {
- public void onClick(DialogInterface dialog, int which) {
- dialog.dismiss();
- finish();
- }
- });
- ad.show();
- }
- }
-
- /** Called when the activity is first created. */
- @Override
- public void onCreate(Bundle savedInstanceState) {
- Log.i(TAG, "onCreate");
- super.onCreate(savedInstanceState);
- requestWindowFeature(Window.FEATURE_NO_TITLE);
- mView = new CartoonifierView(this);
- setContentView(mView);
-
- // Call our "onTouch()" callback function whenever the user touches the screen.
- mView.setOnTouchListener(this);
- }
-
- /** Called when the menubar is being created by Android. */
- public boolean onCreateOptionsMenu(Menu menu) {
- Log.i(TAG, "onCreateOptionsMenu");
- mMenuSketch = menu.add("Sketch or Painting");
- mMenuAlien = menu.add("Alien or Human");
- mMenuEvil = menu.add("Evil or Good");
- mMenuDebug = menu.add("[Debug mode]");
- return true;
- }
-
- /** Called whenever the user pressed a menu item in the menubar. */
- public boolean onOptionsItemSelected(MenuItem item) {
- Log.i(TAG, "Menu Item selected: " + item);
- if (item == mMenuSketch)
- mView.toggleSketchMode();
- else if (item == mMenuAlien)
- mView.toggleAlienMode();
- else if (item == mMenuEvil)
- mView.toggleEvilMode();
- else if (item == mMenuDebug)
- mView.toggleDebugMode();
- return true;
- }
-
- /** Called whenever the user touches the touchscreen */
-// @Override
- public boolean onTouch(View v, MotionEvent m) {
- // Ignore finger movement event, we just care about when the finger first touches the screen.
- if (m.getAction() != MotionEvent.ACTION_DOWN) {
- return false; // We didn't do anything with this touch movement event.
- }
-
- Log.i(TAG, "onTouch down event");
-
- // Signal that we should cartoonify the next camera frame and save it, instead of just showing the sketch.
- mView.nextFrameShouldBeSaved(getBaseContext());
-
- return false;
- }
-}
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/src/com/Cartoonifier/CartoonifierView.java b/Chapter1_AndroidCartoonifier/Cartoonifier_Android/src/com/Cartoonifier/CartoonifierView.java
deleted file mode 100755
index 829e22f..0000000
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/src/com/Cartoonifier/CartoonifierView.java
+++ /dev/null
@@ -1,235 +0,0 @@
-/*****************************************************************************
- * Cartoonifier, for Desktop or Android.
- * by Shervin Emami, 2012 (shervin.emami@gmail.com)
- * http://www.shervinemami.info/
- * Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects"
- * Copyright Packt Publishing 2012.
- *****************************************************************************/
-
- package com.Cartoonifier;
-
-import android.content.Context;
-import android.graphics.Bitmap;
-
-// For saving Bitmaps to file and the Android picture gallery.
-import android.graphics.Bitmap.CompressFormat;
-import android.net.Uri;
-import android.os.Environment;
-import android.provider.MediaStore;
-import android.provider.MediaStore.Images;
-import android.text.format.DateFormat;
-import android.util.Log;
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-
-// For showing a Notification message when saving a file.
-import android.app.Notification;
-import android.app.NotificationManager;
-import android.app.PendingIntent;
-import android.content.ContentValues;
-import android.content.Intent;
-
-
-class CartoonifierView extends CartoonifierViewBase {
- private static final String TAG = "CartoonifierView";
-
- private int mFrameSize;
- private Bitmap mBitmap;
- private int[] mRGBA;
-
- // Set to true if you want to see line drawings instead of paintings.
- private boolean m_sketchMode = false;
- // Set to true if you want to change the skin color of the character to an alien color.
- private boolean m_alienMode = false;
- // Set to true if you want an evil "bad" character instead of a "good" character.
- private boolean m_evilMode = false;
- // Set to true if you want to see many windows created, showing various debug info. Set to 0 otherwise.
- private boolean m_debugMode = false;
-
- // Whether to cartoonify the next camera frame and save it, instead of just showing the sketch.
- private boolean bSaveThisFrame = false;
- // After processing, don't update the screen for a while, so the user can see the cartoonifier output.
- private boolean bFreezeOutput = false;
- // Set the delay of showing the processed image before displaying the next camera frames.
- private static final int FREEZE_OUTPUT_MSECS = 3000;
-
- private Context mContext; // Activity Context, so we can store to the Android Gallery.
-
- private int mNotificationID = 0; // Notification ID, so we can show a status notification message whenever an image is saved.
-
-
-
- public CartoonifierView(Context context) {
- super(context);
- }
-
- @Override
- protected void onPreviewStarted(int previewWidth, int previewHeight) {
- mFrameSize = previewWidth * previewHeight;
- mRGBA = new int[mFrameSize];
- mBitmap = Bitmap.createBitmap(previewWidth, previewHeight, Bitmap.Config.ARGB_8888);
- }
-
- @Override
- protected void onPreviewStopped() {
- if(mBitmap != null) {
- mBitmap.recycle();
- mBitmap = null;
- }
- mRGBA = null;
- }
-
- // Show a notification message, saying that we've saved another image to the Gallery.
- protected void showNotificationMessage(Context context, String filename)
- {
- // Popup a notification message in the Android status bar. To make sure a notification
- // is shown for each image but only 1 is kept in the statusbar at a time, use a
- // different ID each time but delete previous messages before creating it.
-
- final NotificationManager mgr = (NotificationManager)context.getSystemService(Context.NOTIFICATION_SERVICE);
-
- // Close the previous popup message, so we only have 1 at a time, but it still shows a popup message for each one.
- if (mNotificationID > 0)
- mgr.cancel(mNotificationID);
- mNotificationID++;
-
- Notification notification = new Notification(R.drawable.icon, "Saving to gallery (image " + mNotificationID + ") ...", System.currentTimeMillis());
- Intent intent = new Intent(context, CartoonifierView.class);
- notification.flags |= Notification.FLAG_AUTO_CANCEL; // Close it if the user clicks on it.
- PendingIntent pendingIntent = PendingIntent.getActivity(context, 0, intent, 0);
- notification.setLatestEventInfo(context, "Cartoonifier saved " + mNotificationID + " images to Gallery", "Saved as '" + filename + "'", pendingIntent);
- mgr.notify(mNotificationID, notification);
- }
-
- // Save the processed image as a PNG file on the SD card and shown in the Android Gallery.
- protected void savePNGImageToGallery(Bitmap bmp, Context context, String baseFilename)
- {
- try {
- // Get the file path to the SD card.
- String baseFolder = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_PICTURES).getAbsolutePath() + "/";
- File file = new File(baseFolder + baseFilename);
- Log.i(TAG, "Saving the processed image to file [" + file.getAbsolutePath() + "]");
-
- // Open the file.
- OutputStream out = new BufferedOutputStream(new FileOutputStream(file));
- // Save the image file as PNG.
- bmp.compress(CompressFormat.PNG, 100, out);
- out.flush(); // Make sure it is saved to file soon, because we are about to add it to the Gallery.
- out.close();
-
- // Add the PNG file to the Android Gallery.
- ContentValues image = new ContentValues();
- image.put(Images.Media.TITLE, baseFilename);
- image.put(Images.Media.DISPLAY_NAME, baseFilename);
- image.put(Images.Media.DESCRIPTION, "Processed by the Cartoonifier App");
- image.put(Images.Media.DATE_TAKEN, System.currentTimeMillis()); // Milliseconds since 1970 UTC.
- image.put(Images.Media.MIME_TYPE, "image/png");
- image.put(Images.Media.ORIENTATION, 0);
- image.put(Images.Media.DATA, file.getAbsolutePath());
- Uri result = context.getContentResolver().insert(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, image);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
- }
-
-
- @Override
- protected Bitmap processFrame(byte[] data) {
- int[] rgba = mRGBA;
-
- // Only process the camera or update the screen if we aren’t supposed
- // to just show the cartoon image.
- if (bFreezeOutput) {
- // Only needs to be triggered here once.
- bFreezeOutput = false;
-
- // Wait for several seconds, doing nothing!
- try {
- wait(FREEZE_OUTPUT_MSECS);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- return null;
- }
-
- String baseFilename = "";
- if (!bSaveThisFrame) {
- // Quick preview (show either "sketch" mode or unmodified camera preview).
- if (m_sketchMode) {
- // Process the image using C/C++ code through NDK (JNI).
- CartoonifyImage(getFrameWidth(), getFrameHeight(), data, rgba, m_sketchMode, m_alienMode, m_evilMode, m_debugMode);
- }
- else {
- // Just prepare the camera image for display without any modification. Uses C/C++ code through NDK (JNI).
- ShowPreview(getFrameWidth(), getFrameHeight(), data, rgba);
- }
- }
- else {
- // Just do it once.
- bSaveThisFrame = false;
- // Don't update the screen for a while, so the user can see the cartoonifier output.
- bFreezeOutput = true;
-
- // Generate the filename that we will store it as, so we can display a notification message while it is processing.
- // Get the current date & time
- SimpleDateFormat s = new SimpleDateFormat("yyyy-MM-dd,HH-mm-ss");
- String timestamp = s.format(new Date());
- baseFilename = "Cartoon" + timestamp + ".png";
-
- // Show a notification message, saying that we've saved another image to the Gallery.
- showNotificationMessage(mContext, baseFilename);
-
- // Process the image using C/C++ code through NDK (JNI).
- CartoonifyImage(getFrameWidth(), getFrameHeight(), data, rgba, m_sketchMode, m_alienMode, m_evilMode, m_debugMode);
- }
-
- // Put the processed image into the Bitmap object that will be returned for display on the screen.
- Bitmap bmp = mBitmap;
- bmp.setPixels(rgba, 0/* offset */, getFrameWidth() /* stride */, 0, 0, getFrameWidth(), getFrameHeight());
-
- if (bFreezeOutput) {
- // Save the processed image as a PNG file on the SD card and shown in the Android Gallery.
- savePNGImageToGallery(bmp, mContext, baseFilename);
- }
-
- return bmp;
- }
-
- protected void toggleSketchMode() {
- m_sketchMode = !m_sketchMode;
- }
- protected void toggleAlienMode() {
- m_alienMode = !m_alienMode;
- }
- protected void toggleEvilMode() {
- m_evilMode = !m_evilMode;
- }
- protected void toggleDebugMode() {
- m_debugMode = !m_debugMode;
- }
-
- // Cartoonify the next camera frame and save it, instead of just showing the sketch.
- protected void nextFrameShouldBeSaved(Context context) {
- bSaveThisFrame = true;
- mContext = context;
- }
-
- // Declare the function prototypes of the C/C++ code using NDK (JNI):
-
- // Just show the camera image, without any modification. Converts from YUV 420 planar to BGRA packed format.
- public native void ShowPreview(int width, int height, byte[] yuv, int[] rgba);
-
- // Modify the camera image using the Cartoonifier filter.
- public native void CartoonifyImage(int width, int height, byte[] yuv, int[] rgba, boolean sketchMode, boolean alienMode, boolean evilMode, boolean debugMode);
-
- // Load (dynamically at runtime) the C/C++ code in "libcartoonifier.so" using NDK (JNI).
- static {
- System.loadLibrary("cartoonifier");
- }
-}
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/src/com/Cartoonifier/CartoonifierViewBase.java b/Chapter1_AndroidCartoonifier/Cartoonifier_Android/src/com/Cartoonifier/CartoonifierViewBase.java
deleted file mode 100755
index 675053f..0000000
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Android/src/com/Cartoonifier/CartoonifierViewBase.java
+++ /dev/null
@@ -1,232 +0,0 @@
-/*****************************************************************************
- * Cartoonifier, for Desktop or Android.
- * by Shervin Emami, 2012 (shervin.emami@gmail.com)
- * http://www.shervinemami.info/
- * Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects"
- * Copyright Packt Publishing 2012.
- *****************************************************************************/
-
-package com.Cartoonifier;
-
-import java.io.IOException;
-import java.util.List;
-import android.content.Context;
-import android.graphics.Bitmap;
-import android.graphics.Canvas;
-import android.graphics.ImageFormat;
-import android.graphics.SurfaceTexture;
-import android.hardware.Camera;
-import android.hardware.Camera.PreviewCallback;
-import android.os.Build;
-import android.util.Log;
-import android.view.SurfaceHolder;
-import android.view.SurfaceView;
-
-public abstract class CartoonifierViewBase extends SurfaceView implements SurfaceHolder.Callback, Runnable {
- private static final String TAG = "Cartoonifier::SurfaceView";
-
- private Camera mCamera;
- private SurfaceHolder mHolder;
- private int mFrameWidth;
- private int mFrameHeight;
- private byte[] mFrame;
- private boolean mThreadRun;
- private byte[] mBuffer;
-
- // Signal that a camera frame is ready, without blocking the main thread.
- private boolean mCameraIsInitialized = false;
-
-
- public CartoonifierViewBase(Context context) {
- super(context);
- mHolder = getHolder();
- mHolder.addCallback(this);
- Log.i(TAG, "Instantiated new " + this.getClass());
- }
-
- public int getFrameWidth() {
- return mFrameWidth;
- }
-
- public int getFrameHeight() {
- return mFrameHeight;
- }
-
- public void setPreview() throws IOException {
- if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB)
- mCamera.setPreviewTexture( new SurfaceTexture(10) );
- else
- mCamera.setPreviewDisplay(null);
- }
-
- public boolean openCamera() {
- Log.i(TAG, "openCamera");
- releaseCamera();
- mCamera = Camera.open();
- if(mCamera == null) {
- Log.e(TAG, "Can't open camera!");
- return false;
- }
-
- mCamera.setPreviewCallbackWithBuffer(new PreviewCallback() {
- public void onPreviewFrame(byte[] data, Camera camera) {
- // Whenever a camera preview frame is ready, just copy it straight to our mFrame,
- // and don't worry about blocking the main UI thread until it is safe.
- System.arraycopy(data, 0, mFrame, 0, data.length);
- camera.addCallbackBuffer(mBuffer);
-
- // Signal that a camera frame is ready, without blocking the main thread.
- mCameraIsInitialized = true;
- }
- });
- return true;
- }
-
- public void releaseCamera() {
- Log.i(TAG, "releaseCamera");
- mThreadRun = false;
- synchronized (this) {
- if (mCamera != null) {
- mCamera.stopPreview();
- mCamera.setPreviewCallback(null);
- mCamera.release();
- mCamera = null;
-
- // If this app was paused and restarted, it should wait for camera initialization again.
- mCameraIsInitialized = false;
- }
- }
- onPreviewStopped();
- }
-
- public void setupCamera(int width, int height) {
- Log.i(TAG, "setupCamera(" + width + "x" + height + ")");
- synchronized (this) {
- if (mCamera != null) {
- Camera.Parameters params = mCamera.getParameters();
- List sizes = params.getSupportedPreviewSizes();
- mFrameWidth = width;
- mFrameHeight = height;
-
- // selecting optimal camera preview size that is most similar to the screen height.
- {
- int minDiff = Integer.MAX_VALUE;
- for (Camera.Size size : sizes) {
- Log.i(TAG, "Found Camera Resolution " + size.width + "x" + size.height);
- if (Math.abs(size.height - height) < minDiff) {
- mFrameWidth = size.width;
- mFrameHeight = size.height;
- minDiff = Math.abs(size.height - height);
- }
- }
- }
-
- params.setPreviewSize(getFrameWidth(), getFrameHeight());
-
- List FocusModes = params.getSupportedFocusModes();
- if (FocusModes.contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO))
- {
- params.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO);
- }
-
- mCamera.setParameters(params);
-
- /* Now allocate the buffer */
- params = mCamera.getParameters();
- Log.i(TAG, "Chosen Camera Preview Size: " + params.getPreviewSize().width + "x" + params.getPreviewSize().height);
- int size = params.getPreviewSize().width * params.getPreviewSize().height;
- size = size * ImageFormat.getBitsPerPixel(params.getPreviewFormat()) / 8;
- mBuffer = new byte[size];
- /* The buffer where the current frame will be copied */
- mFrame = new byte [size];
- mCamera.addCallbackBuffer(mBuffer);
-
- try {
- setPreview();
- } catch (IOException e) {
- Log.e(TAG, "mCamera.setPreviewDisplay/setPreviewTexture fails: " + e);
- }
-
- /* Notify that the preview is about to be started and deliver preview size */
- onPreviewStarted(params.getPreviewSize().width, params.getPreviewSize().height);
-
- /* Now we can start a preview */
- mCamera.startPreview();
- }
- }
- }
-
- public void surfaceChanged(SurfaceHolder _holder, int format, int width, int height) {
- Log.i(TAG, "surfaceChanged(). Window size: " + width + "x" + height);
- setupCamera(width, height);
- }
-
- public void surfaceCreated(SurfaceHolder holder) {
- Log.i(TAG, "surfaceCreated");
- (new Thread(this)).start();
- }
-
- public void surfaceDestroyed(SurfaceHolder holder) {
- Log.i(TAG, "surfaceDestroyed");
- releaseCamera();
- }
-
-
- /* The bitmap returned by this method shall be owned by the child and released in onPreviewStopped() */
- protected abstract Bitmap processFrame(byte[] data);
-
- /**
- * This method is called when the preview process is being started. It is called before the first frame delivered and processFrame is called
- * It is called with the width and height parameters of the preview process. It can be used to prepare the data needed during the frame processing.
- * @param previewWidth - the width of the preview frames that will be delivered via processFrame
- * @param previewHeight - the height of the preview frames that will be delivered via processFrame
- */
- protected abstract void onPreviewStarted(int previewWidtd, int previewHeight);
-
- /**
- * This method is called when preview is stopped. When this method is called the preview stopped and all the processing of frames already completed.
- * If the Bitmap object returned via processFrame is cached - it is a good time to recycle it.
- * Any other resources used during the preview can be released.
- */
- protected abstract void onPreviewStopped();
-
- public void run() {
- mThreadRun = true;
- Log.i(TAG, "Starting processing thread");
-
- // Wait until the first camera frame is ready.
- try {
- while (mThreadRun && !mCameraIsInitialized) {
- synchronized (this) {
- wait(100); // wait 100 milliseconds before trying again.
- }
- }
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
-
- // Background processing loop.
- // Each iteration of this loop will process the latest camera image and render it to the
- // screen. Camera frames will be dropped, if it is not processed fast enough. The user
- // interface runs on the "main" thread, so intensive operations here won't delay the user
- // interface or cause "Application Not Responding".
- while (mThreadRun) {
- Bitmap bmp = null;
-
- // Process this frame.
- synchronized (this) {
- bmp = processFrame(mFrame);
- }
-
- // Display this frame (on the "main" UI thread).
- if (bmp != null) {
- Canvas canvas = mHolder.lockCanvas();
- if (canvas != null) {
- canvas.drawBitmap(bmp, (canvas.getWidth() - getFrameWidth()) / 2, (canvas.getHeight() - getFrameHeight()) / 2, null);
- mHolder.unlockCanvasAndPost(canvas);
- }
- }
- }//end of background processing loop.
-
- }
-}
diff --git a/Chapter1_AndroidCartoonifier/README.txt b/Chapter1_AndroidCartoonifier/README.txt
deleted file mode 100755
index e2560cc..0000000
--- a/Chapter1_AndroidCartoonifier/README.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-******************************************************************************
-* Cartoonifier, for Desktop or Android.
-******************************************************************************
-* by Shervin Emami, 5th Dec 2012
-* http://shervinemami.info/openCV.html
-******************************************************************************
-* Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
-* http://www.packtpub.com/cool-projects-with-opencv/book
-******************************************************************************
-
-This folder contains 2 Cartoonifier projects, performing the same task:
-"Cartoonifier_Desktop": A desktop program (works on Windows, Mac, Linux, etc).
-"Cartoonifier_Android": An Android app (requires Android 3.0 SDK or higher, and an Android 2.2 device or higher).
-
-Cartoonifier_Android is a GUI wrapper, so it accesses some of the same C/C++ files in the Cartoonifier_Desktop folder:
- "cartoon.cpp" & "cartoon.h": all of the Cartoonifier image processing.
- "ImageUtils_v0.7.cpp" & "ImageUtils.h": useful functions for debugging OpenCV code.
-
-Each project has code for its user interface and the project files in its own folder:
-
-"Cartoonifier_Desktop" just uses the file "main_desktop.cpp" for its OpenCV user interface on a desktop.
-It includes a CMake project file to allow building with different compilers & versions for Windows, Mac, Linux, etc.
-
-"Cartoonifier_Android" has an Android folder tree for its Android user interface, including:
- Java files in the "src" folder,
- C/C++ NDK files in the "jni" folder,
- app resources in the "res" folder.
-It includes an Eclipse project, that you can use for Android cross-development on Windows, Mac & Linux.
-
-
-----------------------------------------------------------
-Building the Cartoonifier_Desktop project using CMake from the command-line:
-----------------------------------------------------------
-Linux:
- export OpenCV_DIR="~/OpenCV/build"
- mkdir build
- cd build
- cmake -D OpenCV_DIR=$OpenCV_DIR ..
- make
-
-MacOSX (Xcode):
- export OpenCV_DIR="~/OpenCV/build"
- mkdir build
- cd build
- cmake -G Xcode -D OpenCV_DIR=$OpenCV_DIR ..
- open Cartoonifier_Desktop.xcodeproj
-
-Windows (MS Visual Studio):
- set OpenCV_DIR="C:\OpenCV\build"
- mkdir build
- cd build
- cmake -G "Visual Studio 9 2008" -D OpenCV_DIR=%OpenCV_DIR% ..
- start Cartoonifier_Desktop.sln
-
-
-----------------------------------------------------------
-Running the project:
-----------------------------------------------------------
-Just execute "Cartoonifier_Desktop".
-
-
-----------------------------------------------------------
-Building & Running the Cartoonifier_Android project:
-----------------------------------------------------------
-Follow the steps recommended in Chapter 1 of the book.
-
diff --git a/Chapter1_AndroidCartoonifier/screenshot.png b/Chapter1_AndroidCartoonifier/screenshot.png
deleted file mode 100644
index 18e89f5..0000000
Binary files a/Chapter1_AndroidCartoonifier/screenshot.png and /dev/null differ
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/CMakeLists.txt b/Chapter1_EmbeddedCartoonifier/Cartoonifier/CMakeLists.txt
similarity index 82%
rename from Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/CMakeLists.txt
rename to Chapter1_EmbeddedCartoonifier/Cartoonifier/CMakeLists.txt
index 12634dc..fb8e426 100755
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/CMakeLists.txt
+++ b/Chapter1_EmbeddedCartoonifier/Cartoonifier/CMakeLists.txt
@@ -1,12 +1,12 @@
cmake_minimum_required (VERSION 2.6)
-PROJECT(Cartoonifier_Desktop)
+PROJECT(Cartoonifier)
# Requires OpenCV
FIND_PACKAGE( OpenCV REQUIRED )
SET(SRC
- main_desktop.cpp
+ main.cpp
cartoon.cpp
ImageUtils_0.7.cpp
)
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/ImageUtils.h b/Chapter1_EmbeddedCartoonifier/Cartoonifier/ImageUtils.h
similarity index 96%
rename from Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/ImageUtils.h
rename to Chapter1_EmbeddedCartoonifier/Cartoonifier/ImageUtils.h
index 5e60be0..0f3b1a7 100755
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/ImageUtils.h
+++ b/Chapter1_EmbeddedCartoonifier/Cartoonifier/ImageUtils.h
@@ -3,11 +3,11 @@
* Handy utility functions for dealing with images in OpenCV (desktop or Android).
* The most recent version of this will always be available from "http://shervinemami.info/openCV.html"
******************************************************************************
-* by Shervin Emami, 5th Dec 2012 (shervin.emami@gmail.com)
+* by Shervin Emami, 8th Aug 2016 (shervin.emami@gmail.com)
* http://www.shervinemami.info/
******************************************************************************
-* Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
+* Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects", 2nd Edition.
+* Copyright Packt Publishing 2016.
* http://www.packtpub.com/cool-projects-with-opencv/book
*****************************************************************************/
@@ -16,11 +16,10 @@
// OpenCV
-#include
-//#include
-#include
+#include "opencv2/imgproc/imgproc.hpp"
+
#ifdef USE_HIGHGUI
- #include
+ #include "opencv2/highgui/highgui.hpp"
#endif
@@ -53,11 +52,11 @@
#else
// For stdout debug logging, with a new-line character on the end:
#ifndef _MSC_VER
- // Compiles on GCC but not MSVC:
- #define LOG(fmt, args...) do {printf(fmt, ## args); printf("\n"); fflush(stdout);} while (0)
- // #define LOG printf
+ // Compiles on GCC but maybe not MSVC:
+ //#define LOG(fmt, args...) do {printf(fmt, ## args); printf("\n"); fflush(stdout);} while (0)
+ #define LOG(...) do {} while (0)
#else
- #define LOG printf
+ #define LOG(...) do {} while (0)
#endif
#endif
#endif
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/ImageUtils_0.7.cpp b/Chapter1_EmbeddedCartoonifier/Cartoonifier/ImageUtils_0.7.cpp
similarity index 87%
rename from Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/ImageUtils_0.7.cpp
rename to Chapter1_EmbeddedCartoonifier/Cartoonifier/ImageUtils_0.7.cpp
index 33ce552..a7b1285 100755
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/ImageUtils_0.7.cpp
+++ b/Chapter1_EmbeddedCartoonifier/Cartoonifier/ImageUtils_0.7.cpp
@@ -3,11 +3,11 @@
* Handy utility functions for dealing with images in OpenCV (desktop or Android).
* The most recent version of this will always be available from "http://shervinemami.info/openCV.html"
******************************************************************************
-* by Shervin Emami, 5th Dec 2012 (shervin.emami@gmail.com)
+* by Shervin Emami, 8th Aug 2016 (shervin.emami@gmail.com)
* http://www.shervinemami.info/
******************************************************************************
-* Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
+* Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects", 2nd Edition.
+* Copyright Packt Publishing 2016.
* http://www.packtpub.com/cool-projects-with-opencv/book
*****************************************************************************/
@@ -1694,245 +1694,6 @@ IplImage* smoothImageBilateral(const IplImage *src, float smoothness)
return imageOut;
}
-
-// Paste multiple images next to each other as a single image, for saving or displaying.
-// Remember to free the returned image.
-// Sample usage: cvSaveImage("out.png", combineImages(2, img1, img2) );
-// Modified by Shervin from the cvShowManyImages() function on the OpenCVWiki by Parameswaran.
-// 'combineImagesResized()' will resize all images to 300x300, whereas 'combineImages()' doesn't resize the images at all.
-IplImage* combineImagesResized(int nArgs, ...)
-{
- // img - Used for getting the arguments
- IplImage *img;
-
- // DispImage - the image in which input images are to be copied
- IplImage *DispImage;
-
- int size;
- int i;
- int m, n;
- int x, y;
-
- // w - Maximum number of images in a row
- // h - Maximum number of images in a column
- int w, h;
-
- // scale - How much we have to resize the image
- float scale;
- int max;
-
- // If the number of arguments is lesser than 0 or greater than 12
- // return without displaying
- if(nArgs <= 0) {
- printf("Number of arguments too small....\n");
- return NULL;
- }
- else if(nArgs > 12) {
- printf("Number of arguments too large....\n");
- return NULL;
- }
- // Determine the size of the image,
- // and the number of rows/cols
- // from number of arguments
- else if (nArgs == 1) {
- w = h = 1;
- size = 300;
- }
- else if (nArgs == 2) {
- w = 2; h = 1;
- size = 300;
- }
- else if (nArgs == 3 || nArgs == 4) {
- w = 2; h = 2;
- size = 300;
- }
- else if (nArgs == 5 || nArgs == 6) {
- w = 3; h = 2;
- size = 200;
- }
- else if (nArgs == 7 || nArgs == 8) {
- w = 4; h = 2;
- size = 200;
- }
- else {
- w = 4; h = 3;
- size = 150;
- }
-
- // Create a new 3 channel image
- DispImage = cvCreateImage( cvSize(100 + size*w, 60 + size*h), 8, 3 );
-
- // Used to get the arguments passed
- va_list args;
- va_start(args, nArgs);
-
- // Loop for nArgs number of arguments
- for (i = 0, m = 20, n = 20; i < nArgs; i++, m += (20 + size)) {
-
- // Get the Pointer to the IplImage
- img = va_arg(args, IplImage*);
-
- // Make sure a proper image has been obtained
- if(img) {
-
- // Find the width and height of the image
- x = img->width;
- y = img->height;
-
- // Find whether height or width is greater in order to resize the image
- max = (x > y)? x: y;
-
- // Find the scaling factor to resize the image
- scale = (float) ( (float) max / size );
-
- // Used to Align the images
- if( i % w == 0 && m!= 20) {
- m = 20;
- n+= 20 + size;
- }
-
- // Make sure we have a color image. If its greyscale, then convert it to color.
- IplImage *colorImg = 0;
- IplImage *currImg = img;
- if (img->nChannels == 1) {
- colorImg = cvCreateImage(cvSize(img->width, img->height), 8, 3 );
- //std::cout << "[Converting greyscale image " << greyImg->width << "x" << greyImg->height << "px to color for combineImages()]" << std::endl;
- cvCvtColor( img, colorImg, CV_GRAY2BGR );
- currImg = colorImg; // Use the greyscale version as the input.
- }
-
- // Set the image ROI to display the current image
- cvSetImageROI(DispImage, cvRect(m, n, (int)( x/scale ), (int)( y/scale )));
-
- // Resize the input image and copy it to the Single Big Image
- cvResize(currImg, DispImage, CV_INTER_CUBIC);
-
- // Reset the ROI in order to display the next image
- cvResetImageROI(DispImage);
-
- if (colorImg)
- cvReleaseImage(&colorImg);
- }
- else { // This input image is NULL
- //printf("Error in combineImages(): Bad image%d given as argument\n", i);
- //cvReleaseImage(&DispImage); // Release the image and return
- //return NULL;
- }
- }
-
- // End the number of arguments
- va_end(args);
-
- return DispImage;
-}
-
-
-
-// Paste multiple images next to each other as a single image, for saving or displaying.
-// Remember to free the returned image.
-// Sample usage: cvSaveImage("out.png", combineImages(2, img1, img2) );
-// Modified by Shervin from the cvShowManyImages() function on the OpenCVWiki by Parameswaran.
-// 'combineImagesResized()' will resize all images to 300x300, whereas 'combineImages()' doesn't resize the images at all.
-IplImage* combineImages(int nArgs, ...)
-{
- const int MAX_COMBINED_IMAGES = 6;
- int col1Width, col2Width;
- int row1Height, row2Height, row3Height;
- IplImage *imageArray[MAX_COMBINED_IMAGES];
- int xPos[MAX_COMBINED_IMAGES];
- int yPos[MAX_COMBINED_IMAGES];
- int wImg[MAX_COMBINED_IMAGES] = {0}; // image dimensions are assumed to be 0, if they dont exist.
- int hImg[MAX_COMBINED_IMAGES] = {0};
- //int rows, columns; // number of rows & cols of images.
- int wP, hP; // dimensions of the combined image.
- int i;
- int nGoodImages = 0;
- IplImage *combinedImage;
- int B = 5; // Border size, in pixels
-
- // Load all the images that were passed as arguments
- va_list args; // Used to get the arguments passed
- va_start(args, nArgs);
- for (i = 0; i < nArgs; i++) {
- // Get the Pointer to the IplImage
- IplImage *img = va_arg(args, IplImage*);
- // Make sure a proper image has been obtained, and that there aren't too many images already.
- if ((img != 0 && img->width > 0 && img->height > 0) && (nGoodImages < MAX_COMBINED_IMAGES) ) {
- // Add the new image to the array of images
- imageArray[nGoodImages] = img;
- wImg[nGoodImages] = img->width;
- hImg[nGoodImages] = img->height;
- nGoodImages++;
- }
- }
-
- // If the number of arguments is lesser than 0 or greater than 12,
- // return without displaying
- if( nGoodImages <= 0 || nGoodImages > MAX_COMBINED_IMAGES ) {
- printf("Error in combineImages(): Cant display %d of %d images\n", nGoodImages, nArgs);
- return NULL;
- }
-
- // Determine the size of the combined image & number of rows/cols.
- //columns = MIN(nGoodImages, 2); // 1 or 2 columns
- //rows = (nGoodImages-1) / 2; // 1 or 2 or 3 or ... rows
- col1Width = MAX(wImg[0], MAX(wImg[2], wImg[4]));
- col2Width = MAX(wImg[1], MAX(wImg[3], wImg[5]));
- row1Height = MAX(hImg[0], hImg[1]);
- row2Height = MAX(hImg[2], hImg[3]);
- row3Height = MAX(hImg[4], hImg[5]);
- wP = B + col1Width + B + (col2Width ? col2Width + B : 0);
- hP = B + row1Height + B + (row2Height ? row2Height + B : 0) + (row3Height ? row3Height + B : 0);
- xPos[0] = B;
- yPos[0] = B;
- xPos[1] = B + col1Width + B;
- yPos[1] = B;
- xPos[2] = B;
- yPos[2] = B + row1Height + B;
- xPos[3] = B + col1Width + B;
- yPos[3] = B + row1Height + B;
- xPos[4] = B;
- yPos[4] = B + row1Height + B + row2Height + B;
- xPos[5] = B + col1Width + B;
- yPos[5] = B + row1Height + B + row2Height + B;
-
- // Create a new RGB image
- combinedImage = cvCreateImage( cvSize(wP, hP), 8, 3 );
- if (!combinedImage)
- return NULL;
-
- // Clear the background
- cvSet(combinedImage, CV_RGB(50,50,50));
-
- for (i=0; i < nGoodImages; i++) {
- IplImage *img = imageArray[i];
-
- // Make sure we have a color image. If its greyscale, then convert it to color.
- IplImage *colorImg = 0;
- if (img->nChannels == 1) {
- colorImg = cvCreateImage(cvSize(img->width, img->height), 8, 3 );
- cvCvtColor( img, colorImg, CV_GRAY2BGR );
- img = colorImg; // Use the greyscale version as the input.
- }
-
- // Set the image ROI to display the current image
- cvSetImageROI(combinedImage, cvRect(xPos[i], yPos[i], img->width, img->height));
- // Draw this image into its position
- cvCopy(img, combinedImage);
- // Reset the ROI in order to display the next image
- cvResetImageROI(combinedImage);
-
- if (colorImg)
- cvReleaseImage(&colorImg);
- }
-
- // End the number of arguments
- va_end(args);
-
- return combinedImage;
-}
-
-
// Blend color images 'image1' and 'image2' using an 8-bit alpha-blending mask channel.
// Equivalent to this operation on each pixel: imageOut = image1 * (1-(imageAlphaMask/255)) + image2 * (imageAlphaMask/255)
// So if a pixel in imageAlphMask is 0, then that pixel in imageOut will be image1, or if imageAlphaMask is 255 then imageOut is image2,
@@ -2125,20 +1886,3 @@ void saveFloatImage(const char *filename, const IplImage *srcImg)
#endif
}
-// Draw some text onto an image using printf() format.
-void drawText(IplImage *img, CvPoint position, CvScalar color, char *fmt, ...)
-{
- // Write the given arguments to 'szMsg' using printf() style formatting.
- va_list marker;
- char szMsg[1024];
- va_start(marker, fmt);
- vsprintf(szMsg, fmt, marker);
- va_end(marker);
- //marker = marker; // stop warning messages
-
- // Display the text onto the image.
- CvFont font;
- cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.3,0.3, 0, 1, CV_AA);
- cvPutText(img, szMsg, position, &font, color);
-}
-
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/cartoon.cpp b/Chapter1_EmbeddedCartoonifier/Cartoonifier/cartoon.cpp
similarity index 97%
rename from Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/cartoon.cpp
rename to Chapter1_EmbeddedCartoonifier/Cartoonifier/cartoon.cpp
index 4a12938..2047487 100755
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/cartoon.cpp
+++ b/Chapter1_EmbeddedCartoonifier/Cartoonifier/cartoon.cpp
@@ -2,11 +2,11 @@
* cartoon.cpp
* Create a cartoon-like or painting-like image filter.
******************************************************************************
-* by Shervin Emami, 5th Dec 2012 (shervin.emami@gmail.com)
+* by Shervin Emami, 8th Aug 2016 (shervin.emami@gmail.com)
* http://www.shervinemami.info/
******************************************************************************
-* Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
+* Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects", 2nd Edition.
+* Copyright Packt Publishing 2016.
* http://www.packtpub.com/cool-projects-with-opencv/book
*****************************************************************************/
@@ -36,7 +36,7 @@ void cartoonifyImage(Mat srcColor, Mat dst, bool sketchMode, bool alienMode, boo
// Generate a nice edge mask, similar to a pencil line drawing.
Laplacian(srcGray, edges, CV_8U, 5);
threshold(edges, mask, 80, 255, THRESH_BINARY_INV);
- // Mobile cameras usually have lots of noise, so remove small
+ // Tiny cameras usually have lots of noise, so remove small
// dots of black noise from the black & white edge mask.
removePepperNoise(mask);
}
@@ -169,9 +169,10 @@ void changeFacialSkinColor(Mat smallImgBGR, Mat bigEdges, int debugType)
}
-// Remove black dots (upto 4x4 in size) of noise from a pure black & white image.
+// Remove black dots (upto 3x3 in size) of noise from a pure black & white image.
// ie: The input image should be mostly white (255) and just contains some black (0) noise
// in addition to the black (0) edges.
+// Note this can be done using erode & dilate, but the effect isn't as nice.
void removePepperNoise(Mat &mask)
{
// For simplicity, ignore the top & bottom row border.
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/cartoon.h b/Chapter1_EmbeddedCartoonifier/Cartoonifier/cartoon.h
similarity index 87%
rename from Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/cartoon.h
rename to Chapter1_EmbeddedCartoonifier/Cartoonifier/cartoon.h
index 6af77be..2acc2f9 100755
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/cartoon.h
+++ b/Chapter1_EmbeddedCartoonifier/Cartoonifier/cartoon.h
@@ -2,11 +2,11 @@
* cartoon.h
* Create a cartoon-like or painting-like image filter.
******************************************************************************
-* by Shervin Emami, 5th Dec 2012 (shervin.emami@gmail.com)
+* by Shervin Emami, 8th Aug 2016 (shervin.emami@gmail.com)
* http://www.shervinemami.info/
******************************************************************************
-* Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
+* Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects", 2nd Edition.
+* Copyright Packt Publishing 2016.
* http://www.packtpub.com/cool-projects-with-opencv/book
*****************************************************************************/
@@ -18,7 +18,8 @@
#include
// Include OpenCV's C++ Interface
-#include "opencv2/opencv.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/highgui/highgui.hpp"
using namespace cv;
diff --git a/Chapter1_EmbeddedCartoonifier/Cartoonifier/fps_timer.hpp b/Chapter1_EmbeddedCartoonifier/Cartoonifier/fps_timer.hpp
new file mode 100644
index 0000000..9b86143
--- /dev/null
+++ b/Chapter1_EmbeddedCartoonifier/Cartoonifier/fps_timer.hpp
@@ -0,0 +1,63 @@
+/*****************************************************************************
+* Simple FPS event timer
+******************************************************************************
+* by Jason Saragih, 5th Dec 2012
+* http://jsaragih.org/
+******************************************************************************
+* Ch6 of the book "Mastering OpenCV with Practical Computer Vision Projects", 2nd Edition
+* Copyright Packt Publishing 2016.
+* http://www.packtpub.com/cool-projects-with-opencv/book
+*****************************************************************************
+* (Code was re-formatted and re-used by Shervin Emami for Ch1 of the same book)
+*****************************************************************************/
+
+#pragma once
+
+
+#include
+#include "stdio.h" // For 'sprintf()'
+
+
+//==============================================================================
+class fps_timer { //frames/second timer for tracking
+public:
+ int64 t_start; //start time
+ int64 t_end; //end time
+ float fps; //current frames/sec
+ int fnum; //number of frames since @t_start
+
+ fps_timer() { //default constructor
+ this->reset();
+ }
+
+ void increment() { //increment timer index
+ if(fnum >= 29) {
+ t_end = cv::getTickCount();
+ fps = 30.0 / (float(t_end-t_start)/getTickFrequency());
+ t_start = t_end; fnum = 0;
+ } else
+ fnum += 1;
+ }
+
+
+ void reset() { //reset timer
+ t_start = cv::getTickCount();
+ fps = 0;
+ fnum = 0;
+ }
+
+
+ void display_fps(Mat &im, //image to display FPS on
+ Point p = Point(-1,-1)) { //bottom left corner of text
+ char str[256];
+ Point pt;
+ if (p.y < 0)
+ pt = Point(10,im.rows-20);
+ else
+ pt = p;
+ sprintf(str,"%d frames/sec", (int)cvRound(fps));
+ string text = str;
+ putText(im,text,pt,FONT_HERSHEY_SIMPLEX,0.5,Scalar::all(255));
+ }
+
+};
diff --git a/Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/main_desktop.cpp b/Chapter1_EmbeddedCartoonifier/Cartoonifier/main.cpp
similarity index 65%
rename from Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/main_desktop.cpp
rename to Chapter1_EmbeddedCartoonifier/Cartoonifier/main.cpp
index 81a03d9..23e9fb7 100755
--- a/Chapter1_AndroidCartoonifier/Cartoonifier_Desktop/main_desktop.cpp
+++ b/Chapter1_EmbeddedCartoonifier/Cartoonifier/main.cpp
@@ -1,21 +1,23 @@
/*****************************************************************************
-* Cartoonifier_Desktop.cpp, for Desktop.
+* Cartoonifier.cpp: Main GUI for the Cartoonifier application.
* Converts a real-life camera stream to look like a cartoon.
-* This file is for a desktop executable, but the cartoonifier can also be used in an Android / iOS project.
+* This file is for a desktop or embedded Linux executable.
******************************************************************************
-* by Shervin Emami, 5th Dec 2012 (shervin.emami@gmail.com)
+* by Shervin Emami, 8th Aug 2016 (shervin.emami@gmail.com)
* http://www.shervinemami.info/
******************************************************************************
-* Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
+* Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects", 2nd Edition.
+* Copyright Packt Publishing 2016.
* http://www.packtpub.com/cool-projects-with-opencv/book
*****************************************************************************/
// Try to set the camera resolution. Note that this only works for some cameras on
// some computers and only for some drivers, so don't rely on it to work!
-const int DESIRED_CAMERA_WIDTH = 640;
-const int DESIRED_CAMERA_HEIGHT = 480;
+const int DEFAULT_CAMERA_WIDTH = 640;
+const int DEFAULT_CAMERA_HEIGHT = 480;
+
+const char * DEFAULT_CAMERA_NUMBER = "0";
const int NUM_STICK_FIGURE_ITERATIONS = 40; // Sets how long the stick figure face should be shown for skin detection.
@@ -23,7 +25,7 @@ const char *windowName = "Cartoonifier"; // Name shown in the GUI window.
// Set to true if you want to see line drawings instead of paintings.
-bool m_sketchMode = false;
+bool m_sketchMode = true;
// Set to true if you want to change the skin color of the character to an alien color.
bool m_alienMode = false;
// Set to true if you want an evil "bad" character instead of a "good" character.
@@ -35,6 +37,7 @@ bool m_debugMode = false;
#include
#include
+#include // For isdigit()
// Include OpenCV's C++ Interface
#include "opencv2/opencv.hpp"
@@ -43,6 +46,7 @@ bool m_debugMode = false;
//#include "detectObject.h" // Easily detect faces or eyes (using LBP or Haar Cascades).
#include "cartoon.h" // Cartoonify a photo.
#include "ImageUtils.h" // Shervin's handy OpenCV utility functions.
+#include "fps_timer.hpp" // FPS timer by Jason Saragih.
using namespace cv;
using namespace std;
@@ -55,18 +59,29 @@ int m_stickFigureIterations = 0; // Draws a stick figure outline for where the
-// Get access to the webcam.
-void initWebcam(VideoCapture &videoCapture, int cameraNumber)
+// Get access to the webcam or video source. cameraNumber should be a number
+// (eg: "0" or "1") but can also be a video file or stream URL.
+void initCamera(VideoCapture &videoCapture, char* cameraNumber)
{
- // Get access to the default camera.
+ // First try to access to the camera as a camera number such as 0
try { // Surround the OpenCV call by a try/catch block so we can give a useful error message!
- videoCapture.open(cameraNumber);
+ if ( isdigit(cameraNumber[0]) ) {
+ videoCapture.open(atoi(cameraNumber));
+ }
} catch (cv::Exception &e) {}
+
if ( !videoCapture.isOpened() ) {
- cerr << "ERROR: Could not access the camera!" << endl;
- exit(1);
+ // Also try to access to the camera as a video file or URL.
+ try { // Surround the OpenCV call by a try/catch block so we can give a useful error message!
+ videoCapture.open(cameraNumber);
+ } catch (cv::Exception &e) {}
+
+ if ( !videoCapture.isOpened() ) {
+ cerr << "ERROR: Could not access the camera " << cameraNumber << " !" << endl;
+ exit(1);
+ }
}
- cout << "Loaded camera " << cameraNumber << "." << endl;
+ cout << "Loaded camera " << cameraNumber << endl;
}
@@ -101,9 +116,11 @@ void onKeypress(char key)
int main(int argc, char *argv[])
{
- cout << "Cartoonifier, by Shervin Emami (www.shervinemami.info), June 2012." << endl;
+ cout << "Cartoonifier, by Shervin Emami (www.shervinemami.info), June 2016." << endl;
cout << "Converts real-life images to cartoon-like images." << endl;
cout << "Compiled with OpenCV version " << CV_VERSION << endl;
+ cout << "usage: " << argv[0] << " [[camera_number] desired_width desired_height ]" << endl;
+ cout << "default: " << argv[0] << " " << DEFAULT_CAMERA_NUMBER << " " << DEFAULT_CAMERA_WIDTH << " " << DEFAULT_CAMERA_HEIGHT << endl;
cout << endl;
cout << "Keyboard commands (press in the GUI window):" << endl;
@@ -114,23 +131,44 @@ int main(int argc, char *argv[])
cout << " d: change debug mode." << endl;
cout << endl;
+ char *cameraNumber = (char*)DEFAULT_CAMERA_NUMBER;
+ int desiredCameraWidth = DEFAULT_CAMERA_WIDTH;
+ int desiredCameraHeight = DEFAULT_CAMERA_HEIGHT;
+
// Allow the user to specify a camera number, since not all computers will be the same camera number.
- int cameraNumber = 0; // Change this if you want to use a different camera device.
- if (argc > 1) {
- cameraNumber = atoi(argv[1]);
+ int a = 1;
+ if (argc > a) {
+ cameraNumber = argv[a];
+ a++; // Next arg
+
+ // Allow the user to specify camera resolution.
+ if (argc > a) {
+ desiredCameraWidth = atoi(argv[a]);
+ a++; // Next arg
+
+ if (argc > a) {
+ desiredCameraHeight = atoi(argv[a]);
+ a++; // Next arg
+ }
+ }
}
// Get access to the camera.
VideoCapture camera;
- initWebcam(camera, cameraNumber);
+ initCamera(camera, cameraNumber);
// Try to set the camera resolution. Note that this only works for some cameras on
// some computers and only for some drivers, so don't rely on it to work!
- camera.set(CV_CAP_PROP_FRAME_WIDTH, DESIRED_CAMERA_WIDTH);
- camera.set(CV_CAP_PROP_FRAME_HEIGHT, DESIRED_CAMERA_HEIGHT);
+ camera.set(CV_CAP_PROP_FRAME_WIDTH, desiredCameraWidth);
+ camera.set(CV_CAP_PROP_FRAME_HEIGHT, desiredCameraHeight);
// Create a GUI window for display on the screen.
- namedWindow(windowName); // Resizable window, might not work on Windows.
+ namedWindow(windowName, WINDOW_NORMAL); // Fullscreen windows must be _NORMAL
+ // Make our window fullscreen.
+ setWindowProperty(windowName, WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
+
+ // Keep track of the recent FPS status.
+ fps_timer timer;
// Run forever, until the user hits Escape to "break" out of this loop.
while (true) {
@@ -159,6 +197,17 @@ int main(int argc, char *argv[])
m_stickFigureIterations--;
}
+ // Show the current FPS, displayed to the text console
+ timer.increment();
+ if (timer.fnum == 0) {
+ double fps;
+ if (timer.fps < 1.0f)
+ fps = timer.fps; // FPS is a fraction
+ else
+ fps = (int)(timer.fps + 0.5f); // FPS is a large number
+ cout << fps << " FPS" << endl;
+ }
+
imshow(windowName, displayedFrame);
// IMPORTANT: Wait for atleast 20 milliseconds, so that the image can be displayed on the screen!
diff --git a/Chapter1_EmbeddedCartoonifier/README.txt b/Chapter1_EmbeddedCartoonifier/README.txt
new file mode 100755
index 0000000..a8213c9
--- /dev/null
+++ b/Chapter1_EmbeddedCartoonifier/README.txt
@@ -0,0 +1,56 @@
+******************************************************************************
+* Cartoonifier, for Desktop or Android.
+******************************************************************************
+* by Shervin Emami, 5th Dec 2012
+* http://shervinemami.info/openCV.html
+******************************************************************************
+* Ch1 of the book "Mastering OpenCV with Practical Computer Vision Projects"
+* 2nd Edition.
+* Copyright Packt Publishing 2016.
+* http://www.packtpub.com/cool-projects-with-opencv/book
+******************************************************************************
+
+This folder contains the Cartoonifier project, that can be used on desktop
+(works on Windows, Mac, Linux, etc) or embedded (works on Raspberry Pi, etc).
+
+The file "main.cpp" is for the OpenCV user interface and "cartoon.cpp" is for
+the image processing.
+It includes a CMake project file to allow building with different compilers &
+versions for Windows, Mac, Linux, etc.
+
+
+----------------------------------------------------------
+Building the Cartoonifier project using CMake from the command-line:
+(From the "Cartoonifier" folder):
+----------------------------------------------------------
+Embedded (Raspberry Pi, etc):
+ Follow the steps recommended in Chapter 1 of the book.
+
+Desktop Linux:
+ export OpenCV_DIR="~/OpenCV/build"
+ mkdir build
+ cd build
+ cmake -D OpenCV_DIR=$OpenCV_DIR ..
+ make
+
+MacOSX (Xcode):
+ export OpenCV_DIR="~/OpenCV/build"
+ mkdir build
+ cd build
+ cmake -G Xcode -D OpenCV_DIR=$OpenCV_DIR ..
+ open Cartoonifier.xcodeproj
+
+Windows (MS Visual Studio):
+ set OpenCV_DIR="C:\OpenCV\build"
+ mkdir build
+ cd build
+ cmake -G "Visual Studio 9 2008" -D OpenCV_DIR=%OpenCV_DIR% ..
+ start Cartoonifier.sln
+
+
+----------------------------------------------------------
+Running the project:
+(From the "Cartoonifier" folder):
+----------------------------------------------------------
+Just execute "Cartoonifier", such as "./Cartoonifier" in Linux.
+
diff --git a/Chapter1_EmbeddedCartoonifier/install_opencv_from_source.sh b/Chapter1_EmbeddedCartoonifier/install_opencv_from_source.sh
new file mode 100755
index 0000000..ab9993a
--- /dev/null
+++ b/Chapter1_EmbeddedCartoonifier/install_opencv_from_source.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+# Linux script to download & build & install OpenCV from source, and install dependency packages.
+# Should work on Debian or Ubuntu or Linux Mint or Raspbian, whether it is on a desktop PC or an
+# embedded device such as a Raspberry Pi.
+# By Shervin Emami 2016 (www.shervinemami.info)
+
+# Exit the script if any command gives an error.
+set -o errexit
+
+# Set the version of OpenCV that we want.
+VERSION=3.1.0
+echo "This Linux script will download & install OpenCV $VERSION and its dependencies."
+echo "Note that it can take anywhere between 15 minutes (on a PC) and 10 hours (on a RPi 1)!"
+
+echo "Making sure we know where the latest dependency library packages are on the web ..."
+sudo apt-get -y update
+
+echo
+echo "Installing many dependencies ..."
+
+echo "Installing the compiler & build system ..."
+sudo apt-get -y install build-essential make cmake cmake-curses-gui g++ pkg-config
+echo "Installing libav video input/output development libraries ..."
+sudo apt-get -y install libavformat-dev libavutil-dev libswscale-dev
+echo "Installing video4Linux camera development libraries ..."
+sudo apt-get -y install libv4l-dev
+echo "Installing eigen3 math development libraries ..."
+sudo apt-get -y install libeigen3-dev
+echo "Installing OpenGL development libraries (to allow creating graphical windows) ..."
+sudo apt-get -y install libglew1.6-dev
+echo "Installing GTK development libraries (to allow creating graphical windows) ..."
+sudo apt-get -y install libgtk2.0-dev
+
+echo
+echo "Downloading OpenCV $VERSION source code including the contrib modules ..."
+cd ~
+wget --continue --tries=300 -O opencv-${VERSION}.zip https://github.com/Itseez/opencv/archive/${VERSION}.zip
+wget --continue --tries=300 -O opencv_contrib-${VERSION}.zip https://github.com/Itseez/opencv_contrib/archive/${VERSION}.zip
+
+echo "Unzipping OpenCV ..."
+rm -rf ~/opencv-${VERSION} || true
+rm -rf ~/opencv_contrib-${VERSION} || true
+unzip opencv-${VERSION}.zip
+unzip opencv_contrib-${VERSION}.zip
+
+echo
+echo "Configuring OpenCV settings using CMake ..."
+cd ~/opencv-${VERSION}
+mkdir build
+cd build
+cmake -DCMAKE_BUILD_TYPE=RELEASE -DBUILD_EXAMPLES=OFF -DWITH_OPENMP=ON -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DOPENCV_EXTRA_MODULES_PATH=~/opencv_contrib-${VERSION}/modules ..
+
+echo
+# Compile the code across the same number of threads as the CPU has cores,
+# and if there is an error don't exit yet, try again using a single thread, for better error messages.
+CPU_CORES=$(grep -c ^processor /proc/cpuinfo)
+echo "Building OpenCV from source, directly on this machine using ${CPU_CORES} threads. Takes between 15 mins and 8 hours ..."
+make -j ${CPU_CORES} || true
+# Compile any remaining code using a single thread, since it occasionally has trouble using multiple threads.
+make
+
+echo
+echo "Installing OpenCV $VERSION to a system folder ..."
+sudo make install
+sudo ldconfig
+
+echo
+echo "OpenCV installed successfully!"
+
diff --git a/Chapter1_EmbeddedCartoonifier/screenshot.png b/Chapter1_EmbeddedCartoonifier/screenshot.png
new file mode 100644
index 0000000..ece8b46
Binary files /dev/null and b/Chapter1_EmbeddedCartoonifier/screenshot.png differ
diff --git a/Chapter6_NonRigidFaceTracking/src/opencv_hotshots/ft/face_tracker.cpp b/Chapter6_NonRigidFaceTracking/src/opencv_hotshots/ft/face_tracker.cpp
index 6fcf286..a1f6330 100755
--- a/Chapter6_NonRigidFaceTracking/src/opencv_hotshots/ft/face_tracker.cpp
+++ b/Chapter6_NonRigidFaceTracking/src/opencv_hotshots/ft/face_tracker.cpp
@@ -17,39 +17,8 @@
#include
#include "stdio.h" // For 'sprintf()'
#define fl at
-//==============================================================================
-//==============================================================================
-//==============================================================================
-//=============================== fps_timer ====================================
-//==============================================================================
-//==============================================================================
-//==============================================================================
-void
-fps_timer::
-increment()
-{
- if(fnum >= 29) {
- t_end = cv::getTickCount();
- fps = 30.0/(float(t_end-t_start)/getTickFrequency());
- t_start = t_end; fnum = 0;
- }else fnum += 1;
-}
-//==============================================================================
-void
-fps_timer::
-reset(){
-t_start = cv::getTickCount(); fps = 0; fnum = 0;
-}
-//==============================================================================
-void
-fps_timer::
-display_fps(Mat &im,
- Point p)
-{
- char str[256]; Point pt; if(p.y < 0)pt = Point(10,im.rows-20); else pt = p;
- sprintf(str,"%d frames/sec",(int)cvRound(fps)); string text = str;
- putText(im,text,pt,FONT_HERSHEY_SIMPLEX,0.5,Scalar::all(255));
-}
+#include "opencv_hotshots/ft/fps_timer.hpp"
+
//==============================================================================
//==============================================================================
//==============================================================================
diff --git a/Chapter6_NonRigidFaceTracking/src/opencv_hotshots/ft/face_tracker.hpp b/Chapter6_NonRigidFaceTracking/src/opencv_hotshots/ft/face_tracker.hpp
index e99678e..250d1b1 100755
--- a/Chapter6_NonRigidFaceTracking/src/opencv_hotshots/ft/face_tracker.hpp
+++ b/Chapter6_NonRigidFaceTracking/src/opencv_hotshots/ft/face_tracker.hpp
@@ -17,24 +17,7 @@
#include "opencv_hotshots/ft/patch_model.hpp"
#include "opencv_hotshots/ft/shape_model.hpp"
#include "opencv_hotshots/ft/face_detector.hpp"
-//==============================================================================
-class fps_timer{ //frames/second timer for tracking
-public:
- int64 t_start; //start time
- int64 t_end; //end time
- float fps; //current frames/sec
- int fnum; //number of frames since @t_start
-
- fps_timer(){this->reset();} //default constructor
-
- void increment(); //increment timer index
-
- void reset(); //reset timer
-
- void
- display_fps(Mat &im, //image to display FPS on
- Point p = Point(-1,-1)); //bottom left corner of text
-};
+#include "opencv_hotshots/ft/fps_timer.hpp"
//==============================================================================
class face_tracker_params{ //face tracking parameters
public:
diff --git a/Chapter6_NonRigidFaceTracking/src/opencv_hotshots/ft/fps_timer.hpp b/Chapter6_NonRigidFaceTracking/src/opencv_hotshots/ft/fps_timer.hpp
new file mode 100644
index 0000000..9b86143
--- /dev/null
+++ b/Chapter6_NonRigidFaceTracking/src/opencv_hotshots/ft/fps_timer.hpp
@@ -0,0 +1,63 @@
+/*****************************************************************************
+* Simple FPS event timer
+******************************************************************************
+* by Jason Saragih, 5th Dec 2012
+* http://jsaragih.org/
+******************************************************************************
+* Ch6 of the book "Mastering OpenCV with Practical Computer Vision Projects", 2nd Edition
+* Copyright Packt Publishing 2016.
+* http://www.packtpub.com/cool-projects-with-opencv/book
+*****************************************************************************
+* (Code was re-formatted and re-used by Shervin Emami for Ch1 of the same book)
+*****************************************************************************/
+
+#pragma once
+
+
+#include
+#include "stdio.h" // For 'sprintf()'
+
+
+//==============================================================================
+class fps_timer { //frames/second timer for tracking
+public:
+ int64 t_start; //start time
+ int64 t_end; //end time
+ float fps; //current frames/sec
+ int fnum; //number of frames since @t_start
+
+ fps_timer() { //default constructor
+ this->reset();
+ }
+
+ void increment() { //increment timer index
+ if(fnum >= 29) {
+ t_end = cv::getTickCount();
+ fps = 30.0 / (float(t_end-t_start)/getTickFrequency());
+ t_start = t_end; fnum = 0;
+ } else
+ fnum += 1;
+ }
+
+
+ void reset() { //reset timer
+ t_start = cv::getTickCount();
+ fps = 0;
+ fnum = 0;
+ }
+
+
+ void display_fps(Mat &im, //image to display FPS on
+ Point p = Point(-1,-1)) { //bottom left corner of text
+ char str[256];
+ Point pt;
+ if (p.y < 0)
+ pt = Point(10,im.rows-20);
+ else
+ pt = p;
+ sprintf(str,"%d frames/sec", (int)cvRound(fps));
+ string text = str;
+ putText(im,text,pt,FONT_HERSHEY_SIMPLEX,0.5,Scalar::all(255));
+ }
+
+};
diff --git a/Chapter7_HeadPoseEstimation/CMakeLists.txt b/Chapter7_HeadPoseEstimation/CMakeLists.txt
index 04f856b..76a279a 100755
--- a/Chapter7_HeadPoseEstimation/CMakeLists.txt
+++ b/Chapter7_HeadPoseEstimation/CMakeLists.txt
@@ -3,7 +3,9 @@ cmake_minimum_required (VERSION 2.6)
PROJECT(HeadOrientation)
# Requires OpenCV
-FIND_PACKAGE( OpenCV REQUIRED )
+FIND_PACKAGE( OpenCV 3 )
+
+MESSAGE ( ${OpenCV_VERSION} )
SET(SRC
main.cpp
diff --git a/Chapter7_HeadPoseEstimation/PAW.cpp b/Chapter7_HeadPoseEstimation/PAW.cpp
index 827aacf..3fb9e6d 100755
--- a/Chapter7_HeadPoseEstimation/PAW.cpp
+++ b/Chapter7_HeadPoseEstimation/PAW.cpp
@@ -1,377 +1,305 @@
#include "PAW.h"
#include "Triangle.h"
+#include
#include
#include
#include
PAW::PAW(Mat srcLandmarks, Mat dstLandmarks, int width, int height)
{
- this->srcLandmarks = srcLandmarks;
- this->dstLandmarks = dstLandmarks;
- this->baseImageWidth = width;
- this->baseImageHeight = height;
- nLandmarks = srcLandmarks.rows;
- init();
+ this->srcLandmarks = srcLandmarks;
+ this->dstLandmarks = dstLandmarks;
+ this->baseImageWidth = width;
+ this->baseImageHeight = height;
+ nLandmarks = srcLandmarks.rows;
+ init();
}
-Mat PAW::getSourceLandmarks(){
- return this->srcLandmarks;
+Mat PAW::getSourceLandmarks() {
+ return this->srcLandmarks;
}
-Mat PAW::getDestLandmarks(){
- return this->dstLandmarks;
+Mat PAW::getDestLandmarks() {
+ return this->dstLandmarks;
}
-int PAW::getNLandmarks(){
- return nLandmarks;
+int PAW::getNLandmarks() {
+ return nLandmarks;
}
-int PAW::getBaseImageWidth(){
- return baseImageWidth;
+int PAW::getBaseImageWidth() {
+ return baseImageWidth;
}
-int PAW::getBaseImageHeight(){
- return baseImageHeight;
+int PAW::getBaseImageHeight() {
+ return baseImageHeight;
}
-map& PAW::getMapPointTriangle(){
- return mapPointTriangle;
+map& PAW::getMapPointTriangle() {
+ return mapPointTriangle;
}
-Mat PAW::getWarp(){
- return warp;
+Mat PAW::getWarp() {
+ return warp;
}
-void PAW::calculateWarpMatrix(){
- double x, y, xi, yi, xj, yj, xk, yk, xi0, yi0, xj0, yj0, xk0, yk0;
- double a1,a2,a3,a4,a5,a6;
- int index[3];
- warp = cv::Mat(nTriangles, 6, CV_64FC1);
-
- for(int i=0;i(i,0);
- index[1] = triangles.at(i,1);
- index[2] = triangles.at(i,2);
- xi0 = srcLandmarks.at(index[0],0);
- yi0 = srcLandmarks.at(index[0],1);
- xj0 = srcLandmarks.at(index[1],0);
- yj0 = srcLandmarks.at(index[1],1);
- xk0 = srcLandmarks.at(index[2],0);
- yk0 = srcLandmarks.at(index[2],1);
-
- xi = dstLandmarks.at(index[0],0);
- yi = dstLandmarks.at(index[0],1);
- xj = dstLandmarks.at(index[1],0);
- yj = dstLandmarks.at(index[1],1);
- xk = dstLandmarks.at(index[2],0);
- yk = dstLandmarks.at(index[2],1);
-
- a1 = (xi*xj0*yk0 - xi*xk0*yj0 - xi0*xj*yk0 + xi0*xk*yj0 + xj*xk0*yi0 - xj0*xk*yi0)/(xi0*yj0 - xj0*yi0 - xi0*yk0 + xk0*yi0 + xj0*yk0 - xk0*yj0);
- a2 = (xi*yj0 - xj*yi0 - xi*yk0 + xk*yi0 + xj*yk0 - xk*yj0)/(xi0*yj0 - xj0*yi0 - xi0*yk0 + xk0*yi0 + xj0*yk0 - xk0*yj0);
- a3 = -(xi*xj0 - xi0*xj - xi*xk0 + xi0*xk + xj*xk0 - xj0*xk)/(xi0*yj0 - xj0*yi0 - xi0*yk0 + xk0*yi0 + xj0*yk0 - xk0*yj0);
-
- a4 = -(xi0*yj*yk0 - xi0*yj0*yk - xj0*yi*yk0 + xj0*yi0*yk + xk0*yi*yj0 - xk0*yi0*yj)/(xi0*yj0 - xj0*yi0 - xi0*yk0 + xk0*yi0 + xj0*yk0 - xk0*yj0);
- a5 = (yi*yj0 - yi0*yj - yi*yk0 + yi0*yk + yj*yk0 - yj0*yk)/(xi0*yj0 - xj0*yi0 - xi0*yk0 + xk0*yi0 + xj0*yk0 - xk0*yj0);
- a6 = (xi0*yj - xj0*yi - xi0*yk + xk0*yi + xj0*yk - xk0*yj)/(xi0*yj0 - xj0*yi0 - xi0*yk0 + xk0*yi0 + xj0*yk0 - xk0*yj0);
-
- warp.at(i,0) = a1;
- warp.at(i,1) = a2;
- warp.at(i,2) = a3;
- warp.at(i,3) = a4;
- warp.at(i,4) = a5;
- warp.at(i,5) = a6;
+void PAW::calculateWarpMatrix() {
+ double x, y, xi, yi, xj, yj, xk, yk, xi0, yi0, xj0, yj0, xk0, yk0;
+ double a1, a2, a3, a4, a5, a6;
+ int index[3];
+ warp = cv::Mat(nTriangles, 6, CV_64FC1);
+
+ for (int i = 0;i < nTriangles;i++) {
+ index[0] = triangles.at(i, 0);
+ index[1] = triangles.at(i, 1);
+ index[2] = triangles.at(i, 2);
+ xi0 = srcLandmarks.at(index[0], 0);
+ yi0 = srcLandmarks.at(index[0], 1);
+ xj0 = srcLandmarks.at(index[1], 0);
+ yj0 = srcLandmarks.at(index[1], 1);
+ xk0 = srcLandmarks.at(index[2], 0);
+ yk0 = srcLandmarks.at(index[2], 1);
+
+ xi = dstLandmarks.at(index[0], 0);
+ yi = dstLandmarks.at(index[0], 1);
+ xj = dstLandmarks.at(index[1], 0);
+ yj = dstLandmarks.at(index[1], 1);
+ xk = dstLandmarks.at(index[2], 0);
+ yk = dstLandmarks.at(index[2], 1);
+
+ a1 = (xi*xj0*yk0 - xi*xk0*yj0 - xi0*xj*yk0 + xi0*xk*yj0 + xj*xk0*yi0 - xj0*xk*yi0) / (xi0*yj0 - xj0*yi0 - xi0*yk0 + xk0*yi0 + xj0*yk0 - xk0*yj0);
+ a2 = (xi*yj0 - xj*yi0 - xi*yk0 + xk*yi0 + xj*yk0 - xk*yj0) / (xi0*yj0 - xj0*yi0 - xi0*yk0 + xk0*yi0 + xj0*yk0 - xk0*yj0);
+ a3 = -(xi*xj0 - xi0*xj - xi*xk0 + xi0*xk + xj*xk0 - xj0*xk) / (xi0*yj0 - xj0*yi0 - xi0*yk0 + xk0*yi0 + xj0*yk0 - xk0*yj0);
+
+ a4 = -(xi0*yj*yk0 - xi0*yj0*yk - xj0*yi*yk0 + xj0*yi0*yk + xk0*yi*yj0 - xk0*yi0*yj) / (xi0*yj0 - xj0*yi0 - xi0*yk0 + xk0*yi0 + xj0*yk0 - xk0*yj0);
+ a5 = (yi*yj0 - yi0*yj - yi*yk0 + yi0*yk + yj*yk0 - yj0*yk) / (xi0*yj0 - xj0*yi0 - xi0*yk0 + xk0*yi0 + xj0*yk0 - xk0*yj0);
+ a6 = (xi0*yj - xj0*yi - xi0*yk + xk0*yi + xj0*yk - xk0*yj) / (xi0*yj0 - xj0*yi0 - xi0*yk0 + xk0*yi0 + xj0*yk0 - xk0*yj0);
+
+ warp.at(i, 0) = a1;
+ warp.at(i, 1) = a2;
+ warp.at(i, 2) = a3;
+ warp.at(i, 3) = a4;
+ warp.at(i, 4) = a5;
+ warp.at(i, 5) = a6;
+
+ }
- }
-
}
-vector PAW::getPointsInsideHull(){
- return pointsInsideHull;
+vector PAW::getPointsInsideHull() {
+ return pointsInsideHull;
}
/*
- This method will calculate the convex hull of source landmarks
- and populate the pointsInsideHull vector with these points coordinates
+ This method will calculate the convex hull of source landmarks
+ and populate the pointsInsideHull vector with these points coordinates
*/
-void PAW::populatePointsInsideHull(){
- //calc scrLandmarks convex hull
- CvPoint* pointsHull = (CvPoint*)malloc( nLandmarks * sizeof(pointsHull[0]));
- int* hull = (int*)malloc( nLandmarks * sizeof(hull[0]));
- CvMat pointMat = cvMat( 1, nLandmarks, CV_32SC2, pointsHull );
- CvMat hullMat = cvMat( 1, nLandmarks, CV_32SC1, hull );
-
-
- for(int i = 0; i < nLandmarks; i++ )
- {
- pointsHull[i] = cvPoint(srcLandmarks.at(i,0),srcLandmarks.at(i,1));
- }
- cvConvexHull2( &pointMat, &hullMat, CV_CLOCKWISE, 0 );
- int hullcount = hullMat.cols;
-
- CvPoint* pointsHullFinal = (CvPoint*)malloc( hullcount * sizeof(pointsHullFinal[0]));
-
-
- for(int i = 0; i < hullcount; i++ ){
- int ptIndex = hull[i];
- CvPoint pt = cvPoint( srcLandmarks.at(ptIndex,0),
- srcLandmarks.at(ptIndex,1));
-
- pointsHullFinal[i] = pt;
- }
+void PAW::populatePointsInsideHull() {
+ //calc scrLandmarks convex hull
+ CvPoint* pointsHull = (CvPoint*)malloc(nLandmarks * sizeof(pointsHull[0]));
+ int* hull = (int*)malloc(nLandmarks * sizeof(hull[0]));
+ CvMat pointMat = cvMat(1, nLandmarks, CV_32SC2, pointsHull);
+ CvMat hullMat = cvMat(1, nLandmarks, CV_32SC1, hull);
- CvMat hullMatPoints = cvMat( 1, hullcount, CV_32SC2, pointsHullFinal);
-
- //check if point belongs
- for (int j=0;j=0){
- pointsInsideHull.push_back(cvPoint(i,j));
- }
- }
- }
+
+ for (int i = 0; i < nLandmarks; i++)
+ {
+ pointsHull[i] = cvPoint(srcLandmarks.at(i, 0), srcLandmarks.at(i, 1));
+ }
+ cvConvexHull2(&pointMat, &hullMat, CV_CLOCKWISE, 0);
+ int hullcount = hullMat.cols;
+
+ CvPoint* pointsHullFinal = (CvPoint*)malloc(hullcount * sizeof(pointsHullFinal[0]));
+
+
+ for (int i = 0; i < hullcount; i++) {
+ int ptIndex = hull[i];
+ CvPoint pt = cvPoint(srcLandmarks.at(ptIndex, 0),
+ srcLandmarks.at(ptIndex, 1));
+
+ pointsHullFinal[i] = pt;
+ }
+
+ CvMat hullMatPoints = cvMat(1, hullcount, CV_32SC2, pointsHullFinal);
+
+ //check if point belongs
+ for (int j = 0;j < baseImageHeight;j++) {
+ for (int i = 0;i < baseImageWidth;i++) {
+
+ double distance = cvPointPolygonTest(&hullMatPoints, cvPoint2D32f(i, j), 1);
+ if (distance >= 0) {
+ pointsInsideHull.push_back(cvPoint(i, j));
+ }
+ }
+ }
}
/*
- This function uses Delaunay triangulation to populate the
- triangles matrix
+ This function uses Delaunay triangulation to populate the
+ triangles matrix
*/
-void PAW::triangulate(){
- CvMemStorage* storage;
- CvSubdiv2D* subdiv;
- IplImage* img;
-
- int par;
-
- std::vector points;
- vector triangleVertices;
-
-
- CvRect rect = { 0, 0, baseImageWidth, baseImageHeight};
- storage = cvCreateMemStorage(0);
- subdiv = cvCreateSubdivDelaunay2D(rect,storage);
-
-
- //insert srcLandmark points in Delaunay subdivision
- for(int i=0;i(i,0);
- double y = srcLandmarks.at(i,1);
- points.push_back(cvPoint(srcLandmarks.at(i,0),srcLandmarks.at(i,1)));
- CvPoint2D32f fp = cvPoint2D32f(x, y);
- cvSubdivDelaunay2DInsert( subdiv, fp );
- }
+void PAW::triangulate() {
+ //Opencv3 CvMemStorage* storage;
+ Subdiv2D* subdiv;
+ IplImage* img;
+
+ int par;
+
+ std::vector points;
+ vector triangleVertices;
+
+
+ CvRect rect = { 0, 0, baseImageWidth, baseImageHeight };
+ //Opencv3 storage = cvCreateMemStorage(0);
+ //Opencv3 subdiv = cvCreateSubdivDelaunay2D(rect,storage);
+
+ subdiv = new Subdiv2D(rect);
+
+
+ //insert srcLandmark points in Delaunay subdivision
+ for (int i = 0;i < nLandmarks;i++) {
+ double x = srcLandmarks.at(i, 0);
+ double y = srcLandmarks.at(i, 1);
+ points.push_back(cvPoint(srcLandmarks.at(i, 0), srcLandmarks.at(i, 1)));
+ CvPoint2D32f fp = cvPoint2D32f(x, y);
+ //Opencv3 cvSubdivDelaunay2DInsert( subdiv, fp );
+ subdiv->insert(fp);
+ }
-
-
- CvNextEdgeType triangleDirections[2] = {CV_NEXT_AROUND_LEFT,CV_NEXT_AROUND_RIGHT};
-
- for(int tdi = 0;tdi<2;tdi++){
- CvNextEdgeType triangleDirection = triangleDirections[tdi];
-
- IplImage* triangleFrame = cvCreateImage(cvSize(baseImageWidth,baseImageHeight),IPL_DEPTH_32F,3);
-
- CvScalar delaunay_color, voronoi_color;
- delaunay_color = CV_RGB( 200,0,0);
- voronoi_color = CV_RGB(0, 200, 0);
-
-
- CvSeqReader reader;
- int i, total = subdiv->edges->total;
- int elem_size = subdiv->edges->elem_size;
-
- cvStartReadSeq( (CvSeq*)(subdiv->edges), &reader, 0 );
-
- CvPoint buf[3];
- printf("Total %d\n",total);
- for( i = 0; i < total; i++ )
- {
- CvQuadEdge2D* edge = (CvQuadEdge2D*)(reader.ptr);
-
- if( CV_IS_SET_ELEM( edge ))
- {
- //draw_subdiv_edge( img, (CvSubdiv2DEdge)edge + 1, voronoi_color );
-
- //TODO optimize this part of code, since we could use a map (and put order) or get points index from delaunay subdiv
- //if(i==par){
- CvSubdiv2DEdge t = (CvSubdiv2DEdge)edge ;
- int shouldPaint=1;
- for(int j=0;j<3;j++){
-
- CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
- if( !pt ) break;
- buf[j] = cvPoint( cvRound(pt->pt.x), cvRound(pt->pt.y));
- t = cvSubdiv2DGetEdge( t, triangleDirection );
- if((pt->pt.x<0)||(pt->pt.x>baseImageWidth))
- shouldPaint=0;
- if((pt->pt.y<0)||(pt->pt.y>baseImageHeight))
- shouldPaint=0;
- }
- if(shouldPaint){
- //cvFillConvexPoly( img, buf, 3, CV_RGB(0,.1+10.0/255.0,0), CV_AA, 0 );
- int originalVertices[3];
- for(int j=0;j<3;j++){
- int px = buf[j].x;
- int py = buf[j].y;
- for(int k=0;k(imageIndex,originalVertices[0]*2);
- int p1y = pcaSet.at(imageIndex,originalVertices[0]*2+1);
-
- int p2x = pcaSet.at(imageIndex,originalVertices[1]*2);
- int p2y = pcaSet.at(imageIndex,originalVertices[1]*2+1);
-
- int p3x = pcaSet.at(imageIndex,originalVertices[2]*2);
- int p3y = pcaSet.at(imageIndex,originalVertices[2]*2+1);
-
-
-
- Point2f srcTri[3];
- Point2f dstTri[3];
-
- srcTri[0] = Point2f( p1x, p1y );
- srcTri[1] = Point2f( p2x, p2y );
- srcTri[2] = Point2f( p3x, p3y );
-
- dstTri[0] = Point2f( buf[0].x, buf[0].y );
- dstTri[1] = Point2f( buf[1].x, buf[1].y );
- dstTri[2] = Point2f( buf[2].x, buf[2].y );
- */
- //warpTextureFromTriangle(srcTri, originalImage, dstTri, warp_final);
-
- /*cvLine(new IplImage(warp_final),cvPoint(p1x,p1y),cvPoint(p2x,p2y),CV_RGB(0,255,0),1,8,0);
- cvLine(new IplImage(warp_final),cvPoint(p2x,p2y),cvPoint(p3x,p3y),CV_RGB(0,255,0),1,8,0);
- cvLine(new IplImage(warp_final),cvPoint(p3x,p3y),cvPoint(p1x,p1y),CV_RGB(0,255,0),1,8,0);*/
- }
-
-
- //draw_subdiv_edge( triangleFrame, (CvSubdiv2DEdge)edge, delaunay_color );
- }
- CV_NEXT_SEQ_ELEM( elem_size, reader );
- }
-
- //string num = static_cast( &(ostringstream() << countFrame++) )->str();
- //imshow("Warped final "+ num,warp_final);
-
- //clean up repeated triangles
-
-
-
- set triangleSet;
- for(int i=0;i::iterator it;
- int count=0;
- for (it=triangleSet.begin(); it!=triangleSet.end(); it++){
- cout << (*it).v1 << " " << (*it).v2 << " " << (*it).v3 << endl;
- triangles.at(count,0) = ( (*it).v1);
- triangles.at(count,1) = ( (*it).v2);
- triangles.at(count,2) = ( (*it).v3);
- count++;
- }
- cout << endl;
- nTriangles = count;
-
- Mat triangleMat(triangleFrame);
- imshow("Triangle frame",triangleMat);
-
- populatePointTriangleMap();
+
+
+
+ //clean up repeated triangles
+
+
+
+ set triangleSet;
+ for (int i = 0;i < triangleVertices.size() / 3;i += 1) {
+ printf("%2d %2d %2d\n", triangleVertices.at(3 * i), triangleVertices.at(3 * i + 1), triangleVertices.at(3 * i + 2));
+ Triangle t(triangleVertices.at(3 * i), triangleVertices.at(3 * i + 1), triangleVertices.at(3 * i + 2));
+ triangleSet.insert(t);
+ }
+
+
+ triangles = Mat::zeros(triangleSet.size(), 3, CV_32S);
+
+ set::iterator it;
+ int count = 0;
+ for (it = triangleSet.begin(); it != triangleSet.end(); it++) {
+ cout << (*it).v1 << " " << (*it).v2 << " " << (*it).v3 << endl;
+ triangles.at(count, 0) = ((*it).v1);
+ triangles.at(count, 1) = ((*it).v2);
+ triangles.at(count, 2) = ((*it).v3);
+ count++;
+ }
+ cout << endl;
+ nTriangles = count;
+
+
+ vector triangleList;
+
+ subdiv->getTriangleList(triangleList);
+ vector pt(3);
+
+
+ Mat triangleMat(0, 0, baseImageWidth, baseImageHeight);
+ Scalar delaunay_color(255, 255, 255);
+ for( size_t i = 0; i < triangleList.size(); i++ )
+ {
+ Vec6f t = triangleList[i];
+ pt[0] = Point(cvRound(t[0]), cvRound(t[1]));
+ pt[1] = Point(cvRound(t[2]), cvRound(t[3]));
+ pt[2] = Point(cvRound(t[4]), cvRound(t[5]));
+ line(triangleMat, pt[0], pt[1], delaunay_color, 1, LINE_AA, 0);
+ line(triangleMat, pt[1], pt[2], delaunay_color, 1, LINE_AA, 0);
+ line(triangleMat, pt[2], pt[0], delaunay_color, 1, LINE_AA, 0);
}
+
+// end
+
+
+
+ imshow("Triangle frame", triangleMat);
+
+ populatePointTriangleMap();
+
+
}
-bool PAW::isPointInsideTriangleIndex(int px, int py, int triangleIndex){
- //look for triangles in source landmarks
- int v1, v2, v3;
- v1 = triangles.at(triangleIndex,0);
- v2 = triangles.at(triangleIndex,1);
- v3 = triangles.at(triangleIndex,2);
-
- int x1 = srcLandmarks.at(v1,0);
- int y1 = srcLandmarks.at(v1,1);
- int x2 = srcLandmarks.at(v2,0);
- int y2 = srcLandmarks.at(v2,1);
- int x3 = srcLandmarks.at(v3,0);
- int y3 = srcLandmarks.at(v3,1);
-
- return isPointInsideTriangle(px,py,x1,y1,x2,y2,x3,y3);
+bool PAW::isPointInsideTriangleIndex(int px, int py, int triangleIndex) {
+ //look for triangles in source landmarks
+ int v1, v2, v3;
+ v1 = triangles.at(triangleIndex, 0);
+ v2 = triangles.at(triangleIndex, 1);
+ v3 = triangles.at(triangleIndex, 2);
+
+ int x1 = srcLandmarks.at(v1, 0);
+ int y1 = srcLandmarks.at(v1, 1);
+ int x2 = srcLandmarks.at(v2, 0);
+ int y2 = srcLandmarks.at(v2, 1);
+ int x3 = srcLandmarks.at(v3, 0);
+ int y3 = srcLandmarks.at(v3, 1);
+
+ return isPointInsideTriangle(px, py, x1, y1, x2, y2, x3, y3);
}
//uses barycentric coordinates from wikipedia
-bool PAW::isPointInsideTriangle(int x, int y, int x1, int y1, int x2, int y2, int x3, int y3){
- double denominator = (y2-y3)*(x1-x3)+(x3-x2)*(y1-y3);
- double lambda1 = ((y2-y3)*(x-x3)+(x3-x2)*(y-y3))/denominator;
- double lambda2 = ((y3-y1)*(x-x3)+(x1-x3)*(y-y3))/denominator;
- double lambda3 = 1 - lambda1 - lambda2;
- double eps = 0.000000001;
- bool belongs = false;
- if((0-eps <= lambda1)&&(lambda1 <=1+eps)){
- if((0-eps <= lambda2)&&(lambda2 <=1+eps)){
- if((0-eps <= lambda3)&&(lambda3 <=1+eps)){
- belongs = true;
- }
- }
- }
- return belongs;
+bool PAW::isPointInsideTriangle(int x, int y, int x1, int y1, int x2, int y2, int x3, int y3) {
+ double denominator = (y2 - y3)*(x1 - x3) + (x3 - x2)*(y1 - y3);
+ double lambda1 = ((y2 - y3)*(x - x3) + (x3 - x2)*(y - y3)) / denominator;
+ double lambda2 = ((y3 - y1)*(x - x3) + (x1 - x3)*(y - y3)) / denominator;
+ double lambda3 = 1 - lambda1 - lambda2;
+ double eps = 0.000000001;
+ bool belongs = false;
+ if ((0 - eps <= lambda1) && (lambda1 <= 1 + eps)) {
+ if ((0 - eps <= lambda2) && (lambda2 <= 1 + eps)) {
+ if ((0 - eps <= lambda3) && (lambda3 <= 1 + eps)) {
+ belongs = true;
+ }
+ }
+ }
+ return belongs;
}
-void PAW::populatePointTriangleMap(){
- //-1 stands for no triangle
- for(int i=0;i
#include
-#include
+
using namespace std;
using namespace cv;
diff --git a/Chapter7_HeadPoseEstimation/main.cpp b/Chapter7_HeadPoseEstimation/main.cpp
index b5bb08b..74efc5d 100755
--- a/Chapter7_HeadPoseEstimation/main.cpp
+++ b/Chapter7_HeadPoseEstimation/main.cpp
@@ -9,13 +9,12 @@
* http://www.packtpub.com/cool-projects-with-opencv/book
*****************************************************************************/
-/*#include
-#include
-#include */
+
#include
#include "PAW.h"
#include "Triangle.h"
+
#include
#include
#include
@@ -107,8 +106,8 @@ void drawPoints(Mat pcaset, PCA pca, PCA pcaTexture, std::vector& point
namedWindow("AAM");
- IplImage* img = cvLoadImage(imageFileName);
- Mat imageFrame(img);
+ //IplImage* img = cvLoadImage(imageFileName);
+ Mat imageFrame = imread(imageFileName);
int t, ellap;
@@ -117,8 +116,8 @@ void drawPoints(Mat pcaset, PCA pca, PCA pcaTexture, std::vector& point
sprintf(imageFileName,"09-%dm.jpg",imageCount);
- img = cvLoadImage(imageFileName);
- Mat image(img);
+
+ Mat image = imread(imageFileName);
createTrackbar("eigen1", "AAM", &value1, alphaMax);
createTrackbar("eigen2", "AAM", &value2, alphaMax);
@@ -194,8 +193,7 @@ void drawPoints(Mat pcaset, PCA pca, PCA pcaTexture, std::vector& point
if(c=='1') imageCount=1;
if(c=='2') imageCount=2;
if(c=='3') imageCount=3;
-
- cvReleaseImage(&img);
+
ellap = clock();
}
@@ -203,20 +201,23 @@ void drawPoints(Mat pcaset, PCA pca, PCA pcaTexture, std::vector& point
}
-CvSubdiv2D* init_delaunay( CvMemStorage* storage,
+Subdiv2D* init_delaunay( CvMemStorage* storage,
CvRect rect )
{
- CvSubdiv2D* subdiv;
+ Subdiv2D* subdiv = new Subdiv2D(rect);
+
- subdiv = cvCreateSubdiv2D( CV_SEQ_KIND_SUBDIV2D, sizeof(*subdiv),
+ /*subdiv = cvCreateSubdiv2D( CV_SEQ_KIND_SUBDIV2D, sizeof(*subdiv),
sizeof(CvSubdiv2DPoint),
sizeof(CvQuadEdge2D),
storage );
- cvInitSubdivDelaunay2D( subdiv, rect );
+ cvInitSubdivDelaunay2D( subdiv, rect );*/
return subdiv;
}
+//TODO: using draw_subdiv, which doesn't call draw_subdiv_edge
+/*
void draw_subdiv_edge( IplImage* img, CvSubdiv2DEdge edge, CvScalar color )
{
CvSubdiv2DPoint* org_pt;
@@ -241,9 +242,13 @@ void draw_subdiv_edge( IplImage* img, CvSubdiv2DEdge edge, CvScalar color )
}
}
+*/
+//TODO: using draw_subdiv from delaunay2.cpp for now.
+//Update code to use pca
+
int countFrame=0;
-void draw_subdiv( IplImage* img, CvSubdiv2D* subdiv,int par,CvNextEdgeType triangleDirection, std::vector points,Mat pcaSet,Mat originalImage, int imageIndex, Mat& warp_final, vector& triangleVertices)
+/*void draw_subdiv( IplImage* img, CvSubdiv2D* subdiv,int par,CvNextEdgeType triangleDirection, std::vector points,Mat pcaSet,Mat originalImage, int imageIndex, Mat& warp_final, vector& triangleVertices)
{
IplImage* triangleFrame = cvCreateImage(cvSize(640,480),IPL_DEPTH_32F,3);
@@ -334,14 +339,111 @@ void draw_subdiv( IplImage* img, CvSubdiv2D* subdiv,int par,CvNextEdgeType trian
Mat triangleMat(triangleFrame);
imshow("Triangle frame",triangleMat);
}
+*/
+
+
+static void draw_subdiv(Mat& img, Subdiv2D& subdiv, Scalar delaunay_color, std::vector points, Mat pcaSet, Mat originalImage, int imageIndex, Mat& warp_final, vector& triangleVertices){
+#if 1
+ vector triangleList;
+ subdiv.getTriangleList(triangleList);
+ vector pt(3);
+ CvPoint buf[3];
+
+
+ for (size_t i = 0; i < triangleList.size(); i++)
+ {
+ Vec6f t = triangleList[i];
+ pt[0] = Point(cvRound(t[0]), cvRound(t[1]));
+ pt[1] = Point(cvRound(t[2]), cvRound(t[3]));
+ pt[2] = Point(cvRound(t[4]), cvRound(t[5]));
+
+ int shouldPaint = 1;
+ for (int j = 0;j<3;j++) {
+ buf[j] = cvPoint(cvRound(pt[j].x), cvRound(pt[j].y));
+
+ if ((pt[j].x<0) || (pt[j].x>640))
+ shouldPaint = 0;
+ if ((pt[j].y<0) || (pt[j].y>480))
+ shouldPaint = 0;
+ }
+ if (shouldPaint) {
+ //cvFillConvexPoly(img, buf, 3, CV_RGB(0, .1 + par*10.0 / 255.0, 0), CV_AA, 0);
+
+ line(img, pt[0], pt[1], delaunay_color, 1, LINE_AA, 0);
+ line(img, pt[1], pt[2], delaunay_color, 1, LINE_AA, 0);
+ line(img, pt[2], pt[0], delaunay_color, 1, LINE_AA, 0);
+
+
+
+ int originalVertices[3];
+ for (int j = 0;j<3;j++) {
+ int px = buf[j].x;
+ int py = buf[j].y;
+ for (int k = 0;k(imageIndex, originalVertices[0] * 2);
+ int p1y = pcaSet.at(imageIndex, originalVertices[0] * 2 + 1);
+
+ int p2x = pcaSet.at(imageIndex, originalVertices[1] * 2);
+ int p2y = pcaSet.at(imageIndex, originalVertices[1] * 2 + 1);
+
+ int p3x = pcaSet.at(imageIndex, originalVertices[2] * 2);
+ int p3y = pcaSet.at(imageIndex, originalVertices[2] * 2 + 1);
+
+
+
+ Point2f srcTri[3];
+ Point2f dstTri[3];
+
+ srcTri[0] = Point2f(p1x, p1y);
+ srcTri[1] = Point2f(p2x, p2y);
+ srcTri[2] = Point2f(p3x, p3y);
+
+ dstTri[0] = Point2f(buf[0].x, buf[0].y);
+ dstTri[1] = Point2f(buf[1].x, buf[1].y);
+ dstTri[2] = Point2f(buf[2].x, buf[2].y);
+
+ warpTextureFromTriangle(srcTri, originalImage, dstTri, warp_final);
+
+
+ }
+ }
+#else
+ vector edgeList;
+ subdiv.getEdgeList(edgeList);
+ for (size_t i = 0; i < edgeList.size(); i++)
+ {
+ Vec4f e = edgeList[i];
+ Point pt0 = Point(cvRound(e[0]), cvRound(e[1]));
+ Point pt1 = Point(cvRound(e[2]), cvRound(e[3]));
+ line(img, pt0, pt1, delaunay_color, 1, LINE_AA, 0);
+ }
+#endif
+}
void createAAM(PCA pca, Mat pcaSet, PCA& pcaTexture, std::vector& pointsInsideHull,vector& triangleVertices){
- CvMemStorage* storage;
- CvSubdiv2D* subdiv;
+ //OpenCV3 CvMemStorage* storage;
+ Subdiv2D* subdiv;
CvRect rect = { 0, 0, 640, 480 };
- IplImage* asmFrame = cvCreateImage(cvSize(640,480),IPL_DEPTH_32F,3); //TODO parameterize size
- storage = cvCreateMemStorage(0);
- subdiv = cvCreateSubdivDelaunay2D(rect,storage);//init_delaunay( storage, rect );
+ //OpenCV3 IplImage* asmFrame = cvCreateImage(cvSize(640,480),IPL_DEPTH_32F,3); //TODO parameterize size
+ Mat* asmFrame = new Mat(Size(640,480), CV_32FC3);
+
+
+
+ //OpenCV3 storage = cvCreateMemStorage(0);
+ subdiv = new Subdiv2D(rect);//OpenCV3 cvCreateSubdivDelaunay2D(rect, storage);
+ //init_delaunay( storage, rect );
std::vector points;
for(int i=0;i& point
double y = pca.mean.at(0,2*i+1);
CvPoint point = cvPoint( cvRound(x), cvRound(y));
points.push_back(point);
- CvPoint2D32f fp = cvPoint2D32f(x, y);
- cvSubdivDelaunay2DInsert( subdiv, fp );
+ //OpenCV3 CvPoint2D32f fp = cvPoint2D32f(x, y);
+ Point2f fp(x, y);
+ //OpenCV3 cvSubdivDelaunay2DInsert( subdiv, fp );
+ subdiv->insert(fp);
}
@@ -403,18 +507,20 @@ void createAAM(PCA pca, Mat pcaSet, PCA& pcaTexture, std::vector& point
char imageFileName[200];
sprintf(imageFileName,"09-%dm.jpg",imageIndex+1);
- IplImage* img = cvLoadImage(imageFileName);
+ Mat img = imread(imageFileName);
Mat matImgFrame(img);
Mat warp_final;
warp_final = Mat::zeros( matImgFrame.rows, matImgFrame.cols, matImgFrame.type() );
+
+ //TODO: draw_subdiv(asmFrame,subdiv,10,CV_NEXT_AROUND_LEFT,points,pcaSet,matImgFrame,imageIndex,warp_final,triangleVertices);
+ //TODO: draw_subdiv(asmFrame,subdiv,10,CV_NEXT_AROUND_RIGHT,points,pcaSet,matImgFrame,imageIndex,warp_final,triangleVertices);
+ draw_subdiv(*asmFrame, *subdiv, Scalar(0, 0, 255), points, pcaSet, matImgFrame, imageIndex, warp_final, triangleVertices);
+
+
+ Scalar delaunay_color(0, 0, 255);
-
- draw_subdiv(asmFrame,subdiv,10,CV_NEXT_AROUND_LEFT,points,pcaSet,matImgFrame,imageIndex,warp_final,triangleVertices);
- draw_subdiv(asmFrame,subdiv,10,CV_NEXT_AROUND_RIGHT,points,pcaSet,matImgFrame,imageIndex,warp_final,triangleVertices);
-
-
int pointIndex = 0;
@@ -422,7 +528,7 @@ void createAAM(PCA pca, Mat pcaSet, PCA& pcaTexture, std::vector& point
for(int j=0;jwidthStep + pt.x *3;
+ int pos = pt.y* img.step + pt.x *3;
pcaTextureSet.at(imageIndex,3*j ) = ((double)*((uchar*)(warp_final.data + pos)))/255.0f;
@@ -431,7 +537,7 @@ void createAAM(PCA pca, Mat pcaSet, PCA& pcaTexture, std::vector& point
pointIndex++;
}
- cvReleaseImage(&img);
+ //OpenCV 3 cvReleaseImage(&img);
warp_final.release();
matImgFrame.release();
@@ -506,7 +612,7 @@ void testMain( int argc, char** argv ){
}
-int main( int argc, char** argv ){
+int main( int argc, char** argv ){
testMain(argc,argv);
return 0;
}
diff --git a/Chapter8_FaceRecognition/CMakeLists.txt b/Chapter8_FaceRecognition/CMakeLists.txt
index cc04edb..ed950b6 100755
--- a/Chapter8_FaceRecognition/CMakeLists.txt
+++ b/Chapter8_FaceRecognition/CMakeLists.txt
@@ -2,11 +2,8 @@ cmake_minimum_required (VERSION 2.6)
PROJECT(WebcamFaceRec)
-# Requires OpenCV v2.4.1 or later
-FIND_PACKAGE( OpenCV REQUIRED )
-IF (${OpenCV_VERSION} VERSION_LESS 2.4.1)
- MESSAGE(FATAL_ERROR "OpenCV version is not compatible : ${OpenCV_VERSION}. FaceRec requires atleast OpenCV v2.4.1")
-ENDIF()
+# Requires OpenCV v3.0 or later
+FIND_PACKAGE( OpenCV 3 REQUIRED )
SET(SRC
main.cpp
diff --git a/Chapter8_FaceRecognition/ImageUtils.h b/Chapter8_FaceRecognition/ImageUtils.h
index 6f14e73..d9f68c3 100755
--- a/Chapter8_FaceRecognition/ImageUtils.h
+++ b/Chapter8_FaceRecognition/ImageUtils.h
@@ -1,18 +1,18 @@
/*****************************************************************************
* Face Recognition using Eigenfaces or Fisherfaces
******************************************************************************
-* by Shervin Emami, 5th Dec 2012
+* by Shervin Emami, 8th Aug 2016 (shervin.emami@gmail.com)
* http://www.shervinemami.info/openCV.html
******************************************************************************
-* Ch8 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
+* Ch8 of the book "Mastering OpenCV with Practical Computer Vision Projects", 2nd Edition.
+* Copyright Packt Publishing 2016.
* http://www.packtpub.com/cool-projects-with-opencv/book
*****************************************************************************/
/*
* ImageUtils
* Handy utility functions for dealing with images in OpenCV (desktop or Android).
- * by Shervin Emami (shervin.emami@gmail.com), 27th May 2012.
+ * by Shervin Emami, 8th Aug 2016 (shervin.emami@gmail.com)
* The most recent version of this will always be available from "http://shervinemami.info/openCV.html"
*/
@@ -21,11 +21,10 @@
// OpenCV
-#include
-//#include
-#include
+#include "opencv2/imgproc/imgproc.hpp"
+
#ifdef USE_HIGHGUI
- #include
+ #include "opencv2/highgui/highgui.hpp"
#endif
@@ -51,18 +50,20 @@
// These functions will print using the LOG() function, using the same format as printf(). If you want it to be printed using a different
// function (such as for Android logcat output), then define LOG as your output function, otherwise it will use printf() by default.
#ifndef LOG
- // For stdout debug logging, with a new-line character on the end:
- #ifndef _MSC_VER
- // Compiles on GCC but not MSVC:
- #define LOG(fmt, args...) do {printf(fmt, ## args); printf("\n"); fflush(stdout);} while (0)
-// #define LOG printf
+ #ifdef __ANDROID__
+ // For Android debug logging to logcat:
+ #include
+ #define LOG(fmt, args...) (__android_log_print(ANDROID_LOG_INFO, "........", fmt, ## args))
#else
- #define LOG printf
+ // For stdout debug logging, with a new-line character on the end:
+ #ifndef _MSC_VER
+ // Compiles on GCC but maybe not MSVC:
+ //#define LOG(fmt, args...) do {printf(fmt, ## args); printf("\n"); fflush(stdout);} while (0)
+ #define LOG(...) do {} while (0)
+ #else
+ #define LOG(...) do {} while (0)
+ #endif
#endif
-
- //// For Android debug logging to logcat:
- //#include
- //#define LOG(fmt, args...) (__android_log_print(ANDROID_LOG_INFO, "........", fmt, ## args))
#endif
diff --git a/Chapter8_FaceRecognition/ImageUtils_0.7.cpp b/Chapter8_FaceRecognition/ImageUtils_0.7.cpp
index 821e2b4..88af112 100755
--- a/Chapter8_FaceRecognition/ImageUtils_0.7.cpp
+++ b/Chapter8_FaceRecognition/ImageUtils_0.7.cpp
@@ -1,18 +1,18 @@
/*****************************************************************************
* Face Recognition using Eigenfaces or Fisherfaces
******************************************************************************
-* by Shervin Emami, 5th Dec 2012
+* by Shervin Emami, 8th Aug 2016 (shervin.emami@gmail.com)
* http://www.shervinemami.info/openCV.html
******************************************************************************
-* Ch8 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
+* Ch8 of the book "Mastering OpenCV with Practical Computer Vision Projects", 2nd Edition.
+* Copyright Packt Publishing 2016.
* http://www.packtpub.com/cool-projects-with-opencv/book
*****************************************************************************/
/*
* ImageUtils
* Handy utility functions for dealing with images in OpenCV (desktop or Android).
- * by Shervin Emami (shervin.emami@gmail.com), 27th May 2012.
+ * by Shervin Emami, 8th Aug 2016 (shervin.emami@gmail.com)
* The most recent version of this will always be available from "http://shervinemami.info/openCV.html"
*/
@@ -611,7 +611,7 @@ IplImage* drawIntGraph(const int *arraySrc, int nArrayLength, IplImage *imageDst
//cvInitFont(&font,CV_FONT_HERSHEY_PLAIN,0.5,0.6, 0,1, CV_AA); // For OpenCV 2.0
CvScalar clr = GREY;
char text[16];
- snprintf(text, sizeof(text)-1, "%.1f", maxV);
+ snprintf(text, sizeof(text)-1, "%d", maxV);
cvPutText(imageGraph, text, cvPoint(1, b+4), &font, clr);
// Write the scale of the x axis
snprintf(text, sizeof(text)-1, "%d", (nArrayLength-1) );
@@ -700,7 +700,7 @@ IplImage* drawUCharGraph(const uchar *arraySrc, int nArrayLength, IplImage *imag
//cvInitFont(&font,CV_FONT_HERSHEY_PLAIN,0.5,0.6, 0,1, CV_AA); // For OpenCV 2.0
CvScalar clr = GREY;
char text[16];
- snprintf(text, sizeof(text)-1, "%.1f", maxV);
+ snprintf(text, sizeof(text)-1, "%d", maxV);
cvPutText(imageGraph, text, cvPoint(1, b+4), &font, clr);
// Write the scale of the x axis
snprintf(text, sizeof(text)-1, "%d", (nArrayLength-1) );
@@ -1699,245 +1699,6 @@ IplImage* smoothImageBilateral(const IplImage *src, float smoothness)
return imageOut;
}
-
-// Paste multiple images next to each other as a single image, for saving or displaying.
-// Remember to free the returned image.
-// Sample usage: cvSaveImage("out.png", combineImages(2, img1, img2) );
-// Modified by Shervin from the cvShowManyImages() function on the OpenCVWiki by Parameswaran.
-// 'combineImagesResized()' will resize all images to 300x300, whereas 'combineImages()' doesn't resize the images at all.
-IplImage* combineImagesResized(int nArgs, ...)
-{
- // img - Used for getting the arguments
- IplImage *img;
-
- // DispImage - the image in which input images are to be copied
- IplImage *DispImage;
-
- int size;
- int i;
- int m, n;
- int x, y;
-
- // w - Maximum number of images in a row
- // h - Maximum number of images in a column
- int w, h;
-
- // scale - How much we have to resize the image
- float scale;
- int max;
-
- // If the number of arguments is lesser than 0 or greater than 12
- // return without displaying
- if(nArgs <= 0) {
- printf("Number of arguments too small....\n");
- return NULL;
- }
- else if(nArgs > 12) {
- printf("Number of arguments too large....\n");
- return NULL;
- }
- // Determine the size of the image,
- // and the number of rows/cols
- // from number of arguments
- else if (nArgs == 1) {
- w = h = 1;
- size = 300;
- }
- else if (nArgs == 2) {
- w = 2; h = 1;
- size = 300;
- }
- else if (nArgs == 3 || nArgs == 4) {
- w = 2; h = 2;
- size = 300;
- }
- else if (nArgs == 5 || nArgs == 6) {
- w = 3; h = 2;
- size = 200;
- }
- else if (nArgs == 7 || nArgs == 8) {
- w = 4; h = 2;
- size = 200;
- }
- else {
- w = 4; h = 3;
- size = 150;
- }
-
- // Create a new 3 channel image
- DispImage = cvCreateImage( cvSize(100 + size*w, 60 + size*h), 8, 3 );
-
- // Used to get the arguments passed
- va_list args;
- va_start(args, nArgs);
-
- // Loop for nArgs number of arguments
- for (i = 0, m = 20, n = 20; i < nArgs; i++, m += (20 + size)) {
-
- // Get the Pointer to the IplImage
- img = va_arg(args, IplImage*);
-
- // Make sure a proper image has been obtained
- if(img) {
-
- // Find the width and height of the image
- x = img->width;
- y = img->height;
-
- // Find whether height or width is greater in order to resize the image
- max = (x > y)? x: y;
-
- // Find the scaling factor to resize the image
- scale = (float) ( (float) max / size );
-
- // Used to Align the images
- if( i % w == 0 && m!= 20) {
- m = 20;
- n+= 20 + size;
- }
-
- // Make sure we have a color image. If its greyscale, then convert it to color.
- IplImage *colorImg = 0;
- IplImage *currImg = img;
- if (img->nChannels == 1) {
- colorImg = cvCreateImage(cvSize(img->width, img->height), 8, 3 );
- //std::cout << "[Converting greyscale image " << greyImg->width << "x" << greyImg->height << "px to color for combineImages()]" << std::endl;
- cvCvtColor( img, colorImg, CV_GRAY2BGR );
- currImg = colorImg; // Use the greyscale version as the input.
- }
-
- // Set the image ROI to display the current image
- cvSetImageROI(DispImage, cvRect(m, n, (int)( x/scale ), (int)( y/scale )));
-
- // Resize the input image and copy it to the Single Big Image
- cvResize(currImg, DispImage, CV_INTER_CUBIC);
-
- // Reset the ROI in order to display the next image
- cvResetImageROI(DispImage);
-
- if (colorImg)
- cvReleaseImage(&colorImg);
- }
- else { // This input image is NULL
- //printf("Error in combineImages(): Bad image%d given as argument\n", i);
- //cvReleaseImage(&DispImage); // Release the image and return
- //return NULL;
- }
- }
-
- // End the number of arguments
- va_end(args);
-
- return DispImage;
-}
-
-
-
-// Paste multiple images next to each other as a single image, for saving or displaying.
-// Remember to free the returned image.
-// Sample usage: cvSaveImage("out.png", combineImages(2, img1, img2) );
-// Modified by Shervin from the cvShowManyImages() function on the OpenCVWiki by Parameswaran.
-// 'combineImagesResized()' will resize all images to 300x300, whereas 'combineImages()' doesn't resize the images at all.
-IplImage* combineImages(int nArgs, ...)
-{
- const int MAX_COMBINED_IMAGES = 6;
- int col1Width, col2Width;
- int row1Height, row2Height, row3Height;
- IplImage *imageArray[MAX_COMBINED_IMAGES];
- int xPos[MAX_COMBINED_IMAGES];
- int yPos[MAX_COMBINED_IMAGES];
- int wImg[MAX_COMBINED_IMAGES] = {0}; // image dimensions are assumed to be 0, if they dont exist.
- int hImg[MAX_COMBINED_IMAGES] = {0};
- //int rows, columns; // number of rows & cols of images.
- int wP, hP; // dimensions of the combined image.
- int i;
- int nGoodImages = 0;
- IplImage *combinedImage;
- int B = 5; // Border size, in pixels
-
- // Load all the images that were passed as arguments
- va_list args; // Used to get the arguments passed
- va_start(args, nArgs);
- for (i = 0; i < nArgs; i++) {
- // Get the Pointer to the IplImage
- IplImage *img = va_arg(args, IplImage*);
- // Make sure a proper image has been obtained, and that there aren't too many images already.
- if ((img != 0 && img->width > 0 && img->height > 0) && (nGoodImages < MAX_COMBINED_IMAGES) ) {
- // Add the new image to the array of images
- imageArray[nGoodImages] = img;
- wImg[nGoodImages] = img->width;
- hImg[nGoodImages] = img->height;
- nGoodImages++;
- }
- }
-
- // If the number of arguments is lesser than 0 or greater than 12,
- // return without displaying
- if( nGoodImages <= 0 || nGoodImages > MAX_COMBINED_IMAGES ) {
- printf("Error in combineImages(): Cant display %d of %d images\n", nGoodImages, nArgs);
- return NULL;
- }
-
- // Determine the size of the combined image & number of rows/cols.
- //columns = MIN(nGoodImages, 2); // 1 or 2 columns
- //rows = (nGoodImages-1) / 2; // 1 or 2 or 3 or ... rows
- col1Width = MAX(wImg[0], MAX(wImg[2], wImg[4]));
- col2Width = MAX(wImg[1], MAX(wImg[3], wImg[5]));
- row1Height = MAX(hImg[0], hImg[1]);
- row2Height = MAX(hImg[2], hImg[3]);
- row3Height = MAX(hImg[4], hImg[5]);
- wP = B + col1Width + B + (col2Width ? col2Width + B : 0);
- hP = B + row1Height + B + (row2Height ? row2Height + B : 0) + (row3Height ? row3Height + B : 0);
- xPos[0] = B;
- yPos[0] = B;
- xPos[1] = B + col1Width + B;
- yPos[1] = B;
- xPos[2] = B;
- yPos[2] = B + row1Height + B;
- xPos[3] = B + col1Width + B;
- yPos[3] = B + row1Height + B;
- xPos[4] = B;
- yPos[4] = B + row1Height + B + row2Height + B;
- xPos[5] = B + col1Width + B;
- yPos[5] = B + row1Height + B + row2Height + B;
-
- // Create a new RGB image
- combinedImage = cvCreateImage( cvSize(wP, hP), 8, 3 );
- if (!combinedImage)
- return NULL;
-
- // Clear the background
- cvSet(combinedImage, CV_RGB(50,50,50));
-
- for (i=0; i < nGoodImages; i++) {
- IplImage *img = imageArray[i];
-
- // Make sure we have a color image. If its greyscale, then convert it to color.
- IplImage *colorImg = 0;
- if (img->nChannels == 1) {
- colorImg = cvCreateImage(cvSize(img->width, img->height), 8, 3 );
- cvCvtColor( img, colorImg, CV_GRAY2BGR );
- img = colorImg; // Use the greyscale version as the input.
- }
-
- // Set the image ROI to display the current image
- cvSetImageROI(combinedImage, cvRect(xPos[i], yPos[i], img->width, img->height));
- // Draw this image into its position
- cvCopy(img, combinedImage);
- // Reset the ROI in order to display the next image
- cvResetImageROI(combinedImage);
-
- if (colorImg)
- cvReleaseImage(&colorImg);
- }
-
- // End the number of arguments
- va_end(args);
-
- return combinedImage;
-}
-
-
// Blend color images 'image1' and 'image2' using an 8-bit alpha-blending mask channel.
// Equivalent to this operation on each pixel: imageOut = image1 * (1-(imageAlphaMask/255)) + image2 * (imageAlphaMask/255)
// So if a pixel in imageAlphMask is 0, then that pixel in imageOut will be image1, or if imageAlphaMask is 255 then imageOut is image2,
@@ -2130,20 +1891,3 @@ void saveFloatImage(const char *filename, const IplImage *srcImg)
#endif
}
-// Draw some text onto an image using printf() format.
-void drawText(IplImage *img, CvPoint position, CvScalar color, char *fmt, ...)
-{
- // Write the given arguments to 'szMsg' using printf() style formatting.
- va_list marker;
- char szMsg[1024];
- va_start(marker, fmt);
- vsprintf(szMsg, fmt, marker);
- va_end(marker);
- //marker = marker; // stop warning messages
-
- // Display the text onto the image.
- CvFont font;
- cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.3,0.3, 0, 1, CV_AA);
- cvPutText(img, szMsg, position, &font, color);
-}
-
diff --git a/Chapter8_FaceRecognition/main.cpp b/Chapter8_FaceRecognition/main.cpp
index ffd7338..95afc1f 100755
--- a/Chapter8_FaceRecognition/main.cpp
+++ b/Chapter8_FaceRecognition/main.cpp
@@ -1,17 +1,17 @@
/*****************************************************************************
-* Face Recognition using Eigenfaces or Fisherfaces
-******************************************************************************
-* by Shervin Emami, 5th Dec 2012
-* http://www.shervinemami.info/openCV.html
-******************************************************************************
-* Ch8 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
-* http://www.packtpub.com/cool-projects-with-opencv/book
-*****************************************************************************/
+ * * Face Recognition using Eigenfaces or Fisherfaces
+ * ******************************************************************************
+ * * by Shervin Emami, 8th Dec 2016
+ * * http://www.shervinemami.info/openCV.html
+ * ******************************************************************************
+ * * Ch8 of 2nd Edition of the book "Mastering OpenCV with Practical Computer Vision Projects"
+ * * Copyright Packt Publishing 2016.
+ * * http://www.packtpub.com/cool-projects-with-opencv/book
+ * *****************************************************************************/
//////////////////////////////////////////////////////////////////////////////////////
-// WebcamFaceRec.cpp, by Shervin Emami (www.shervinemami.info) on 30th May 2012.
-// Face Detection & Face Recognition from a webcam using LBP and Eigenfaces or Fisherfaces.
+// WebcamFaceRec.cpp, by Shervin Emami (www.shervinemami.info) on 8th Dec 2016.
+// Face Detection & Face Recognition from a webcam using Eigenfaces or Fisherfaces.
//////////////////////////////////////////////////////////////////////////////////////
//
// Some parts are based on the tutorial & code by Robin Hewitt (2007) at:
@@ -20,17 +20,18 @@
// Some parts are based on the tutorial & code by Shervin Emami (2009) at:
// "http://www.shervinemami.info/faceRecognition.html"
//
-// Requires OpenCV v2.4.1 or later (from June 2012), otherwise the FaceRecognizer will not compile or run.
+// Requires OpenCV v3.0 or later (2015), otherwise the FaceRecognizer will not compile or run!
+// If you need to use OpenCV v2.4 or LBPH, use the 1st Edition of the book.
//
//////////////////////////////////////////////////////////////////////////////////////
-// The Face Recognition algorithm can be one of these and perhaps more, depending on your version of OpenCV, which must be atleast v2.4.1:
+// The Face Recognition algorithm can be one of these and perhaps more, depending on your version of OpenCV, which must be atleast v3.0.0:
// "FaceRecognizer.Eigenfaces": Eigenfaces, also referred to as PCA (Turk and Pentland, 1991).
// "FaceRecognizer.Fisherfaces": Fisherfaces, also referred to as LDA (Belhumeur et al, 1997).
-// "FaceRecognizer.LBPH": Local Binary Pattern Histograms (Ahonen et al, 2006).
-const char *facerecAlgorithm = "FaceRecognizer.Fisherfaces";
-//const char *facerecAlgorithm = "FaceRecognizer.Eigenfaces";
+// Note: The LBPH algorithm was also available using the 1st Edition of Mastering OpenCV (with OpenCV 2.4)
+//const char *facerecAlgorithm = "FaceRecognizer.Fisherfaces";
+const char *facerecAlgorithm = "FaceRecognizer.Eigenfaces";
// Sets how confident the Face Verification algorithm should be to decide if it is an unknown person or a known person.
@@ -320,7 +321,7 @@ void onMouse(int event, int x, int y, int, void*)
// Main loop that runs forever, until the user hits Escape to quit.
void recognizeAndTrainUsingWebcam(VideoCapture &videoCapture, CascadeClassifier &faceCascade, CascadeClassifier &eyeCascade1, CascadeClassifier &eyeCascade2)
{
- Ptr model;
+ Ptr model;
vector preprocessedFaces;
vector faceLabels;
Mat old_prepreprocessedFace;
@@ -634,7 +635,7 @@ int main(int argc, char *argv[])
CascadeClassifier eyeCascade2;
VideoCapture videoCapture;
- cout << "WebcamFaceRec, by Shervin Emami (www.shervinemami.info), June 2012." << endl;
+ cout << "WebcamFaceRec, by Shervin Emami (www.shervinemami.info), Dec 2016." << endl;
cout << "Realtime face detection + face recognition from a webcam using LBP and Eigenfaces or Fisherfaces." << endl;
cout << "Compiled with OpenCV version " << CV_VERSION << endl << endl;
diff --git a/Chapter8_FaceRecognition/recognition.cpp b/Chapter8_FaceRecognition/recognition.cpp
index 57a2c19..baad09c 100755
--- a/Chapter8_FaceRecognition/recognition.cpp
+++ b/Chapter8_FaceRecognition/recognition.cpp
@@ -1,19 +1,20 @@
/*****************************************************************************
* Face Recognition using Eigenfaces or Fisherfaces
******************************************************************************
-* by Shervin Emami, 5th Dec 2012
+* by Shervin Emami, 8th Dec 2016
* http://www.shervinemami.info/openCV.html
******************************************************************************
-* Ch8 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
+* Ch8 of 2nd Edition of the book "Mastering OpenCV with Practical Computer Vision Projects"
+* Copyright Packt Publishing 2016.
* http://www.packtpub.com/cool-projects-with-opencv/book
*****************************************************************************/
//////////////////////////////////////////////////////////////////////////////////////
-// recognition.cpp, by Shervin Emami (www.shervinemami.info) on 30th May 2012.
+// recognition.cpp, by Shervin Emami (www.shervinemami.info) on 8th Dec 2016.
// Train the face recognition system on a given dataset, and recognize the person from a given image.
//////////////////////////////////////////////////////////////////////////////////////
-// Requires OpenCV v2.4.1 or later (from June 2012), otherwise the FaceRecognizer will not compile or run!
+// Requires OpenCV v3.0 or later (2015), otherwise the FaceRecognizer will not compile or run!
+// If you need to use OpenCV v2.4 or LBPH, use the 1st Edition of the book.
//////////////////////////////////////////////////////////////////////////////////////
@@ -22,29 +23,35 @@
#include "ImageUtils.h" // Shervin's handy OpenCV utility functions.
// Start training from the collected faces.
-// The face recognition algorithm can be one of these and perhaps more, depending on your version of OpenCV, which must be atleast v2.4.1:
+// The face recognition algorithm can be one of these and perhaps more, depending on your version of OpenCV, which must be atleast v3.0.0:
// "FaceRecognizer.Eigenfaces": Eigenfaces, also referred to as PCA (Turk and Pentland, 1991).
// "FaceRecognizer.Fisherfaces": Fisherfaces, also referred to as LDA (Belhumeur et al, 1997).
-// "FaceRecognizer.LBPH": Local Binary Pattern Histograms (Ahonen et al, 2006).
-Ptr learnCollectedFaces(const vector preprocessedFaces, const vector faceLabels, const string facerecAlgorithm)
+// Note: The LBPH algorithm was also available using the 1st Edition of Mastering OpenCV (with OpenCV 2.4)
+Ptr learnCollectedFaces(const vector preprocessedFaces, const vector faceLabels, const string facerecAlgorithm)
{
- Ptr model;
+ Ptr model;
cout << "Learning the collected faces using the [" << facerecAlgorithm << "] algorithm ..." << endl;
- // Make sure the "contrib" module is dynamically loaded at runtime.
+ /*// Make sure the "contrib" module is dynamically loaded at runtime.
// Requires OpenCV v2.4.1 or later (from June 2012), otherwise the FaceRecognizer will not compile or run!
bool haveContribModule = initModule_contrib();
if (!haveContribModule) {
cerr << "ERROR: The 'contrib' module is needed for FaceRecognizer but has not been loaded into OpenCV!" << endl;
exit(1);
}
+ */
+
+ // Use the new FaceRecognizer class in the "contrib" module of OpenCV 3:
+ if (facerecAlgorithm.compare("FaceRecognizer.Eigenfaces") == 0)
+ model = createEigenFaceRecognizer();
+ else if (facerecAlgorithm.compare("FaceRecognizer.Fisherfaces") == 0)
+ model = createFisherFaceRecognizer();
+ //else if (facerecAlgorithm.compare("FaceRecognizer.LBPH") == 0)
+ //model = createLBPHFaceRecognizer();
- // Use the new FaceRecognizer class in OpenCV's "contrib" module:
- // Requires OpenCV v2.4.1 or later (from June 2012), otherwise the FaceRecognizer will not compile or run!
- model = Algorithm::create(facerecAlgorithm);
if (model.empty()) {
- cerr << "ERROR: The FaceRecognizer algorithm [" << facerecAlgorithm << "] is not available in your version of OpenCV. Please update to OpenCV v2.4.1 or newer." << endl;
+ cerr << "ERROR: The FaceRecognizer algorithm [" << facerecAlgorithm << "] is not available in your version of OpenCV. Please make sure the 'contrib' module is built into your OpenCV library." << endl;
exit(1);
}
@@ -67,12 +74,12 @@ Mat getImageFrom1DFloatMat(const Mat matrixRow, int height)
}
// Show the internal face recognition data, to help debugging.
-void showTrainingDebugData(const Ptr model, const int faceWidth, const int faceHeight)
+void showTrainingDebugData(const Ptr model, const int faceWidth, const int faceHeight)
{
try { // Surround the OpenCV calls by a try/catch block so we don't crash if some model parameters aren't available.
// Show the average face (statistical average for each pixel in the collected images).
- Mat averageFaceRow = model->get("mean");
+ Mat averageFaceRow = model->getMean();
printMatInfo(averageFaceRow, "averageFaceRow");
// Convert the matrix row (1D float matrix) to a regular 8-bit image.
Mat averageFace = getImageFrom1DFloatMat(averageFaceRow, faceHeight);
@@ -80,7 +87,7 @@ void showTrainingDebugData(const Ptr model, const int faceWidth,
imshow("averageFace", averageFace);
// Get the eigenvectors
- Mat eigenvectors = model->get("eigenvectors");
+ Mat eigenvectors = model->getEigenVectors();
printMatInfo(eigenvectors, "eigenvectors");
// Show the best 20 eigenfaces
@@ -97,13 +104,13 @@ void showTrainingDebugData(const Ptr model, const int faceWidth,
}
// Get the eigenvalues
- Mat eigenvalues = model->get("eigenvalues");
+ Mat eigenvalues = model->getEigenValues();
printMat(eigenvalues, "eigenvalues");
//int ncomponents = model->get("ncomponents");
//cout << "ncomponents = " << ncomponents << endl;
- vector projections = model->get >("projections");
+ vector projections = model->getProjections();
cout << "projections: " << projections.size() << endl;
for (int i = 0; i < (int)projections.size(); i++) {
printMat(projections[i], "projections");
@@ -120,24 +127,24 @@ void showTrainingDebugData(const Ptr model, const int faceWidth,
// Generate an approximately reconstructed face by back-projecting the eigenvectors & eigenvalues of the given (preprocessed) face.
-Mat reconstructFace(const Ptr model, const Mat preprocessedFace)
+Mat reconstructFace(const Ptr model, const Mat preprocessedFace)
{
// Since we can only reconstruct the face for some types of FaceRecognizer models (ie: Eigenfaces or Fisherfaces),
// we should surround the OpenCV calls by a try/catch block so we don't crash for other models.
try {
// Get some required data from the FaceRecognizer model.
- Mat eigenvectors = model->get("eigenvectors");
- Mat averageFaceRow = model->get("mean");
+ Mat eigenvectors = model->getEigenVectors();
+ Mat averageFaceRow = model->getMean();
int faceHeight = preprocessedFace.rows;
// Project the input image onto the PCA subspace.
- Mat projection = subspaceProject(eigenvectors, averageFaceRow, preprocessedFace.reshape(1,1));
+ Mat projection = LDA::subspaceProject(eigenvectors, averageFaceRow, preprocessedFace.reshape(1,1));
//printMatInfo(projection, "projection");
// Generate the reconstructed face back from the PCA subspace.
- Mat reconstructionRow = subspaceReconstruct(eigenvectors, averageFaceRow, projection);
+ Mat reconstructionRow = LDA::subspaceReconstruct(eigenvectors, averageFaceRow, projection);
//printMatInfo(reconstructionRow, "reconstructionRow");
// Convert the float row matrix to a regular 8-bit image. Note that we
diff --git a/Chapter8_FaceRecognition/recognition.h b/Chapter8_FaceRecognition/recognition.h
index 7a868e2..002592f 100755
--- a/Chapter8_FaceRecognition/recognition.h
+++ b/Chapter8_FaceRecognition/recognition.h
@@ -25,9 +25,11 @@
// Include OpenCV's C++ Interface
#include "opencv2/opencv.hpp"
+#include "opencv2/face.hpp"
using namespace cv;
+using namespace cv::face;
using namespace std;
@@ -36,14 +38,14 @@ using namespace std;
// The face recognition algorithm can be one of these and perhaps more, depending on your version of OpenCV, which must be atleast v2.4.1:
// "FaceRecognizer.Eigenfaces": Eigenfaces, also referred to as PCA (Turk and Pentland, 1991).
// "FaceRecognizer.Fisherfaces": Fisherfaces, also referred to as LDA (Belhumeur et al, 1997).
-// "FaceRecognizer.LBPH": Local Binary Pattern Histograms (Ahonen et al, 2006).
-Ptr learnCollectedFaces(const vector preprocessedFaces, const vector faceLabels, const string facerecAlgorithm = "FaceRecognizer.Eigenfaces");
+// Note: The LBPH algorithm was also available using the 1st Edition of Mastering OpenCV (with OpenCV 2.4)
+Ptr learnCollectedFaces(const vector preprocessedFaces, const vector faceLabels, const string facerecAlgorithm = "FaceRecognizer.Eigenfaces");
// Show the internal face recognition data, to help debugging.
-void showTrainingDebugData(const Ptr model, const int faceWidth, const int faceHeight);
+void showTrainingDebugData(const Ptr model, const int faceWidth, const int faceHeight);
// Generate an approximately reconstructed face by back-projecting the eigenvectors & eigenvalues of the given (preprocessed) face.
-Mat reconstructFace(const Ptr model, const Mat preprocessedFace);
+Mat reconstructFace(const Ptr model, const Mat preprocessedFace);
// Compare two images by getting the L2 error (square-root of sum of squared error).
double getSimilarity(const Mat A, const Mat B);
diff --git a/Chapter9_FluidInteractionUsingKinect/FluidSolver.cpp b/Chapter9_FluidInteractionUsingKinect/FluidSolver.cpp
deleted file mode 100755
index 9e6cbcd..0000000
--- a/Chapter9_FluidInteractionUsingKinect/FluidSolver.cpp
+++ /dev/null
@@ -1,412 +0,0 @@
-/*****************************************************************************
-* Ch9 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
-* http://www.packtpub.com/cool-projects-with-opencv/book
-* http://code.google.com/p/fluidwall/
-*****************************************************************************/
-/**
- * @file FluidSolver.cpp
- * @author Austin Hines
- * @copyright 2011 Austin Hines, Naureen Mahmood, and Texas A&M Dept. of Visualization
- * @version 1.0.0
- *
- * This file is part of Fluid Wall. You can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Fluid Wall is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Fluid Wall. If not, see .
- *
- */
-
-#include "FluidSolver.h"
-#include
-#include
-
-
-#define ROW_WIDTH N_+2
-#define IX(i,j) ((i)+(ROW_WIDTH)*(j))
-#define FOR_EACH_CELL for (i=1 ; i<=N_ ; i++) { for (j=1 ; j<=N_ ; j++) {
-#define END_FOR }}
-#define SWAP(x0,x) { float* tmp=x0; x0=x; x=tmp; }
-
-
-
-FluidSolver::FluidSolver(void)
-{
- FluidSolver(128, 0.1f, 0.00f, 0.0f);
-}
-
-
-
-FluidSolver::FluidSolver(int N, float dt, float diff, float visc)
-{
- N_ = N;
- dt_ = dt;
- diff_ = diff;
- visc_ = visc;
-
- int size = getSize();
-
- u_ = (float *) malloc(size * sizeof(float));
- v_ = (float *) malloc(size * sizeof(float));
- u_prev_ = (float *) malloc(size * sizeof(float));
- v_prev_ = (float *) malloc(size * sizeof(float));
- dens_ = (float *) malloc(size * sizeof(float));
- dens_prev_ = (float *) malloc(size * sizeof(float));
- bounds_ = (bool *) malloc(size * sizeof(bool) );
-
-}
-
-
-
-FluidSolver::~FluidSolver(void)
-{
- if ( u_ ) free ( u_ );
- if ( v_ ) free ( v_ );
- if ( u_prev_ ) free ( u_prev_ );
- if ( v_prev_ ) free ( v_prev_ );
- if ( dens_ ) free ( dens_ );
- if ( bounds_ ) free ( bounds_ );
- if ( dens_prev_ ) free ( dens_prev_ );
-}
-
-
-
-void FluidSolver::reset()
-{
- for (int i=0 ; i < getSize() ; i++) {
- u_[i] = v_[i] = u_prev_[i] = v_prev_[i] = dens_[i] = dens_prev_[i] = 0.0f;
- bounds_[i] = false;
- }
-}
-
-
-
-void FluidSolver::update()
-{
- computeDensityStep(dens_, dens_prev_, u_, v_);
- computeVelocityStep(u_, v_, u_prev_, v_prev_);
-
- //reset u_prev_, v_prev_, and dens_prev
- for (int i=0 ; i < getSize() ; i++)
- u_prev_[i] = v_prev_[i] = dens_prev_[i] = 0.0f;
-}
-
-
-
-//TODO: Can increase efficiency by only testing valid coordinates
-// when emitters are created.
-void FluidSolver::addVertVelocityAt(int x, int y, float value)
-{
- if(isValidCoordinate(x, y))
- v_prev_[IX(x,y)] += value;
-}
-
-
-
-void FluidSolver::addHorzVelocityAt(int x, int y, float value)
-{
- if(isValidCoordinate(x, y))
- u_prev_[IX(x,y)] += value;
-}
-
-
-
-void FluidSolver::addDensityAt(int x, int y, float value)
-{
- if(isValidCoordinate(x, y))
- dens_prev_[IX(x,y)] += value;
-}
-
-
-
-void FluidSolver::setBoundAt(int x, int y, bool isBound)
-{
- if(isValidCoordinate(x, y))
- bounds_[IX(x,y)] = isBound;
-}
-
-
-
-//accessors
-bool FluidSolver::isBoundAt(int x, int y)
-{
- return bounds_[IX(x,y)];
-}
-
-
-
-float FluidSolver::getDensityAt(int x, int y)
-{
- return dens_[IX(x,y)];
-}
-
-
-
-float FluidSolver::getVertVelocityAt(int x, int y)
-{
- return v_[IX(x,y)];
-}
-
-
-
-float FluidSolver::getHorzVelocityAt(int x, int y)
-{
- return u_[IX(x,y)];
-}
-
-
-
-///protected functions
-int FluidSolver::getSize()
-{
- return (ROW_WIDTH) * (ROW_WIDTH);
-}
-
-
-
-bool FluidSolver::isValidCoordinate(int x, int y)
-{
- bool xIsValid = (x >= 1) && (x <= N_);
- bool yIsValid = (y >= 1) && (y <= N_);
-
- if(xIsValid && yIsValid)
- return true;
- else
- return false;
-}
-
-
-
-void FluidSolver::addSource(float* x, float* s)
-{
- int i;
- for (i=0 ; i N_ + 0.5f) x = N_ + 0.5f;
- i0 = (int)x;
- i1 = i0 + 1;
-
- //limit y coordinate to fall within the grid
- if (y < 0.5f) y = 0.5f;
- if (y > N_ + 0.5f) y = N_ + 0.5f;
- j0 = (int)y;
- j1 = j0 + 1;
-
- s1 = x - i0; //difference between calculated x position and limited x position
- s0 = 1 - s1; //calculate relative x distance from center of this cell
-
- t1 = y - j0;
- t0 = 1 - t1;
-
- //blend several values from the velocity grid together, based on where in the cell
- //the new coordinate valls
- d[IX(i,j)] = s0 * (t0 * d0[IX(i0,j0)] + t1 * d0[IX(i0,j1)]) +
- s1 * (t0 * d0[IX(i1,j0)] + t1 * d0[IX(i1,j1)]);
- END_FOR
- setBounds(boundsFlag, d);
-}
-
-
-
-void FluidSolver::project( float* u, float* v, float* p, float* div)
-{
- int i, j;
-
- float h = 1.0 / N_; //calculate unit length of each cell relative to the whole grid.
-
- FOR_EACH_CELL
- //calculate initial solution to gradient field based on the difference in velocities of
- //surrounding cells.
- div[IX(i, j)] = -0.5f * h * (u[IX(i + 1, j)] - u[IX(i - 1, j)] + v[IX(i, j + 1)] - v[IX(i, j - 1)]);
- //set projected solution values to be zero
- p[IX(i, j)] = 0;
- END_FOR
-
- //set bounds for diffusion
- setBounds(0, div);
- setBounds(0, p);
-
- // calculate gradient (height) field
- linearSolve (0, p, div, 1, 4);
-
- FOR_EACH_CELL
- //subtract gradient field from current velocities
- u[IX(i, j)] -= 0.5f * N_ * (p[IX(i + 1, j)] - p[IX(i - 1, j)]);
- v[IX(i, j)] -= 0.5f * N_ * (p[IX(i, j + 1)] - p[IX(i, j - 1)]);
- END_FOR
-
- //set boundaries for velocity
- setBounds(1, u);
- setBounds(2, v);
-}
-
-
-
-void FluidSolver::computeDensityStep( float* x, float* x0, float* u, float* v )
-{
- addSource (x, x0);
- SWAP(x0, x);
- diffuse(0, x, x0);
- SWAP(x0, x);
- advect(0, x, x0, u, v);
-}
-
-
-
-void FluidSolver::computeVelocityStep (float* u, float* v, float* u0, float* v0)
-{
- addSource(u, u0);
- addSource(v, v0);
- //diffuse horizontal
- SWAP(u0, u);
- diffuse(1, u, u0);
-
- //diffuse vertical
- SWAP(v0, v);
- diffuse(2, v, v0);
- project (u, v, u0, v0);
- SWAP(u0, u);
- SWAP (v0, v);
-
- //advect velocities
- advect(1, u, u0, u0, v0);
- advect(2, v, v0, u0, v0);
- project(u, v, u0, v0);
-}
diff --git a/Chapter9_FluidInteractionUsingKinect/FluidSolver.h b/Chapter9_FluidInteractionUsingKinect/FluidSolver.h
deleted file mode 100755
index 5bbb74d..0000000
--- a/Chapter9_FluidInteractionUsingKinect/FluidSolver.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/*****************************************************************************
-* Ch9 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
-* http://www.packtpub.com/cool-projects-with-opencv/book
-* http://code.google.com/p/fluidwall/
-*****************************************************************************/
-/**
- * @file FluidSolver.h
- * @author Austin Hines
- * @copyright 2011 Austin Hines, Naureen Mahmood, and Texas A&M Dept. of Visualization
- * @version 1.0.0
- *
- * This file is part of Fluid Wall. You can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Fluid Wall is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Fluid Wall. If not, see .
- *
- */
-
-#pragma once
-#include
-
-using namespace std;
-
-/**
- * A 2D fluid simulation solver based on the Stable Fluids algorithm.
- *
- * This algorithm based on original C code from Jos Stam's 2001 paper, "Real Time Fluid
- * Dynamics for Games." I have adapted this code to C++ style and classes (vectors instead
- * of arrays, etc.)
- *
- */
-class FluidSolver
-{
-public:
- //TODO: change all comments to reflect the switch to vectors.
-
- /**
- * Default constructor.
- *
- */
- FluidSolver(void);
-
- /**
- * Parameter constructor.
- * @param N Width (and height) of the square fluid simulation grid
- * @param dt Timestep size
- * @param diff Diffusion coefficient
- * @param visc Viscosity coefficient
- */
- FluidSolver(int N, float dt, float diff, float visc);
- ~FluidSolver(void);
-
- /**
- * Adds vertical velocity values at a particular coordinate.
- *
- * The system is designed to add values from multiple other sources using this method,
- * and then commit them to the simulation using update(). Valid indicies range from
- * 1 to N. Indicies 0 and N+1 are buffer rows for algorithms.
- *
- * @param x x-coordinate, valid values: 1 - N
- * @param y y-coordinate, valid values: 1 - N
- * @param value New vertical velocity value
- */
- void addVertVelocityAt(int x, int y, float value);
-
-
- /**
- * Adds horizontal velocity values at a particular coordinate.
- *
- * The system is designed to add values from multiple other sources using this method,
- * and then commit them to the simulation using update(). Valid indicies range from
- * 1 to N. Indicies 0 and N+1 are buffer rows for algorithms.
- *
- * @param x x-coordinate, valid values: 1 - N
- * @param y y-coordinate, valid values: 1 - N
- * @param value New horizontal velocity value
- */
- void addHorzVelocityAt(int x, int y, float value);
-
-
- /**
- * Adds density values at a particular coordinate.
- *
- * The system is designed to add values from multiple other sources using this method,
- * and then commit them to the simulation using update(). Valid indicies range from
- * 1 to N. Indicies 0 and N+1 are buffer rows for algorithms.
- *
- * @param x x-coordinate, valid values: 1 - N
- * @param y y-coordinate, valid values: 1 - N
- * @param value Density value to be added
- */
- void addDensityAt(int x, int y, float value);
-
- /**
- * Sets bound condition at a given cell.
- *
- * @param x x-coordinate, valid values: 1 - N
- * @param y y-coordinate, valid values: 1 - N
- * @param isBound boolean true/false
- */
- void setBoundAt(int x, int y, bool isBound);
-
- /**
- * Accesor: returns boundary value at given cell.
- *
- * @param x x-coordinate, valid values: 1 - N
- * @param y y-coordinate, valid values: 1 - N
- * @return boolean indicating boundary condition.
- */
- bool isBoundAt(int x, int y);
-
- /**
- * Accessor: returns density value at a particular coordinate. Valid indicies range
- * from 1 to N. Indicies 0 and N+1 are buffer rows for algorithms.
- *
- * @param x X-coordinate, valid values: 1 - N
- * @param y y-coordinate, valid values: 1 - N
- * @return Density value at coordinate.
- */
- float getDensityAt(int x, int y);
-
-
- /**
- * Accessor: returns vertical velocity value at a particular coordinate.
- * Valid indicies range from 1 to N. Indicies 0 and N+1 are buffer rows for
- * algorithms.
- *
- * @param x X-coordinate, valid values: 1 - N
- * @param y y-coordinate, valid values: 1 - N
- * @return Vertical velocity value at coordinate.
- */
- float getVertVelocityAt(int x, int y);
-
-
- /**
- * Accessor: returns horizontal velocity value at a particular coordinate.
- * Valid indicies range from 1 to N. Indicies 0 and N+1 are buffer rows for
- * algorithms.
- *
- * @param x X-coordinate, valid values: 1 - N
- * @param y y-coordinate, valid values: 1 - N
- * @return Horizontal velocity value at coordinate.
- */
- float getHorzVelocityAt(int x, int y);
-
-
- /**
- * Runs an iteration of the simulation, updating density and velocity values.
- * Also resets u_prev, v_prev, and dens_prev.
- *
- */
- void update();
-
-
- /**
- * Resets all public array values to zero.
- */
- void reset();
-
-protected:
- float* u_;
- float* v_;
- float* u_prev_;
- float* v_prev_;
- float* dens_;
- float* dens_prev_;
- bool* bounds_;
-
- int N_;
- float dt_;
- float diff_;
- float visc_;
-
-
-
- /**
- * Calculates size, including buffer cells.
- * @return Total size of fluid simulation array, including buffer cells
- */
- int getSize();
-
-
-
- /**
- * Tests to see whether a coordinate is inside the valid range of fluid computation
- * cells. (1 - N)
- * @param x - y-coordinate
- * @param y - y-coordinate to test
- * @return True if the coordinate pair is within the valid range, false if not.
- */
- bool isValidCoordinate(int x, int y);
-
-
-
- /**
- * Adds values to a matrix array, scaling the values by the timestep.
- * @param x - reference to a float matrix array that values will be added to
- * @param s - reference to a float matrix array representing how much value to add
- */
- void addSource(float* x, float* s);
-
-
-
- /**
- * Sets the boundaries the fluid will collide with. The horizontal component of the velocity should
- * be zero on the vertical walls, while the vertical component of
- * the velocity should be zero on the horizontal walls.
- *
- * @param boundsFlag - flag value: -
- 1 - calculate collision response for horizontal velocity component (u) cells.
- 2 - calculate collision response for vertical velocity component (v) cells.
- * @param *x - pointer to a float array representing the horizontal velocity components
- */
- void setBounds(int boundsFlag, float* x);
-
-
-
- /**
- * Use Gauss-Seidel relaxation on elements in the matrix to work backwards in time
- * to find the or velocities densities we started with.
- *
- * @param b - boundary condition flag
- * @param x - pointer to an array containing final solution values
- * @param x0 - pointer to an array containing a guess of the initial solution
- * @param a - coefficient of relaxation per cell (dt * diffusion rate * total number of cells)
- * @param c - divide out least common denominator from fraction addition
- */
- void linearSolve ( int boundsFlag, float* x, float* x0, float a, float c);
-
-
-
- /**
- * Diffuse density values among surrounding cells
- *
- * @param b - boundary condition flag
- * @param x - pointer to an array containing final density values
- * @param x0 - pointer to an array containing initial density values
- */
- void diffuse ( int boundsFlag, float* x, float* x0);
-
-
-
- /**
- * Move density or velocity values along a velocity field by treacting each cell center as
- * a particle and using a simple linear backtrace.
- *
- * @param b - boundary condition flag
- * @param d - pointer to a matrix array containing final density or velocity values
- * @param d0 - pointer to a matrix array containing initial density or velocity values
- * @param u - pointer to a matrix array containing horizontal velocity components
- * @param v - pointer to a matrix array containing vertical velocity components
- */
- void advect (int boundsFlag, float* d, float* d0, float* u, float* v);
-
-
-
- /**
- * Get the velocity field to conserve mass by using a Hodge decomposition.
- *
- * @param u - pointer to a matrix array containing x components of velocity
- * @param v - pointer to a matrix array containing y components of velocity
- * @param p - pointer to a matrix array containing projected solution
- * @param div - pointer to a matrix array containing divergence values
- */
- void project (float* u, float* v, float* p, float* div);
-
-
-
- /**
- * Performs density calculations per timestep
- *
- * @param x - pointer to a matrix array of final density values
- * @param x0 - pointer to a matrix array representing density values added this timestep
- * @param u - pointer to a matrix array containing x components of velocity
- * @param v - pointer to a matrix array containing y components of velocity
- */
- void computeDensityStep ( float* x, float* x0, float* u, float* v );
-
-
-
- /**
- * Performs velocity calculations per timestep
- *
- * @param u - pointer to a matrix array containing x components of velocity
- * @param v - pointer to a matrix array containing y components of velocity
- * @param u0 - pointer to a matrix array containing x components of velocity to be added
- * this timestep.
- * @param u0 - pointer to a matrix array containing y components of velocity to be added
- * this timestep.
- */
- void computeVelocityStep (float* u, float* v, float* u0, float* v0);
-};
-
diff --git a/Chapter9_FluidInteractionUsingKinect/FluidSolverMultiUser.cpp b/Chapter9_FluidInteractionUsingKinect/FluidSolverMultiUser.cpp
deleted file mode 100755
index 98cd16f..0000000
--- a/Chapter9_FluidInteractionUsingKinect/FluidSolverMultiUser.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
-/*****************************************************************************
-* Ch9 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
-* http://www.packtpub.com/cool-projects-with-opencv/book
-* http://code.google.com/p/fluidwall/
-*****************************************************************************/
-/**
- * @file FluidSolverMultiUser.cpp
- * @author Austin Hines
- * @copyright 2011 Austin Hines, Naureen Mahmood, and Texas A&M Dept. of Visualization
- * @version 1.0.0
- *
- * This file is part of Fluid Wall. You can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Fluid Wall is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Fluid Wall. If not, see .
- *
- */
-
-#include "FluidSolverMultiUser.h"
-
-#define ROW_WIDTH N_+2
-#define IX(i,j) ((i)+(ROW_WIDTH)*(j))
-#define FOR_EACH_CELL for (i=1 ; i<=N_ ; i++) { for (j=1 ; j<=N_ ; j++) {
-#define END_FOR }}
-#define SWAP(x0,x) { float* tmp=x0; x0=x; x=tmp; }
-#define SWAP2D(x0,x) {float ** tmp=x0; x0=x; x=tmp;}
-
-////// public methods
-FluidSolverMultiUser::FluidSolverMultiUser(int nUsers, int N, float dt, float diff, float visc) :
- FluidSolver(N, dt, diff, visc)
-{
- nUsers_ = nUsers;
- int size = getSize();
-
- userDensity_ = new float*[nUsers_];
- userDensity_prev_ = new float*[nUsers_];
-
-
- for(int i = 0; i < nUsers_; i++) {
- userDensity_[i] = new float[size];
- userDensity_prev_[i] = new float[size];
- }
-
- reset();
-}
-
-
-
-FluidSolverMultiUser::~FluidSolverMultiUser(void)
-{
-}
-
-
-
-void FluidSolverMultiUser::addDensityAt(int userID, int x, int y, float value)
-{
- if(isValidCoordinate(x, y))
- userDensity_prev_[userID][IX(x,y)] += value * dt_;
-}
-
-
-
-float FluidSolverMultiUser::getDensityAt(int userID, int x, int y)
-{
- return userDensity_[userID][IX(x,y)];
-}
-
-
-
-void FluidSolverMultiUser::update()
-{
- for(int i = 0; i < nUsers_; i++)
- computeDensityStep(userDensity_[i], userDensity_prev_[i], u_, v_);
- computeVelocityStep(u_, v_, u_prev_, v_prev_);
-
- //reset u_prev_, v_prev_, and dens_prev
- for(int i = 0; i < getSize(); i++) {
- u_prev_[i] = v_prev_[i] = 0.0f;
- }
-
- resetUserDensities(userDensity_prev_);
-}
-
-void FluidSolverMultiUser::reset()
-{
- for(int i = 0 ; i < getSize(); i++) {
- u_[i] = v_[i] = u_prev_[i] = v_prev_[i] = 0.0f;
- bounds_[i] = false;
- }
-
- resetUserDensities(userDensity_);
- resetUserDensities(userDensity_prev_);
-}
-
-
-////// protected methods
-void FluidSolverMultiUser::resetUserDensities(float** userDensity)
-{
- for(int i = 0; i < nUsers_; i++)
- for(int j = 0; j < getSize(); j++) {
- if(i == 0) //the user "no user" (0) has full weight
- userDensity[i][j] = 1.0f;
- else
- userDensity[i][j] = 0.0f;
- }
-}
\ No newline at end of file
diff --git a/Chapter9_FluidInteractionUsingKinect/FluidSolverMultiUser.h b/Chapter9_FluidInteractionUsingKinect/FluidSolverMultiUser.h
deleted file mode 100755
index 3fc5b87..0000000
--- a/Chapter9_FluidInteractionUsingKinect/FluidSolverMultiUser.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*****************************************************************************
-* Ch9 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
-* http://www.packtpub.com/cool-projects-with-opencv/book
-* http://code.google.com/p/fluidwall/
-*****************************************************************************/
-/**
- * @file FluidSolverMultiUser.h
- * @author Austin Hines
- * @copyright 2011 Austin Hines, Naureen Mahmood, and Texas A&M Dept. of Visualization
- * @version 1.0.0
- *
- * This file is part of Fluid Wall. You can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Fluid Wall is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Fluid Wall. If not, see .
- *
- */
-
-#pragma once
-#include "fluidsolver.h"
-
-/**
- * Extends the 2D FluidSolver to support simulation of densities that represent various
- * users in the Fluid Wall simulation
- *
- */
-class FluidSolverMultiUser :
- public FluidSolver
-{
-public:
- /**
- * Parameter constructor
- * @param nUsers Number of users that the solver will calculate.
- * @param N Width (and height) of the square fluid simulation grid
- * @param dt Timestep size
- * @param diff Diffusion coefficient
- * @param visc Viscosity coefficient
- */
- FluidSolverMultiUser(int nUsers, int N, float dt, float diff, float visc);
- ~FluidSolverMultiUser(void);
-
- /**
- * Adds density values at a particular coordinate.
- *
- * The system is designed to add values from multiple other sources using this method,
- * and then commit them to the simulation using update(). Valid indicies range from
- * 1 to N. Indicies 0 and N+1 are buffer rows for algorithms.
- *
- * @param userID User number to add density to
- * @param x x-coordinate, valid values: 1 - N
- * @param y y-coordinate, valid values: 1 - N
- * @param value Density value to be added
- */
- void addDensityAt(int userID, int x, int y, float value);
-
- /**
- * Accessor: returns density value for given user at given coordinate. Valid indicies range
- * from 1 to N. Indicies 0 and N+1 are buffer rows for algorithms.
- *
- * @param userID User ID.
- * @param x X-coordinate, valid values: 1 - N
- * @param y y-coordinate, valid values: 1 - N
- * @return Density value at coordinate.
- */
- float getDensityAt(int userID, int x, int y);
-
- /**
- * Runs an iteration of the simulation, updating density and velocity values.
- * Also resets u_prev, v_prev, and dens_prev.
- *
- */
- void update();
-
-
- /**
- * Resets all public array values to zero.
- */
- void reset();
-
-protected:
- int nUsers_;
- float** userDensity_;
- float** userDensity_prev_;
-
- /**
- * Resets values in userDensity so that user 0
- * has 1.0 density.
- */
- void resetUserDensities(float** userDensity);
- //normalize
-
-
-};
-
diff --git a/Chapter9_FluidInteractionUsingKinect/KinectController.cpp b/Chapter9_FluidInteractionUsingKinect/KinectController.cpp
deleted file mode 100755
index 437604a..0000000
--- a/Chapter9_FluidInteractionUsingKinect/KinectController.cpp
+++ /dev/null
@@ -1,189 +0,0 @@
-/*****************************************************************************
-* Ch9 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
-* http://www.packtpub.com/cool-projects-with-opencv/book
-* http://code.google.com/p/fluidwall/
-*****************************************************************************/
-/**
- * @file KinectController.cpp
- * @author Naureen Mahmood
- * @copyright 2011 Austin Hines, Naureen Mahmood, and Texas A&M Dept. of Visualization
- * @version 1.0.1
- *
- * This file is part of Fluid Wall. You can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Fluid Wall is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Fluid Wall. If not, see .
- *
- * Version History:
- * 1.0.0
- * - Initial Release
- */
-
-#include "KinectController.h"
-
-// XnOpenNI Callbacks when user is detected or lost
-void XN_CALLBACK_TYPE User_NewUser_Cback (xn::UserGenerator& generator, XnUserID nId, void* pCookie);
-void XN_CALLBACK_TYPE User_LostUser_Cback (xn::UserGenerator& generator, XnUserID nId, void* pCookie);
-
-// Initialize all KinectController variables & modules
-XnStatus KinectController::init()
-{
- initDepthControl();
- CHECK_RC(xnRetVal, "InitDepthControl");
- return xnRetVal;
-}
-
-
-/****************************************//**
-* Depth & User Tracking Modules
-********************************************/
-XnStatus KinectController::initDepthControl()
-{
- // Initializing maximum iterations of Kinect data stream capture
- // before resetting them (this is useful when running the application
- // in crowded environments so that the kinect can purge tracking
- // information of previous users and quickly continue tracking the most recent
- // players)
- _maxIterate = ITERATIONS_BEFORE_RESET;
- _iterationCount = 0;
-
- // Initialize Status variable and creating Context object
- xnRetVal = XN_STATUS_OK;
- xnRetVal = xnContext.Init();
- CHECK_RC(xnRetVal, "Context.Init");
-
-
- // DepthGenerator: Create node
- xnRetVal = xnDepthGenerator.Create(xnContext);
- CHECK_RC(xnRetVal, "DepthGenerator.Create");
-
- // DepthGenerator: Set it to VGA maps at 30 FPS
- XnMapOutputMode mapMode;
- mapMode.nXRes = XN_VGA_X_RES;
- mapMode.nYRes = XN_VGA_Y_RES;
- mapMode.nFPS = 30;
-
- // DepthGenerator: Set MapMode
- xnRetVal = xnDepthGenerator.SetMapOutputMode(mapMode);
- CHECK_RC(xnRetVal, "DepthGenerator.SetOutputMode");
-
-
- // UserGenerator: Create node
- xnRetVal = xnUserGenerator.Create(xnContext);
- CHECK_RC(xnRetVal, "UserGenerator.Create");
-
- // UserGenerator: Set Callbacks Handles
- XnCallbackHandle cbackHandle;
- xnUserGenerator.RegisterUserCallbacks (User_NewUser_Cback, User_LostUser_Cback, NULL, cbackHandle);
-
- xnDepthGenerator.GetMetaData(xnDepthMD);
- xnUserGenerator.GetUserPixels(0, xnSceneMD);
-
-
- // Generate all objects
- xnRetVal = xnContext.StartGeneratingAll();
- CHECK_RC(xnRetVal, "StartGenerating");
-
- return xnRetVal;
-}
-
-// Update the XnOpenNI Depth & User tracking data for each frame of video captured
-XnStatus KinectController::update()
-{
- // Restart all kinect processes every once in a while
- if (_iterationCount > _maxIterate)
- reset();
-
- // Context: Wait for new data to be available
- xnRetVal = xnContext.WaitOneUpdateAll(xnDepthGenerator);
- CHECK_RC(xnRetVal, "UpdateAll");
-
- // DepthGenerator: Take current depth map
- const XnDepthPixel* pDepthMap = xnDepthGenerator.GetDepthMap();
- const XnDepthPixel* pDepth = xnDepthMD.Data();
- const XnLabel* pLabels = xnSceneMD.Data();
-
-
- // UserGenerator: Define number of users to be tracked
- XnUserID* userID = new XnUserID [_maxUsers];
- XnUInt16 nUsers = _maxUsers;
-
- // UserGenerator: Get Tracked Users' IDs
- xnUserGenerator.GetUsers(userID, nUsers);
- CHECK_RC(xnRetVal, "UserGenerator.GetUser");
-
-
- // Create temp matrices to store depth & user detection values before flip
- Mat toflip_depthMatrix = Mat::zeros(480,640,CV_8UC1);
- Mat toflip_usersMatrix = Mat::zeros(480,640,CV_8UC1);
-
- // Iterate through the current frame and store depth & user detection values in above matrices
- for (int ind = 0; ind < HEIGHT * WIDTH; ind++)
- { // check if current pixel is within depth threshold
- if (pDepthMap[ind] < _maxDepth)
- {
- toflip_depthMatrix.data[ind] = pDepthMap[ind];
- toflip_usersMatrix.data[ind] = pLabels[ind];
- }
- }
-
- flip( toflip_depthMatrix, _depthMatrix, -1 );
- flip( toflip_usersMatrix, _usersMatrix, -1 );
-
- _iterationCount++;
- return xnRetVal;
-}
-
-
-// Shutdown and restart all Kinect modules
-XnStatus KinectController::reset()
-{
- kinectCleanupExit();
- _iterationCount = 0;
- init ();
- update ();
- return xnRetVal;
-}
-
-/*! Set Depth Threshold */
-void KinectController::setDepth(int depthDelta)
-{
- _maxDepth += depthDelta;
- cout<<"Depth Threshold: "<<_maxDepth<.
- *
- * Version History:
- * 1.0.0
- * - Initial Release
- */
-
-#ifndef KINECT_CONTROLLER_H
-#define KINECT_CONTROLLER_H
-
-//OpenNI includes
-#include
-#include
-
-//OpenCV includes
-#include
-#include
-#include
-#include
-
-#define HEIGHT XN_VGA_Y_RES
-#define WIDTH XN_VGA_X_RES
-#define SAMPLE_XML_PATH "Data/SamplesConfig.xml"
-#define CHECK_RC(nRetVal, what) \
- if (nRetVal != XN_STATUS_OK) \
- { \
- printf("%s failed: %s\n", what, xnGetStatusString(nRetVal));\
- return xnRetVal; \
- }
-
-#define MAX_USERS 6
-#define MAX_DEPTH 3000
-
-#define ITERATIONS_BEFORE_RESET 10000
-using namespace std;
-using namespace cv;
-
-//! KinectController Class
-/*!
- KinectController Class initializes and runs all the modules
- for controlling the kinect camera devices.
-*/
-class KinectController
-{
-public:
- /*
- * (Default) Constructor with initialization list.
- * @param _maxDepth initialize depth threshold for Kinect depth data stream(less than 6000)
- * @param _maxUsers initialize maximum users to be detected (between 1-6)
- * @param _depthMatrix initialize an empty cvMatrix to store the kinect depth-map
- * @param _usersMatrix initialize an empty cvMatrix to store the kinect userID-map
- */
- /*! Constructor with initialization lists */
- KinectController () : _maxDepth( MAX_DEPTH ),
- _maxUsers( MAX_USERS ),
- _depthMatrix( Mat::zeros(480,640,CV_8UC1) ),
- _usersMatrix( Mat::zeros(480,640,CV_8UC1) )
- { init(); }
-
- /*! Destructor */
- ~KinectController() { kinectCleanupExit(); }
- /*! Initialize all KinectController variables & modules */
- XnStatus init();
- /*! Depth & User Tracking Modules */
- XnStatus update();
- /*! Update the XnOpenNI Depth & User tracking data for each frame of video captured */
- XnStatus reset();
- /*! Set Depth Threshold */
- void setDepth(int depthDelta);
- /*! Get depth matrix for current video frame */
- void getDepthMat(Mat &depth) { _depthMatrix.copyTo(depth); }
- /*! Get matrix of tracked users for current video frame */
- void getUsersMat(Mat &users) { _usersMatrix.copyTo(users); }
- /*! Get maximum number of users to be tracked */
- int getMaxUsers() { return _maxUsers; }
-
-private:
- // OPENNI DEPTH & USER TRACKING VARIABLES
- xn::Context xnContext; /*! context object that creates depth and user data nodes */
- xn::DepthGenerator xnDepthGenerator; /*! captures and returns depth values at each frame */
- xn::UserGenerator xnUserGenerator; /*! captures and returns user detection data at each frame */
- xn::SceneMetaData xnSceneMD; /*! scene metadata: gives access to IDs of detected users at each pixel of a captured frame */
- xn::DepthMetaData xnDepthMD; /*! depth metadata: gives access to depth data at each pixel of a captured frame */
-
- XnStatus xnRetVal; /*! used to check the status of each call to an XNOpenNI function */
- int _maxUsers; /*! users to detect */
- int _maxDepth; /*! depth threshold for how far the Kinect should capture */
- int _maxIterate; /*! iterations to run before reset */
- int _iterationCount; /*! running iterations so far (goes up to maxIterate then resets to 0) */
- Mat _depthMatrix; /*! image-sized matrix containing the depth values at each pixel */
- Mat _usersMatrix; /*! image-sized matrix containing the userID's of detected people at
- /*! each pixel (or 0 if no detected user at that pixel) */
- /*! Initialize XnOpenNI depth control & user tracking modules */
- XnStatus initDepthControl();
- /*! Destroy & shutdown XnOpenNI depth control & user tracking modules */
- void stopDepthControl() { xnContext.Shutdown(); }
- /*! Run Shutdown functions for Depth control */
- void kinectCleanupExit();
-};
-
-#endif
\ No newline at end of file
diff --git a/Chapter9_FluidInteractionUsingKinect/README.txt b/Chapter9_FluidInteractionUsingKinect/README.txt
deleted file mode 100755
index 5c4f8d8..0000000
--- a/Chapter9_FluidInteractionUsingKinect/README.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-******************************************************************************
-* Fluid Interaction using Kinect
-******************************************************************************
-* by Naureen Mahmood & Austin Hines, 5th Dec 2012.
-* http://code.google.com/p/fluidwall/
-******************************************************************************
-* Ch9 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
-* http://www.packtpub.com/cool-projects-with-opencv/book
-******************************************************************************
-
-For instructions on building & setting up FluidWall, please visit the website:
- "http://code.google.com/p/fluidwall/"
-
-
-Note: If you receive error messages saying "UPDATE ALL: ..." in your console:
- Sometimes the Kinect will not synchronize properly with the program, usually after quitting and re-launching in quick succession. If this happens, just quit, wait, and try launching again. This error is rare.
-
diff --git a/Chapter9_FluidInteractionUsingKinect/fluidWall.cpp b/Chapter9_FluidInteractionUsingKinect/fluidWall.cpp
deleted file mode 100755
index c21a401..0000000
--- a/Chapter9_FluidInteractionUsingKinect/fluidWall.cpp
+++ /dev/null
@@ -1,1199 +0,0 @@
-/*****************************************************************************
-* Ch9 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
-* http://www.packtpub.com/cool-projects-with-opencv/book
-* http://code.google.com/p/fluidwall/
-*****************************************************************************/
-/**
- * @file fluidWall.cpp
- * @author Austin Hines
- * @copyright 2011 Austin Hines, Naureen Mahmood, and Texas A&M Dept. of Visualization
- * @version 1.0.1
- *
- * The main executable for Fluid Wall. Contains functions that define how the
- * KinectController class effects the FluidSolver classes. This defines the general
- * behavior of the simulation.
- *
- * This file is part of Fluid Wall. You can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Fluid Wall is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Fluid Wall. If not, see .
- *
- * Version History:
- * 1.0.1
- * - Added fullscreen toggle mode, bound to 'q' key.
- * - Removed 'q' key as quit.
- * - Added Version tag to GLUT window and command line output.
- * 1.0.0
- * - Initial Release
- *
- */
-
-
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-#include
-
-#include
-
-#include "FluidSolver.h"
-#include "FluidSolverMultiUser.h"
-#include "KinectController.h"
-
-static const char* VERSION = "1.0.1 BETA";
-
-#ifndef GL_BGR //fix omission from windows OpenGL implementation
- #define GL_BGR GL_BGR_EXT
-#endif
-#ifndef GL_BGRA
- #define GL_BGRA GL_BGRA_EXT
-#endif
-
-#define DEBUG 0
-#define USE_GPU 0
-
-// macros
-#define ROW_WIDTH N+2
-#define IX(i, j) ((i) + (ROW_WIDTH) * (j))
-#define FOR_EACH_CELL for(i = 1; i <= N; i++) { for(j = 1; j <= N; j++) {
-#define END_FOR }}
-#define SWAP(x0, x) { float* tmp = x0; x0 = x; x = tmp; }
-
-///// constants
-const static int GRID_SIZE = 128;
-const static float FLOW_SCALE = 0.1;
-const static int SPLASH_ROWS = 80;
-const static float BG_OFFSET = 0.1;
-const static int DEF_WINDOW_SIZE = 512;
-
-using namespace std;
-using namespace cv;
-using namespace cv::gpu;
-
-typedef struct
-{
- Point2f center;
- Point2f vel;
- int lifespan, lifeElapsed;
- int radius;
- int userID;
-} Emitter;
-
-// =============================================================================
-// GLOBAL VARIABLES
-// =============================================================================
-
-static FluidSolver *solver;
-static FluidSolverMultiUser *userSolver;
-static KinectController *kinect;
-static bool useUserSolver = false;
-
-static Mat depthMatrix;
-static Mat usersMatrix;
-static Mat resizedUsersMatrix;
-
-GLfloat colors[][3] = // user colors for fluid emission
-{
- //{0.02f,0.02f,0.02f},
- {0.0f,0.0f,0.0f},
- {0.0f,1.0f,1.0f},
- {0.5f,1.0f,0.0f},
- {0.5f,0.5f,0.0f},
- {0.0f,0.4f,0.6f},
- {0.0f,1.0f,0.0f},
- {1.0f,0.5f,0.0f},
- {1.0f,1.0f,0.0f},
- {1.0f,0.0f,0.0f},
- {0.0f,0.5f,1.0f},
- {1.0f,1.0f,0.5f},
- {1.0f,1.0f,1.0f}
-};
-
-GLfloat colorsWhiteBG[][3] = // user colors for fluid emission
-{
- {0.02f,0.02f,0.02f},
- {0.0f,1.0f,1.0f},
- {0.5f,1.0f,0.0f},
- {1.0f,0.5f,0.0f},
- {0.0f,0.0f,1.0f},
- {0.0f,1.0f,0.0f},
- {1.0f,1.0f,0.0f},
- {1.0f,0.0f,0.0f},
- {0.0f,0.5f,1.0f},
- {0.5f,0.0f,1.0f},
- {1.0f,1.0f,0.5f},
- {1.0f,1.0f,1.0f}
-};
-
-
-//particle system variables
-static int N;
-static float force = 5.0f;
-static float source = 20.0f;
-static int MAX_EMITTERS = 200;
-
-static bool useFlow; //use optical flow
-static vector emittersList(MAX_EMITTERS);
-
-static Mat depthImage;
-static Mat flow; //optical flow matrix
-static Mat currFlowImg, prevFlowImg;
-static GpuMat gpuFlowX, gpuFlowY; //gpu-enabled optical flow matrix
-static GpuMat gpuPrevFlowImg, gpuCurrFlowImg;
-
-//OpenGL
-static int winID;
-static int winX, winY;
-static int mouseDown[3];
-static int omx, omy, mx, my;
-
-//display flags
-static int dvel, dbound, dusers;
-
-//mode change variables
-static bool autoChangeMode = false;
-static bool useWhiteBackground = false;
-static int mode = 0;
-static int maxMode = 3;
-static int iterations = 0;
-static int iterationsPerMode = 500; //frames per mode
-
-//forward method declarations
-static void changeMode(int newMode);
-static void openGlutWindow ( void );
-static void initOpenGl();
-static void toggleFullscreen();
-/*
- ----------------------------------------------------------------------
- free/clear/allocate simulation data
- ----------------------------------------------------------------------
-*/
-
-/**
- * Clears all solver data.
- */
-static void clearData(void)
-{
- if(useUserSolver)
- userSolver->reset();
- else
- solver->reset();
-
- emittersList.clear();
-}
-
-/**
- * Initializes all objects and defines constant variables used in main program.
- * TODO: relegate this code to a singleton class.
- */
-static int allocateData ( void )
-{
- solver = new FluidSolver(GRID_SIZE, 0.1f, 0.00f, 0.0f);
- kinect = new KinectController();
- userSolver = new FluidSolverMultiUser(kinect->getMaxUsers(), GRID_SIZE,0.1f, 0.00f, 0.0f);
- emittersList.reserve(MAX_EMITTERS);
- for(int i = 0; i < MAX_EMITTERS; i++)
- {
- Emitter newEmit = {Point(0, 0), Point2f(0, 0), 1, 2, 1, 0};
- emittersList.push_back(newEmit);
- }
-
- N = GRID_SIZE;
- flow = Mat::zeros(N, N, CV_32FC2);
-
- useFlow = true;
-
- #if DEBUG
- namedWindow("Users",1);
- namedWindow("flow", 1);
- #endif
-
- return ( 1 );
-}
-
-
-/**
- * Cleans up any allocated memory.
- */
-void cleanupExit()
-{
- if (glutGameModeGet(GLUT_GAME_MODE_ACTIVE))
- glutLeaveGameMode();
- exit(0);
-}
-
-
-/**
- * Used for debug and basic testing of fluid simulation. Drives fluid simulation based on
- * mouse input.
- *
- * - Left Mouse Button + Drag adds velocity
- * - Middle Mouse Button adds boundaries,
- * - Right Mouse Button adds density.
- *
- */
-static void getForcesFromMouse(FluidSolver* flSolver)
-{
- int x, y;
-
- bool noButtonsPressed = !mouseDown[0] && !mouseDown[2] && !mouseDown[1];
- if (noButtonsPressed) return;
-
- // determine mouse position on the fluid grid by divide screenspace by N gridspaces
- x = (int)(( mx / (float)winX) * N + 1);
- y = (int)(((winY - my) / (float)winY) * N + 1);
-
- bool isMouseOutsideFluidGrid = (x < 1) || (x > N) || (y < 1) || (y > N);
- if (isMouseOutsideFluidGrid) return;
-
- if (mouseDown[0]) { //left mouse button
- flSolver->addHorzVelocityAt(x, y, force * (mx - omx));
- flSolver->addVertVelocityAt(x, y, force * (omy - my));
- }
-
- if (mouseDown[1]) // middle mouse button
- flSolver->setBoundAt(x, y, true);
-
- if (mouseDown[2]) // right mouse button
- if(useUserSolver)
- userSolver->addDensityAt(1, x, y, source);
- else
- flSolver->addDensityAt(x, y, source);
-
- omx = mx;
- omy = my;
- return;
-}
-
-
-/**
- * Loads kinect depth and users data into local matrices.
- * Resizes kinect data to fluid simulation grid size:
- * resizedDepthMatrix = NxN matrix
- * resizedUsersMatrix = NxN matrix
- */
-int loadKinectData()
-{
- Mat resizedDepthMatrix = Mat::zeros(N, N, CV_8UC1);
- Mat resizedUsersMatrix = Mat::zeros(N, N, CV_8UC1);
-
- // depth tracking
- kinect->update();
- kinect->getDepthMat(depthMatrix);
- if(depthMatrix.empty())
- {
- cout<<"ERROR: Cannot load depth frame"<getUsersMat(usersMatrix);
- resize(usersMatrix, resizedUsersMatrix, resizedUsersMatrix.size());
- }
- return 0;
-}
-
-/**
- * Translates input depthImage or userID values into collision
- * boundaries in the FluidSolver. Currently set to define
- * a boundary at any pixel in the depthImage with a
- * value greater than zero.
- */
-static void defineBoundsFromImage(FluidSolver* flSolver, Mat &bounds)
-{
- for( int y = 0; y < bounds.rows; y++ )
- for( int x = 0; x < bounds.cols; x++ )
- {
- uchar &pixelVal = bounds.at(y, x);
- //add + 1 to coordinates because fluid matrix indicies range from 1 - N
- if( pixelVal > 0)
- flSolver->setBoundAt(x, y, true);
- else
- flSolver->setBoundAt(x, y, false);
- }
-}
-
-
-/**
- * Draws and displays a graphical representation of the optical flow results using OpenCV.
- *
- * @param flow - Matrix of type CV_32FC2 containing results of optical flow calculation.
- * @param cflowmap - Color image representing the input of optical flow
- * @param step - number of pixels to skip when drawing each vector. (drawing a vector for
- * every pixel would be too dense and graphically noisy.
- * @param color - CV_RGB scalar specifying color to draw the optical flow vectors.
- */
-void drawOptFlowMap(const Mat& flow, Mat& cflowmap, int step,
- double, const Scalar& color)
-{
- for(int y = 0; y < cflowmap.rows; y += step)
- for(int x = 0; x < cflowmap.cols; x += step)
- {
- const Point2f& fxy = flow.at(y, x);
- line(cflowmap, Point(x,y), Point(cvRound(x+fxy.x), cvRound(y+fxy.y)), color);
- circle(cflowmap, Point(x,y), 2, color, -1);
- }
-}
-
-
-
-/**
- * Translates optical flow into velocity values. Flow values are
- * rounded with cvRound to eliminate noise. Results are added directly into FluidSolver.
- */
-static void computeOpticalFlow(FluidSolver* flSolver, Mat& flow)
-{
- if(!prevFlowImg.empty())
- {
- #if USE_GPU
- GpuMat d_frameL(prevFlowImg), d_frameR(currFlowImg);
- GpuMat d_flowx, d_flowy;
- FarnebackOpticalFlow calcFlowFB;
- Mat flowx, flowy;
-
- calcFlowFB(d_frameL, d_frameR, d_flowx, d_flowy);
- d_flowx.download(flowx);
- d_flowy.download(flowy);
- Mat planes[] = {flowx, flowy};
- planes->copyTo(flow);
- #else
- calcOpticalFlowFarneback(prevFlowImg, currFlowImg, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
- #endif
- for(int y = 1; y < N; y++)
- { for(int x = 1; x < N; x++)
- {
- const Point2f& fxy = flow.at(y, x);
- flSolver->addHorzVelocityAt(x, y, FLOW_SCALE * fxy.x);
- flSolver->addVertVelocityAt(x, y, FLOW_SCALE * fxy.y);
- }
- }
- }
- #if DEBUG
- Mat cflow;
- cvtColor(prevFlowImg, cflow, CV_GRAY2BGR);
- drawOptFlowMap(flow, cflow, 16, 1.5, CV_RGB(0, 255, 0));
- imshow("flow", cflow);
- #endif
- std::swap(prevFlowImg, currFlowImg);
-}
-
-
-/**
- * Creates an emitter object with given properties.
- */
-static void createEmitterAt(int center_x, int center_y, float force_u, float force_v, int lifespan, int radius, int userID = 1)
-{
- Emitter newEmit = {Point(center_x, center_y), Point2f(force_u, force_v), lifespan, 0, radius, userID};
- emittersList.push_back(newEmit);
- #if DEBUG
- cout<<"Emitter created: "< &e)
-{
- //TODO: convert emitters to fixed array. Bug being caught in loop right now.
- int i = 0;
- while (i < e.size())
- {
- bool emitterHasExpired = (e[i].lifespan - e[i].lifeElapsed == 0);
- if(emitterHasExpired)
- e.erase(e.begin() + i);
- else
- {
- if (useFlow)
- {
- Point lowerCoord, upperCoord;
-
- //calculate scalar for temporal falloff overlifespan
- float lifescalar = (e[i].lifespan - e[i].lifeElapsed) / e[i].lifespan;
-
- //prevent radius from referencing cells outside simulation matrix
- //TODO: clean up line breaks here
- lowerCoord.y = (e[i].center.y - e[i].radius) < 1 ? 1 : (e[i].center.y - e[i].radius);
- lowerCoord.x = (e[i].center.x - e[i].radius) < 1 ? 1 : (e[i].center.x - e[i].radius);
- upperCoord.y = (e[i].center.y + e[i].radius) > N ? N : (e[i].center.y + e[i].radius);
- upperCoord.x = (e[i].center.x + e[i].radius) > N ? N : (e[i].center.x + e[i].radius);
-
- for(int y = lowerCoord.y; y <= upperCoord.y; y++)
- for(int x = lowerCoord.x; x <= upperCoord.x; x++)
- {
- //calculate falloff from center
- float vscalar = fabs(y - e[i].center.y) / e[i].radius;
- float uscalar = fabs(x - e[i].center.x) / e[i].radius;
- float dscalar = (vscalar+uscalar) / 2;
-
- float horzVel = e[i].vel.x * uscalar;
- float vertVel = e[i].vel.y * vscalar;
- float density = source * dscalar * lifescalar;
-
- flSolver->addHorzVelocityAt(x, y, horzVel);
- flSolver->addVertVelocityAt(x, y, vertVel);
-
- if(useUserSolver)
- userSolver->addDensityAt(e[i].userID, x, y, density);
- else
- flSolver->addDensityAt(x, y, density);
- }
- e[i].lifeElapsed++;
- i++;
- }
- else
- {
- float fu, fv = 0.0;
- // emit splashes on either side of whole silhouette
- for (int j = 1; j <= N; j++)
- {
- for (int i = 1; i <= N; i++)
- {
- bool horzBoundChangesToYes = !flSolver->isBoundAt(i, j) && flSolver->isBoundAt(i+1, j);
- bool horzBoundChangesToNo = flSolver->isBoundAt(i, j) && !flSolver->isBoundAt(i+1, j);
-
- if(horzBoundChangesToYes)
- {
- fu = -0.05; // emit velocity in negative direction
- fv = 0.1;
- flSolver->addHorzVelocityAt(i, j, force * fu);
- flSolver->addVertVelocityAt(i, j, force * fv);
- }
- else if(horzBoundChangesToNo)
- {
- fu = 0.05; // emit velocity in positive direction
- fv = 0.1;
- flSolver->addHorzVelocityAt(i+1, j, force * fu);
- flSolver->addVertVelocityAt(i+1, j, force * fv);
- }
- }
- }
- }
- }
- }
-}
-
-
-/**
- * Creates emitter objects based on optical flow velocity.
- * If vertical velocity is negative at boundaries, an emitter
- * is created. An emission threshold prevents negative velocities
- * due to noise from creating emitters. In the call tree, we
- * assume this function is called after computeOpticalFlow.
- *
- * @param flsolver Fluid Solver to emit splashes into
- * @param flow Reference to a matrix containing optical flow velocities.
- */
-static void emitSplashes(FluidSolver* flSolver, Mat &flow)
-{
- //precondition: optical flow has been calculated
- float fu, fv;
- fu = fv = 0.0;
- int velocityEmissionThreshold = -0.05; //creates emitters based on velocity emission
-
- if(useFlow)
- {
- // Only look for emitters in splash rows.
- for( int j = 1; j < SPLASH_ROWS; j++)
- { for(int i = 1; i <= N; i++)
- {
- bool vertBoundChangesToYes = !flSolver->isBoundAt(i, j) && flSolver->isBoundAt(i, j+1);
- if(vertBoundChangesToYes)
- {
- const Point2f& opticalFlowVelocity = flow.at(i, j);
- fu = .8 * opticalFlowVelocity.x;
- fv = .8 * opticalFlowVelocity.y;
-
- if(opticalFlowVelocity.y < velocityEmissionThreshold)
- {
- if(useUserSolver)
- {
- int userID = resizedUsersMatrix.at(i, j+1);
- createEmitterAt(i, j-1, fu, fv, 6, 3, userID);
- }
- else
- createEmitterAt(i, j-1, fu, fv, 6, 3, 1);
- }
- }
- }
- }
- }
- renderEmitters(flSolver, emittersList);
-}
-
-/**
- * Changes various modes. Modes are given integer numbers to work with auto switcher function.
- *
- * @param newMode Mode number to change to. Valid values 0-3.
- */
-static void changeMode(int newMode)
-{
- mode = newMode;
- //we have changed modes, initialize new modes
- clearData();
- switch(newMode)
- {
- case 0:
- source = 20.0f;
- dvel = false;
- dbound = false;
- dusers = false;
- useFlow = true;
- useUserSolver = true;
- useWhiteBackground = false;
- cout<<"Changing to mode 0: Single color density"< iterationsPerMode))
- {
- iterations = 0;
- mode++;
- if(mode > maxMode)
- mode = 0;
-
- changeMode(mode);
- }
- else
- iterations++;
-}
-
-
-////////////////////////////////////////////////////////////////////////
-/*
- ----------------------------------------------------------------------
- OpenGL specific drawing routines
- ----------------------------------------------------------------------
-*/
-
-/**
- * Draws fluid velocity vectors in OpenGL.
- * @param flSolver FluidSolver containing the velocity to draw.
- *
- */
-static void drawVelocity(FluidSolver* flSolver)
-{
- int i, j;
- float x, y, h;
- h = 1.0f / N;
- glColor3f(1.0f, 1.0f, 1.0f);
- glLineWidth(1.0f);
-
- glBegin(GL_LINES);
- for (i = 1; i <= N; i++)
- {
- x = (i - 0.5f) * h;
- for (j = 1; j <= N; j++)
- {
- y = (j - 0.5f) * h;
- glVertex2f(x, y);
- glVertex2f(x + flSolver->getHorzVelocityAt(i,j),
- y + flSolver->getVertVelocityAt(i,j));
- }
- }
- glEnd();
-}
-
-
-/**
- * Draws bounding cells in OpenGL.
- * @param flSolver FluidSolver containing the bounds to draw.
- *
- */
-static void drawBounds(FluidSolver* flSolver)
-{
- int i, j;
- float x, y, h;
- h = 1.0f / N; //calculate unit length of each cell
-
- glBegin(GL_QUADS);
- for (i = 0; i <= N; i++)
- {
- x = i * h;
- for (j = 0; j <= N; j++)
- {
- y = j * h;
-
- if(flSolver->isBoundAt(i,j)) {
- glColor3f (0.30f, 0.30f, 0.30f); glVertex2f (x, y);
- glColor3f (0.30f, 0.30f, 0.30f); glVertex2f (x+h, y);
- glColor3f (0.30f, 0.30f, 0.30f); glVertex2f (x+h, y+h);
- glColor3f (0.30f, 0.30f, 0.30f); glVertex2f (x, y+h);
- }
- }
- }
- glEnd();
-}
-
-
-typedef struct {float R, G, B;} RGBType;
-typedef struct {float H, S, V;} HSVType;
-
-#define RETURN_RGB(r, g, b) {RGB.R = r; RGB.G = g; RGB.B = b; return RGB;}
-#define RETURN_HSV(h, s, v) {HSV.H = h; HSV.S = s; HSV.V = v; return HSV;}
-#define UNDEFINED -1
-
-/**
- * Converts an HSV color into RGB color space.
- *
- * @param HSV HSV color variable to convert into RGB.
- */
-RGBType HSV_to_RGB( HSVType HSV )
-{
- // H is given on [0, 6] or UNDEFINED. S and V are given on [0, 1].
- // RGB are each returned on [0, 1].
- float h = HSV.H, s = HSV.S, v = HSV.V, m, n, f;
- int i;
- RGBType RGB;
- if (h == UNDEFINED)
- RETURN_RGB(v, v, v);
- i = floor(h);
- f = h - i;
- if ( !(i&1) )
- f = 1 - f; // if i is even
- m = v * (1 - s);
- n = v * (1 - s * f);
- switch (i)
- {
- case 6 :
- case 0 : RETURN_RGB(v, n, m);
- case 1 : RETURN_RGB(n, v, m);
- case 2 : RETURN_RGB(m, v, n)
- case 3 : RETURN_RGB(m, n, v);
- case 4 : RETURN_RGB(n, m, v);
- case 5 : RETURN_RGB(v, m, n);
- }
-}
-
-/**
- * Computes the weighted color of all the user densities at a particular
- * pixel coordinate.
- *
- * @param x x-coordinate
- * @param y y-coordinate
- */
-RGBType getWeightedColor(int x, int y)
-{
- RGBType RGB;
- float R, G, B;
- R=G=B=0;
-
- for(int i = 0; i < MAX_USERS; i++)
- {
- float densityVal = userSolver->getDensityAt(i, x, y);
- if(useWhiteBackground)
- {
- R += colorsWhiteBG[i][0] * densityVal;
- G += colorsWhiteBG[i][1] * densityVal;
- B += colorsWhiteBG[i][2] * densityVal;
- }
- else
- {
- R += colors[i][0] * densityVal;
- G += colors[i][1] * densityVal;
- B += colors[i][2] * densityVal;
- }
- }
- RGB.R = R; RGB.B = B; RGB.G = G;
- return RGB;
-}
-
-
-
-/**
- * Render density grids as OpenGL quads with vertex shading.
- *
- * @param flSolver fluid solver
- */
-static void drawDensity ( FluidSolver* flSolver )
-{
- int i, j;
- float x, y, h;
- RGBType rgb00, rgb10, rgb11, rgb01;
- float d00, d10, d11, d01;
- float hue = 3.25;
- float sat = 1.0;
- h = 1.0f/N;
-
- glBegin ( GL_QUADS );
- for ( i=1 ; i<=N ; i++ )
- {
- x = (i-0.5f)*h;
- for ( j=1 ; j<=N ; j++ )
- {
- y = (j-0.5f)*h;
- if(useUserSolver)
- {
- //render density color for each point based on blending user values
- rgb00 = getWeightedColor(i,j);
- rgb10 = getWeightedColor(i+1,j);
- rgb11 = getWeightedColor(i+1,j+1);
- rgb01 = getWeightedColor(i,j+1);
- }
- else
- {
- //if a cell is a bounds cell, do not apply a density color
- d00 = flSolver->isBoundAt(i,j) ? 0 : BG_OFFSET + flSolver->getDensityAt(i,j);
- d10 = flSolver->isBoundAt(i+1,j) ? 0 : BG_OFFSET + flSolver->getDensityAt(i+1,j);
- d11 = flSolver->isBoundAt(i+1,j+1) ? 0 : BG_OFFSET + flSolver->getDensityAt(i+1,j+1);
- d01 = flSolver->isBoundAt(i,j+1) ? 0 : BG_OFFSET + flSolver->getDensityAt(i,j+1);
-
- //hsv to rgb using the density in the cell
- HSVType hsv00 = {hue, sat, d00};
- rgb00 = HSV_to_RGB(hsv00);
- HSVType hsv10 = {hue, sat, d10};
- rgb10 = HSV_to_RGB(hsv10);
- HSVType hsv11 = {hue, sat, d11};
- rgb11 = HSV_to_RGB(hsv11);
- HSVType hsv01 = {hue, sat, d01};
- rgb01 = HSV_to_RGB(hsv01);
- }
-
- glColor3f (rgb00.R, rgb00.G, rgb00.B); glVertex2f ( x, y );
- glColor3f (rgb10.R, rgb10.G, rgb10.B); glVertex2f ( x+h, y );
- glColor3f (rgb11.R, rgb11.G, rgb11.B); glVertex2f ( x+h, y+h );
- glColor3f (rgb01.R, rgb01.G, rgb01.B); glVertex2f ( x, y+h );
- }
- }
- glEnd ();
-}
-
-
-
-/**
- * Draws user silhouettes in unique colors per user in OpenGL.
- * Uses the usersMatrix from kinect.
- *
- */
-static void drawUsers(void)
-{
- int i, j;
- float x, y, h;
- int d00, index;
- h = 1.0f/N;
-
- glBegin ( GL_QUADS );
- for ( i=0 ; i<=N ; i++ )
- {
- x = i*h;
- for ( j=0 ; j<=N ; j++ )
- {
- y = j*h;
- index = j * N + i;
- d00 = resizedUsersMatrix.data[index];
- if(d00 != 0)
- {
- GLfloat R = colors[d00][0];
- GLfloat G = colors[d00][1];
- GLfloat B = colors[d00][2];
-
- //cout<<"found a color for user: "<reset();
- break;
- case 'o':
- case 'O':
- kinect->setDepth(+200);
- break;
- case 'k':
- case 'K':
- kinect->setDepth (-200);
- break;
- case 'Q' :
- case 'q' :
- toggleFullscreen();
- break;
- }
-}
-
-
-
-/**
- * GLUT mouse listener function. Called when mouse button is pressed or
- * released.
- *
- * @param button ID of button event that is presssed.
- * @param state A GLUT constant indicating mouse down or up
- * @param x x-coordinate of mouse at time of button event
- * @param y y-coordinate of mouse at time of button event
- */
-static void mouse_func ( int button, int state, int x, int y )
-{
- omx = mx = x;
- omx = my = y;
- mouseDown[button] = state == GLUT_DOWN;
-}
-
-
-
-/**
- * GLUT mouse movement function. Called when mouse is moved
- *
- * @param x x-coordinate of mouse
- * @param y y-coordinate of mouse
- */
-static void motion_func ( int x, int y )
-{
- mx = x;
- my = y;
-}
-
-
-
-/**
- * GLUT window reshaping function. Called whenever a window is resized.
- *
- * @param width New width of window
- * @param height New height of window.
- */
-static void reshape_func ( int width, int height )
-{
- glutReshapeWindow ( width, height );
- winX = width;
- winY = height;
-}
-
-
-/**
- * Called when OpenGL is not drawing. Calls for another draw frame
- */
-static void idle_func ( void )
-{
- bool fullscreen = glutGameModeGet(GLUT_GAME_MODE_ACTIVE);
- if(!fullscreen) glutSetWindow(winID);
- glutPostRedisplay();
-}
-
-
-
-/**
- * Prepares OpenGL canvas to draw a new frame.
- */
-static void pre_display ( void )
-{
-// glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
- glViewport ( 0, 0, winX, winY );
- glMatrixMode ( GL_PROJECTION );
- glLoadIdentity ();
- gluOrtho2D ( 0.0, 1.0, 0.0, 1.0 );
- glClearColor ( 0.0f, 0.0f, 0.0f, 1.0f );
- glClear ( GL_COLOR_BUFFER_BIT );
-}
-
-
-
-/**
- * Posts the drawn frame to the screen.
- */
-static void post_display ( void )
-{
- glutSwapBuffers ();
-}
-////////////////////////////////////////////////////////////////////////////
-
-/**
- * Draws OpenGL polygons that represent the fluid simulation. Also calles
- * methods that trigger simulation steps. Most of the behavior of the
- * program is defined in this method.
- *
- */
-static void drawFunction ( void )
-{
- FluidSolver* flSolver;
- bool dispUsr = useUserSolver && dusers;
-
- if(useUserSolver) flSolver = userSolver;
- else flSolver = solver;
-
- tryChangeMode();
- pre_display();
- loadKinectData();
-
- defineBoundsFromImage (flSolver, depthImage);
- getForcesFromMouse (flSolver);
- if(useFlow) computeOpticalFlow(flSolver, flow);
- emitSplashes (flSolver, flow);
-
- if(useUserSolver) userSolver->update();
- else solver->update();
-
- if(dvel) drawVelocity(flSolver);
- else drawDensity(flSolver);
-
- if(dbound) drawBounds(flSolver);
- if(dispUsr) drawUsers();
- post_display();
-}
-
-
-/**
- * Sets OpenGL Callbacks
- *
- */
-static void initOpenGl()
-{
- glClearColor ( 0.0f, 0.0f, 0.0f, 1.0f );
- glClear ( GL_COLOR_BUFFER_BIT );
- glutSwapBuffers ();
- glClear ( GL_COLOR_BUFFER_BIT );
- glutSwapBuffers ();
-
- pre_display ();
-
- glutKeyboardFunc (key_func );
- glutMouseFunc (mouse_func );
- glutMotionFunc (motion_func );
- glutReshapeFunc (reshape_func );
- glutIdleFunc (idle_func );
- glutDisplayFunc (drawFunction );
-}
-
-
-/**
- * Switches between fullscreen and windowed mode.
- */
-static void toggleFullscreen()
-{
- bool fullscreen = glutGameModeGet(GLUT_GAME_MODE_ACTIVE);
- if(fullscreen)
- {
- winX = winY = DEF_WINDOW_SIZE;
- glutLeaveGameMode();
- openGlutWindow();
- }
- else
- {
- glutGameModeString("640x480:16@60");
- if (glutGameModeGet(GLUT_GAME_MODE_POSSIBLE))
- {
- glutEnterGameMode();
- //glutDestroyWindow(winID);
- winX = glutGameModeGet(GLUT_GAME_MODE_WIDTH);
- winY = glutGameModeGet(GLUT_GAME_MODE_HEIGHT);
- initOpenGl();
- }
- else
- printf("The select fullscreen mode is not available\n");
- }
-}
-
-
-
-/**
- * Opens a GLUT compatible window and sets OpenGL callbacks.
- *
- */
-static void openGlutWindow ( void )
-{
- glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
- glutInitWindowPosition ( 0, 0 );
- glutInitWindowSize ( winX, winY );
-
- char* windowName;
- sprintf(windowName, "Fluid Wall %s", VERSION);
- winID = glutCreateWindow(windowName);
-
- //register callbacks
- initOpenGl();
-}
-
-
-
-//////////////////////////////////////////////////////////////////////////
-/*
- ----------------------------------------------------------------------
- main --- main routine
- ----------------------------------------------------------------------
-*/
-int main ( int argc, char ** argv )
-{
- glutInit ( &argc, argv);
- if ( argc != 1 && argc != 6 )
- {
- fprintf ( stderr, "usage : %s N dt diff visc force source\n", argv[0] );
- fprintf ( stderr, "where:\n" );\
- fprintf ( stderr, "\t N : grid resolution\n" );
- fprintf ( stderr, "\t dt : time step\n" );
- fprintf ( stderr, "\t diff : diffusion rate of the density\n" );
- fprintf ( stderr, "\t visc : viscosity of the fluid\n" );
- fprintf ( stderr, "\t force : scales the mouse movement that generate a force\n" );
- fprintf ( stderr, "\t source : amount of density that will be deposited\n" );
- exit ( 1 );
- }
-
- printf ( "\n\n ==== Fluid Wall %s ==== \n", VERSION);
- printf ( " SIMULATION:\n");
- printf ( "\t Add densities with the right mouse button\n" );
- printf ( "\t Add bounds with the middle mouse button\n" );
- printf ( "\t Add velocities with the left mouse button and dragging the mouse\n" );
- printf ( "\t Toggle use of optical flow with the 'f' key.\n" );
- printf ( "\t Clear the simulation with the 'c' key\n" );
- printf ( " DISPLAY:\n");
- printf ( "\t Toggle fullscreen mode with the 'q' key.\n" );
- printf ( "\t Toggle density/velocity display with the 'v' key.\n" );
- printf ( "\t Toggle bounds display with the 'b' key.\n" );
- printf ( "\t Toggle users display with the 'u' key.\n" );
- printf ( " MODES:\n");
- printf ( "\t '0' key: Toggle Automatic Mode Change.\n" );
- printf ( "\t '1' key: Switch to mode 1: Single user, blue fluid.\n" );
- printf ( "\t '2' key: Switch to mode 2: Velocity Vector, no optical flow.\n" );
- printf ( "\t '3' key: Switch to mode 3: Multi-user, multicolor fluid.\n" );
- printf ( "\t '4' key: Switch to mode 4: Multi-user, white background.\n" );
- printf ( " KINECT:\n");
- printf ( "\t Increase Kinect depth thrshold angle with the 'o' key.\n" );
- printf ( "\t Decrease Kinect depth thrshold angle with the 'k' key.\n" );
- printf ( "\t Reset the Kinect with the + key \n\n" );
- printf ( " Quit with the 'ESC' key.\n" );
-
- dvel = false;
- dusers = false;
- dbound = false;
-
- if ( !allocateData() )
- exit ( 1 );
-
- clearData();
-
- winX = DEF_WINDOW_SIZE;
- winY = DEF_WINDOW_SIZE;
-
- openGlutWindow();
- glutMainLoop();
-}
\ No newline at end of file
diff --git a/Chapter9_FluidInteractionUsingKinect/fluidWall_2.cpp b/Chapter9_FluidInteractionUsingKinect/fluidWall_2.cpp
deleted file mode 100755
index 7fc1ee1..0000000
--- a/Chapter9_FluidInteractionUsingKinect/fluidWall_2.cpp
+++ /dev/null
@@ -1,1198 +0,0 @@
-/*****************************************************************************
-* Ch9 of the book "Mastering OpenCV with Practical Computer Vision Projects"
-* Copyright Packt Publishing 2012.
-* http://www.packtpub.com/cool-projects-with-opencv/book
-* http://code.google.com/p/fluidwall/
-*****************************************************************************/
-/**
- * @file fluidWall.cpp
- * @author Austin Hines
- * @copyright 2011 Austin Hines, Naureen Mahmood, and Texas A&M Dept. of Visualization
- * @version 1.0.1
- *
- * The main executable for Fluid Wall. Contains functions that define how the
- * KinectController class effects the FluidSolver classes. This defines the general
- * behavior of the simulation.
- *
- * This file is part of Fluid Wall. You can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Fluid Wall is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Fluid Wall. If not, see .
- *
- * Version History:
- * 1.0.1
- * - Added fullscreen toggle mode, bound to 'q' key.
- * - Removed 'q' key as quit.
- * - Added Version tag to GLUT window and command line output.
- * 1.0.0
- * - Initial Release
- *
- */
-
-
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-#include