diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 000000000..1ff0c4230
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,63 @@
+###############################################################################
+# Set default behavior to automatically normalize line endings.
+###############################################################################
+* text=auto
+
+###############################################################################
+# Set default behavior for command prompt diff.
+#
+# This is needed for earlier builds of msysgit that do not have it on by
+# default for csharp files.
+# Note: This is only used by the command line.
+###############################################################################
+#*.cs diff=csharp
+
+###############################################################################
+# Set the merge driver for project and solution files
+#
+# Merging from the command prompt will add diff markers to the files if there
+# are conflicts (merging from VS is not affected by the settings below; in VS
+# the diff markers are never inserted). Diff markers may cause the following
+# file extensions to fail to load in VS. An alternative is to treat these
+# files as binary so that they always conflict and require user intervention
+# with every merge. To do so, just uncomment the entries below.
+###############################################################################
+#*.sln merge=binary
+#*.csproj merge=binary
+#*.vbproj merge=binary
+#*.vcxproj merge=binary
+#*.vcproj merge=binary
+#*.dbproj merge=binary
+#*.fsproj merge=binary
+#*.lsproj merge=binary
+#*.wixproj merge=binary
+#*.modelproj merge=binary
+#*.sqlproj merge=binary
+#*.wwaproj merge=binary
+
+###############################################################################
+# Behavior for image files
+#
+# Image files are treated as binary by default.
+###############################################################################
+#*.jpg binary
+#*.png binary
+#*.gif binary
+
+###############################################################################
+# Diff behavior for common document formats
+#
+# Convert binary document formats to text before diffing them. This feature
+# is only available from the command line. Turn it on by uncommenting the
+# entries below.
+###############################################################################
+#*.doc diff=astextplain
+#*.DOC diff=astextplain
+#*.docx diff=astextplain
+#*.DOCX diff=astextplain
+#*.dot diff=astextplain
+#*.DOT diff=astextplain
+#*.pdf diff=astextplain
+#*.PDF diff=astextplain
+#*.rtf diff=astextplain
+#*.RTF diff=astextplain
diff --git a/.gitignore b/.gitignore
index bdc3535f7..6c1515449 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,7 +16,9 @@ TestResults
# Build results
[Dd]ebug/
[Rr]elease/
-x64/
+#x64/
+OpenCvSharpExtern/x64
+OpenCvSharpExternGpu/x64
*_i.c
*_p.c
*.ilk
@@ -106,3 +108,10 @@ Generated_Code #added for RIA/Silverlight projects
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
+Help JP
+Help EN
+/src/Release-Static
+/src/OpenCvSharpExtern/Release-Static
+*.db
+/src/OpenCvSharp.Sandbox/dll/x86/OpenCvSharpExtern.dll
+/src/*.opendb
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 6600f1c98..000000000
--- a/LICENSE
+++ /dev/null
@@ -1,165 +0,0 @@
-GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 000000000..513efb3d1
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,33 @@
+By downloading, copying, installing or using the software you agree to this license.
+If you do not agree to this license, do not download, install,
+copy or use the software.
+
+
+ License Agreement
+ For Open Source Computer Vision Library
+ (3-clause BSD License)
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+  * Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+  * Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+  * Neither the names of the copyright holders nor the names of the contributors
+    may be used to endorse or promote products derived from this software
+    without specific prior written permission.
+
+This software is provided by the copyright holders and contributors "as is" and
+any express or implied warranties, including, but not limited to, the implied
+warranties of merchantability and fitness for a particular purpose are disclaimed.
+In no event shall copyright holders or contributors be liable for any direct,
+indirect, incidental, special, exemplary, or consequential damages
+(including, but not limited to, procurement of substitute goods or services;
+loss of use, data, or profits; or business interruption) however caused
+and on any theory of liability, whether in contract, strict liability,
+or tort (including negligence or otherwise) arising in any way out of
+the use of this software, even if advised of the possibility of such damage.
\ No newline at end of file
diff --git a/README.md b/README.md
index e1e977f07..1905542eb 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,74 @@
-opencvsharp
-===========
+# OpenCvSharp
+A cross-platform wrapper of OpenCV for the .NET Framework.
-OpenCV wrapper for .NET Framework
+A cross-platform wrapper for using OpenCV from the .NET Framework.
+
+## Installation
+### NuGet
+If you have Visual Studio 2012 or later, it is recommended to install via [NuGet](http://www.nuget.org/). Search for *'opencvsharp'* in the NuGet Package Manager, or install from the Package Manager Console as shown below.
+
+* [OpenCV3.1 All-in-one package](https://www.nuget.org/packages/OpenCvSharp3-AnyCPU/) - bundles native OpenCV DLLs
+* [OpenCV3.1 Minimum package](https://www.nuget.org/packages/OpenCvSharp3-WithoutDll/)
+* [OpenCV2.4.10 All-in-one package](https://www.nuget.org/packages/OpenCvSharp-AnyCPU/) - bundles native OpenCV DLLs
+* [OpenCV2.4.10 Minimum package](https://www.nuget.org/packages/OpenCvSharp-WithoutDll/)
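+
+For example (a minimal sketch assuming the standard NuGet Package Manager Console; substitute whichever package name from the list above you need):
+
+```
+PM> Install-Package OpenCvSharp3-AnyCPU
+```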
+
+### Downloads
+If you do not use NuGet, download the DLL files from the [release page](https://github.com/shimat/opencvsharp/releases).
+
+## Requirements
+* [OpenCV 3.1 / 2.4.10](http://opencv.org/)
+* [Visual C++ 2013 Redistributable Package](http://www.microsoft.com/en-US/download/details.aspx?id=30679)
+* [.NET Framework 2.0](http://www.microsoft.com/ja-jp/download/details.aspx?id=1639) or later / [Mono](http://www.mono-project.com/Main_Page)
+
+On Windows, OpenCvSharp requires OpenCV DLL files built with VC++2013 (msvcr120.dll). The official pre-built DLL files in build/[x86 or x64]/vc12/bin are suitable.
+
+## Documentation
+http://shimat.github.io/opencvsharp/
+
+## Usage
+For more details, see the **[Wiki](https://github.com/shimat/opencvsharp/wiki)** page.
+
+```C#
+// Edge detection by Canny algorithm
+using OpenCvSharp;
+// using OpenCvSharp.CPlusPlus; //for OpenCvSharp2
+
+class Program
+{
+ static void Main()
+ {
+ Mat src = new Mat("lenna.png", ImreadModes.GrayScale); // OpenCvSharp 3.x
+ //Mat src = new Mat("lenna.png", LoadMode.GrayScale); // OpenCvSharp 2.4.x
+ Mat dst = new Mat();
+
+ Cv2.Canny(src, dst, 50, 200);
+ using (new Window("src image", src))
+ using (new Window("dst image", dst))
+ {
+ Cv2.WaitKey();
+ }
+ }
+}
+```
+
+## Features
+* OpenCvSharp is modeled on the native OpenCV C/C++ API style as much as possible.
+* Many OpenCvSharp classes implement IDisposable, so there is no need to manage unmanaged resources manually.
+* OpenCvSharp does not force an object-oriented programming style on you. You can also call native-style OpenCV functions.
+* OpenCvSharp provides functions for converting from Mat/IplImage into Bitmap (GDI+) or WriteableBitmap (WPF); see the sketch below.
+* OpenCvSharp can work on [Mono](http://www.mono-project.com/Main_Page). It can run on any platform that [Mono](http://www.mono-project.com/Main_Page) supports (e.g. Linux).
+
+-----
+
+* Designed so that code can be written as closely as possible to the original C/C++ style.
+* Many classes implement the IDisposable interface, which makes native resource management easy.
+* An object-oriented style is not forced on you; OpenCV's native functions can be called as-is.
+* Interoperation with GDI+ and WPF is supported: OpenCV's Mat/IplImage can be converted to and from GDI+'s Bitmap and WPF's WriteableBitmap.
+* Works with [Mono](http://www.mono-project.com/Main_Page), so it runs cross-platform, e.g. on Linux.
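+
+As a rough illustration of the Bitmap conversion mentioned above (a sketch only: it assumes the `BitmapConverter` helper in the `OpenCvSharp.Extensions` namespace that ships with the NuGet packages, so check it against your installed version):
+
+```C#
+using System.Drawing;
+using OpenCvSharp;
+using OpenCvSharp.Extensions;
+
+class BitmapInterop
+{
+    static void Main()
+    {
+        using (Mat mat = new Mat("lenna.png", ImreadModes.Color))  // load with OpenCV
+        using (Bitmap bitmap = BitmapConverter.ToBitmap(mat))      // Mat -> GDI+ Bitmap
+        using (Mat roundTrip = BitmapConverter.ToMat(bitmap))      // Bitmap -> Mat again
+        {
+            Cv2.ImWrite("lenna_copy.png", roundTrip);
+        }
+    }
+}
+```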
+
+
+## License
+OpenCvSharp is licensed under the
+**BSD 3-Clause License**. See [LICENSE.txt](https://github.com/shimat/opencvsharp/blob/master/LICENSE.txt).
+
+OpenCvSharp.Blob uses [cvBlob](https://code.google.com/p/cvblob/) to implement blob extraction.
diff --git a/doc/OpenCvSharp_EN.shfbproj b/doc/OpenCvSharp_EN.shfbproj
new file mode 100644
index 000000000..b04e64abd
--- /dev/null
+++ b/doc/OpenCvSharp_EN.shfbproj
@@ -0,0 +1,62 @@
+
+
+
+
+ Debug
+ AnyCPU
+ 2.0
+ {69e191e1-d2ff-4983-be76-e92456997fa6}
+ 1.9.9.0
+
+ Documentation
+ Documentation
+ Documentation
+
+ .\Help EN\
+ Documentation
+ .NET Framework 3.5
+ HtmlHelp1, Website
+ OpenCV wrapper for .NET Framework
+ OpenCvSharp Class Library
+ en-US
+
+
+
+
+
+
+
+
+
+
+
+
+
+ OnlyWarningsAndErrors
+ True
+ True
+ False
+ True
+ True
+ 2
+ False
+ Standard
+ Blank
+ False
+ VS2010
+ False
+ Guid
+ AboveNamespaces
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/doc/OpenCvSharp_JP.shfbproj b/doc/OpenCvSharp_JP.shfbproj
new file mode 100644
index 000000000..6fd5868d4
--- /dev/null
+++ b/doc/OpenCvSharp_JP.shfbproj
@@ -0,0 +1,62 @@
+
+
+
+
+ Debug
+ AnyCPU
+ 2.0
+ {69e191e1-d2ff-4983-be76-e92456997fa6}
+ 1.9.9.0
+
+ Documentation
+ Documentation
+ Documentation
+
+ .\Help JP\
+ Documentation
+ .NET Framework 3.5
+ HtmlHelp1, Website
+ OpenCV wrapper for .NET Framework
+ OpenCvSharp Class Library
+ ja-JP
+
+
+
+
+
+
+
+
+
+
+
+
+
+ OnlyWarningsAndErrors
+ True
+ True
+ False
+ True
+ True
+ 2
+ False
+ C#, Managed C++
+ Blank
+ False
+ VS2010
+ False
+ Guid
+ BelowNamespaces
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/help/OpenCvSharp.shfbproj b/help/OpenCvSharp.shfbproj
new file mode 100644
index 000000000..2b0a8b40e
--- /dev/null
+++ b/help/OpenCvSharp.shfbproj
@@ -0,0 +1,77 @@
+
+
+
+
+ Debug
+ AnyCPU
+ 2.0
+ {5cbd22b4-2d40-4629-9138-cf3dd157ee3e}
+ 2015.6.5.0
+
+ Documentation
+ Documentation
+ Documentation
+
+ .NET Framework 4.5
+ .\Help\
+ Documentation
+ en-US
+ OnlyWarningsAndErrors
+ HtmlHelp1, Website
+ False
+ True
+ False
+ True
+ AutoDocumentCtors, AutoDocumentDispose
+
+
+
+
+
+
+
+
+
+ 1.0.0.0
+ 2
+ False
+ C#, Visual Basic, Managed C++, F#, JavaScript
+ Blank
+ False
+ VS2013
+ False
+ Guid
+ A Sandcastle Documented Class Library
+ AboveNamespaces
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ OnBuildSuccess
+
+
\ No newline at end of file
diff --git a/lib/opencv/include/opencv/cv.h b/lib/opencv/include/opencv/cv.h
new file mode 100644
index 000000000..0aefc6d27
--- /dev/null
+++ b/lib/opencv/include/opencv/cv.h
@@ -0,0 +1,73 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OLD_CV_H__
+#define __OPENCV_OLD_CV_H__
+
+#if defined(_MSC_VER)
+ #define CV_DO_PRAGMA(x) __pragma(x)
+ #define __CVSTR2__(x) #x
+ #define __CVSTR1__(x) __CVSTR2__(x)
+ #define __CVMSVCLOC__ __FILE__ "("__CVSTR1__(__LINE__)") : "
+ #define CV_MSG_PRAGMA(_msg) CV_DO_PRAGMA(message (__CVMSVCLOC__ _msg))
+#elif defined(__GNUC__)
+ #define CV_DO_PRAGMA(x) _Pragma (#x)
+ #define CV_MSG_PRAGMA(_msg) CV_DO_PRAGMA(message (_msg))
+#else
+ #define CV_DO_PRAGMA(x)
+ #define CV_MSG_PRAGMA(_msg)
+#endif
+#define CV_WARNING(x) CV_MSG_PRAGMA("Warning: " #x)
+
+//CV_WARNING("This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module")
+
+#include "opencv2/core/core_c.h"
+#include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/photo/photo_c.h"
+#include "opencv2/video/tracking_c.h"
+#include "opencv2/objdetect/objdetect_c.h"
+
+#if !defined(CV_IMPL)
+#define CV_IMPL extern "C"
+#endif //CV_IMPL
+
+#endif // __OPENCV_OLD_CV_H__
diff --git a/lib/opencv/include/opencv/cv.hpp b/lib/opencv/include/opencv/cv.hpp
new file mode 100644
index 000000000..e498d7ac1
--- /dev/null
+++ b/lib/opencv/include/opencv/cv.hpp
@@ -0,0 +1,60 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OLD_CV_HPP__
+#define __OPENCV_OLD_CV_HPP__
+
+//#if defined(__GNUC__)
+//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
+//#endif
+
+#include "cv.h"
+#include "opencv2/core.hpp"
+#include "opencv2/imgproc.hpp"
+#include "opencv2/photo.hpp"
+#include "opencv2/video.hpp"
+#include "opencv2/highgui.hpp"
+#include "opencv2/features2d.hpp"
+#include "opencv2/calib3d.hpp"
+#include "opencv2/objdetect.hpp"
+
+#endif
diff --git a/lib/opencv/include/opencv/cvaux.h b/lib/opencv/include/opencv/cvaux.h
new file mode 100644
index 000000000..fe86c5d98
--- /dev/null
+++ b/lib/opencv/include/opencv/cvaux.h
@@ -0,0 +1,57 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OLD_AUX_H__
+#define __OPENCV_OLD_AUX_H__
+
+//#if defined(__GNUC__)
+//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
+//#endif
+
+#include "opencv2/core/core_c.h"
+#include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/photo/photo_c.h"
+#include "opencv2/video/tracking_c.h"
+#include "opencv2/objdetect/objdetect_c.h"
+
+#endif
+
+/* End of file. */
diff --git a/lib/opencv/include/opencv/cvaux.hpp b/lib/opencv/include/opencv/cvaux.hpp
new file mode 100644
index 000000000..b0e60a303
--- /dev/null
+++ b/lib/opencv/include/opencv/cvaux.hpp
@@ -0,0 +1,52 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OLD_AUX_HPP__
+#define __OPENCV_OLD_AUX_HPP__
+
+//#if defined(__GNUC__)
+//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
+//#endif
+
+#include "cvaux.h"
+#include "opencv2/core/utility.hpp"
+
+#endif
diff --git a/lib/opencv/include/opencv/cvwimage.h b/lib/opencv/include/opencv/cvwimage.h
new file mode 100644
index 000000000..de89c9270
--- /dev/null
+++ b/lib/opencv/include/opencv/cvwimage.h
@@ -0,0 +1,46 @@
+///////////////////////////////////////////////////////////////////////////////
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to
+// this license. If you do not agree to this license, do not download,
+// install, copy or use the software.
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2008, Google, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation or contributors may not be used to endorse
+// or promote products derived from this software without specific
+// prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is"
+// and any express or implied warranties, including, but not limited to, the
+// implied warranties of merchantability and fitness for a particular purpose
+// are disclaimed. In no event shall the Intel Corporation or contributors be
+// liable for any direct, indirect, incidental, special, exemplary, or
+// consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+
+
+#ifndef __OPENCV_OLD_WIMAGE_HPP__
+#define __OPENCV_OLD_WIMAGE_HPP__
+
+#include "opencv2/core/wimage.hpp"
+
+#endif
diff --git a/lib/opencv/include/opencv/cxcore.h b/lib/opencv/include/opencv/cxcore.h
new file mode 100644
index 000000000..0982bd750
--- /dev/null
+++ b/lib/opencv/include/opencv/cxcore.h
@@ -0,0 +1,52 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OLD_CXCORE_H__
+#define __OPENCV_OLD_CXCORE_H__
+
+//#if defined(__GNUC__)
+//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
+//#endif
+
+#include "opencv2/core/core_c.h"
+
+#endif
diff --git a/lib/opencv/include/opencv/cxcore.hpp b/lib/opencv/include/opencv/cxcore.hpp
new file mode 100644
index 000000000..9af4ac746
--- /dev/null
+++ b/lib/opencv/include/opencv/cxcore.hpp
@@ -0,0 +1,53 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OLD_CXCORE_HPP__
+#define __OPENCV_OLD_CXCORE_HPP__
+
+//#if defined(__GNUC__)
+//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
+//#endif
+
+#include "cxcore.h"
+#include "opencv2/core.hpp"
+
+#endif
diff --git a/lib/opencv/include/opencv/cxeigen.hpp b/lib/opencv/include/opencv/cxeigen.hpp
new file mode 100644
index 000000000..1f04d1a3a
--- /dev/null
+++ b/lib/opencv/include/opencv/cxeigen.hpp
@@ -0,0 +1,48 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OLD_EIGEN_HPP__
+#define __OPENCV_OLD_EIGEN_HPP__
+
+#include "opencv2/core/eigen.hpp"
+
+#endif
diff --git a/lib/opencv/include/opencv/cxmisc.h b/lib/opencv/include/opencv/cxmisc.h
new file mode 100644
index 000000000..6c93a0cce
--- /dev/null
+++ b/lib/opencv/include/opencv/cxmisc.h
@@ -0,0 +1,8 @@
+#ifndef __OPENCV_OLD_CXMISC_H__
+#define __OPENCV_OLD_CXMISC_H__
+
+#ifdef __cplusplus
+# include "opencv2/core/utility.hpp"
+#endif
+
+#endif
diff --git a/lib/opencv/include/opencv/highgui.h b/lib/opencv/include/opencv/highgui.h
new file mode 100644
index 000000000..0261029c0
--- /dev/null
+++ b/lib/opencv/include/opencv/highgui.h
@@ -0,0 +1,48 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OLD_HIGHGUI_H__
+#define __OPENCV_OLD_HIGHGUI_H__
+
+#include "opencv2/core/core_c.h"
+#include "opencv2/highgui/highgui_c.h"
+
+#endif
diff --git a/lib/opencv/include/opencv/ml.h b/lib/opencv/include/opencv/ml.h
new file mode 100644
index 000000000..d8e967f81
--- /dev/null
+++ b/lib/opencv/include/opencv/ml.h
@@ -0,0 +1,47 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+//
+// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OLD_ML_H__
+#define __OPENCV_OLD_ML_H__
+
+#include "opencv2/core/core_c.h"
+#include "opencv2/ml.hpp"
+
+#endif
diff --git a/lib/opencv/include/opencv2/calib3d.hpp b/lib/opencv/include/opencv2/calib3d.hpp
new file mode 100644
index 000000000..ddffffecd
--- /dev/null
+++ b/lib/opencv/include/opencv2/calib3d.hpp
@@ -0,0 +1,2001 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CALIB3D_HPP__
+#define __OPENCV_CALIB3D_HPP__
+
+#include "opencv2/core.hpp"
+#include "opencv2/features2d.hpp"
+#include "opencv2/core/affine.hpp"
+
+/**
+ @defgroup calib3d Camera Calibration and 3D Reconstruction
+
+The functions in this section use a so-called pinhole camera model. In this model, a scene view is
+formed by projecting 3D points into the image plane using a perspective transformation.
+
+\f[s \; m' = A [R|t] M'\f]
+
+or
+
+\f[s \vecthree{u}{v}{1} = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}
+\begin{bmatrix}
+r_{11} & r_{12} & r_{13} & t_1 \\
+r_{21} & r_{22} & r_{23} & t_2 \\
+r_{31} & r_{32} & r_{33} & t_3
+\end{bmatrix}
+\begin{bmatrix}
+X \\
+Y \\
+Z \\
+1
+\end{bmatrix}\f]
+
+where:
+
+- \f$(X, Y, Z)\f$ are the coordinates of a 3D point in the world coordinate space
+- \f$(u, v)\f$ are the coordinates of the projection point in pixels
+- \f$A\f$ is a camera matrix, or a matrix of intrinsic parameters
+- \f$(c_x, c_y)\f$ is a principal point that is usually at the image center
+- \f$f_x, f_y\f$ are the focal lengths expressed in pixel units.
+
+Thus, if an image from the camera is scaled by a factor, all of these parameters should be scaled
+(multiplied/divided, respectively) by the same factor. The matrix of intrinsic parameters does not
+depend on the scene viewed. So, once estimated, it can be re-used as long as the focal length is
+fixed (in case of zoom lens). The joint rotation-translation matrix \f$[R|t]\f$ is called a matrix of
+extrinsic parameters. It is used to describe the camera motion around a static scene, or vice versa,
+rigid motion of an object in front of a still camera. That is, \f$[R|t]\f$ translates coordinates of a
+point \f$(X, Y, Z)\f$ to a coordinate system, fixed with respect to the camera. The transformation above
+is equivalent to the following (when \f$z \ne 0\f$ ):
+
+\f[\begin{array}{l}
+\vecthree{x}{y}{z} = R \vecthree{X}{Y}{Z} + t \\
+x' = x/z \\
+y' = y/z \\
+u = f_x*x' + c_x \\
+v = f_y*y' + c_y
+\end{array}\f]
+
+Real lenses usually have some distortion, mostly radial distortion and slight tangential distortion.
+So, the above model is extended as:
+
+\f[\begin{array}{l}
+\vecthree{x}{y}{z} = R \vecthree{X}{Y}{Z} + t \\
+x' = x/z \\
+y' = y/z \\
+x'' = x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + 2 p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4 \\
+y'' = y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\
+\text{where} \quad r^2 = x'^2 + y'^2 \\
+u = f_x*x'' + c_x \\
+v = f_y*y'' + c_y
+\end{array}\f]
+
+\f$k_1\f$, \f$k_2\f$, \f$k_3\f$, \f$k_4\f$, \f$k_5\f$, and \f$k_6\f$ are radial distortion coefficients. \f$p_1\f$ and \f$p_2\f$ are
+tangential distortion coefficients. \f$s_1\f$, \f$s_2\f$, \f$s_3\f$, and \f$s_4\f$, are the thin prism distortion
+coefficients. Higher-order coefficients are not considered in OpenCV.
+
+In some cases the image sensor may be tilted in order to focus an oblique plane in front of the
+camera (Scheimpflug condition). This can be useful for particle image velocimetry (PIV) or
+triangulation with a laser fan. The tilt causes a perspective distortion of \f$x''\f$ and
+\f$y''\f$. This distortion can be modelled in the following way, see e.g. @cite Louhichi07.
+
+\f[\begin{array}{l}
+s\vecthree{x'''}{y'''}{1} =
+\vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)}
+{0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)}
+{0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\\
+u = f_x*x''' + c_x \\
+v = f_y*y''' + c_y
+\end{array}\f]
+
+where the matrix \f$R(\tau_x, \tau_y)\f$ is defined by two rotations with angular parameter \f$\tau_x\f$
+and \f$\tau_y\f$, respectively,
+
+\f[
+R(\tau_x, \tau_y) =
+\vecthreethree{\cos(\tau_y)}{0}{-\sin(\tau_y)}{0}{1}{0}{\sin(\tau_y)}{0}{\cos(\tau_y)}
+\vecthreethree{1}{0}{0}{0}{\cos(\tau_x)}{\sin(\tau_x)}{0}{-\sin(\tau_x)}{\cos(\tau_x)} =
+\vecthreethree{\cos(\tau_y)}{\sin(\tau_y)\sin(\tau_x)}{-\sin(\tau_y)\cos(\tau_x)}
+{0}{\cos(\tau_x)}{\sin(\tau_x)}
+{\sin(\tau_y)}{-\cos(\tau_y)\sin(\tau_x)}{\cos(\tau_y)\cos(\tau_x)}.
+\f]
+
+In the functions below the coefficients are passed or returned as
+
+\f[(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f]
+
+vector. That is, if the vector contains four elements, it means that \f$k_3=0\f$ . The distortion
+coefficients do not depend on the scene viewed. Thus, they also belong to the intrinsic camera
+parameters. And they remain the same regardless of the captured image resolution. If, for example, a
+camera has been calibrated on images of 320 x 240 resolution, exactly the same distortion
+coefficients can be used for 640 x 480 images from the same camera while \f$f_x\f$, \f$f_y\f$, \f$c_x\f$, and
+\f$c_y\f$ need to be scaled appropriately.
+
+The functions below use the above model to do the following:
+
+- Project 3D points to the image plane given intrinsic and extrinsic parameters.
+- Compute extrinsic parameters given intrinsic parameters, a few 3D points, and their
+projections.
+- Estimate intrinsic and extrinsic camera parameters from several views of a known calibration
+pattern (every view is described by several 3D-2D point correspondences).
+- Estimate the relative position and orientation of the stereo camera "heads" and compute the
+*rectification* transformation that makes the camera optical axes parallel.
+
+@note
+ - A calibration sample for 3 cameras in horizontal position can be found at
+ opencv_source_code/samples/cpp/3calibration.cpp
+ - A calibration sample based on a sequence of images can be found at
+ opencv_source_code/samples/cpp/calibration.cpp
+ - A calibration sample in order to do 3D reconstruction can be found at
+ opencv_source_code/samples/cpp/build3dmodel.cpp
+ - A calibration sample of an artificially generated camera and chessboard patterns can be
+ found at opencv_source_code/samples/cpp/calibration_artificial.cpp
+ - A calibration example on stereo calibration can be found at
+ opencv_source_code/samples/cpp/stereo_calib.cpp
+ - A calibration example on stereo matching can be found at
+ opencv_source_code/samples/cpp/stereo_match.cpp
+ - (Python) A camera calibration sample can be found at
+ opencv_source_code/samples/python/calibrate.py
+
+ @{
+ @defgroup calib3d_fisheye Fisheye camera model
+
+ Definitions: Let P be a point in 3D of coordinates X in the world reference frame (stored in the
+ matrix X) The coordinate vector of P in the camera reference frame is:
+
+ \f[Xc = R X + T\f]
+
+ where R is the rotation matrix corresponding to the rotation vector om: R = rodrigues(om); call x, y
+ and z the 3 coordinates of Xc:
+
+ \f[x = Xc_1 \\ y = Xc_2 \\ z = Xc_3\f]
+
+ The pinhole projection coordinates of P are [a; b] where
+
+ \f[a = x / z \ and \ b = y / z \\ r^2 = a^2 + b^2 \\ \theta = atan(r)\f]
+
+ Fisheye distortion:
+
+ \f[\theta_d = \theta (1 + k_1 \theta^2 + k_2 \theta^4 + k_3 \theta^6 + k_4 \theta^8)\f]
+
+ The distorted point coordinates are [x'; y'] where
+
+ \f[x' = (\theta_d / r) x \\ y' = (\theta_d / r) y \f]
+
+ Finally, conversion into pixel coordinates: The final pixel coordinates vector [u; v] where:
+
+ \f[u = f_x (x' + \alpha y') + c_x \\
+ v = f_y y' + c_y\f]
+
+ @defgroup calib3d_c C API
+
+ @}
+ */
+
+namespace cv
+{
+
+//! @addtogroup calib3d
+//! @{
+
+//! type of the robust estimation algorithm
+enum { LMEDS = 4, //!< least-median algorithm
+ RANSAC = 8, //!< RANSAC algorithm
+ RHO = 16 //!< RHO algorithm
+ };
+
+enum { SOLVEPNP_ITERATIVE = 0,
+ SOLVEPNP_EPNP = 1, //!< EPnP: Efficient Perspective-n-Point Camera Pose Estimation @cite lepetit2009epnp
+ SOLVEPNP_P3P = 2, //!< Complete Solution Classification for the Perspective-Three-Point Problem @cite gao2003complete
+ SOLVEPNP_DLS = 3, //!< A Direct Least-Squares (DLS) Method for PnP @cite hesch2011direct
+ SOLVEPNP_UPNP = 4 //!< Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation @cite penate2013exhaustive
+
+};
+
+enum { CALIB_CB_ADAPTIVE_THRESH = 1,
+ CALIB_CB_NORMALIZE_IMAGE = 2,
+ CALIB_CB_FILTER_QUADS = 4,
+ CALIB_CB_FAST_CHECK = 8
+ };
+
+enum { CALIB_CB_SYMMETRIC_GRID = 1,
+ CALIB_CB_ASYMMETRIC_GRID = 2,
+ CALIB_CB_CLUSTERING = 4
+ };
+
+enum { CALIB_USE_INTRINSIC_GUESS = 0x00001,
+ CALIB_FIX_ASPECT_RATIO = 0x00002,
+ CALIB_FIX_PRINCIPAL_POINT = 0x00004,
+ CALIB_ZERO_TANGENT_DIST = 0x00008,
+ CALIB_FIX_FOCAL_LENGTH = 0x00010,
+ CALIB_FIX_K1 = 0x00020,
+ CALIB_FIX_K2 = 0x00040,
+ CALIB_FIX_K3 = 0x00080,
+ CALIB_FIX_K4 = 0x00800,
+ CALIB_FIX_K5 = 0x01000,
+ CALIB_FIX_K6 = 0x02000,
+ CALIB_RATIONAL_MODEL = 0x04000,
+ CALIB_THIN_PRISM_MODEL = 0x08000,
+ CALIB_FIX_S1_S2_S3_S4 = 0x10000,
+ CALIB_TILTED_MODEL = 0x40000,
+ CALIB_FIX_TAUX_TAUY = 0x80000,
+ // only for stereo
+ CALIB_FIX_INTRINSIC = 0x00100,
+ CALIB_SAME_FOCAL_LENGTH = 0x00200,
+ // for stereo rectification
+ CALIB_ZERO_DISPARITY = 0x00400,
+ CALIB_USE_LU = (1 << 17), //!< use LU instead of SVD decomposition for solving. much faster but potentially less precise
+ };
+
+//! the algorithm for finding fundamental matrix
+enum { FM_7POINT = 1, //!< 7-point algorithm
+ FM_8POINT = 2, //!< 8-point algorithm
+ FM_LMEDS = 4, //!< least-median algorithm
+ FM_RANSAC = 8 //!< RANSAC algorithm
+ };
+
+
+
+/** @brief Converts a rotation matrix to a rotation vector or vice versa.
+
+@param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
+@param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
+@param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial
+derivatives of the output array components with respect to the input array components.
+
+\f[\begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos{\theta} I + (1- \cos{\theta} ) r r^T + \sin{\theta} \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\f]
+
+Inverse transformation can be also done easily, since
+
+\f[\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\f]
+
+A rotation vector is a convenient and most compact representation of a rotation matrix (since any
+rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry
+optimization procedures like calibrateCamera, stereoCalibrate, or solvePnP .
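+
+A minimal round-trip sketch (the rotation vector value is illustrative):
+@code
+    Vec3d rvec(0.1, 0.2, 0.3);   // axis-angle rotation vector
+    Mat R, rvec2;
+    Rodrigues(rvec, R);          // 3x1 vector -> 3x3 rotation matrix
+    Rodrigues(R, rvec2);         // 3x3 matrix -> back to a rotation vector
+@endcode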
+ */
+CV_EXPORTS_W void Rodrigues( InputArray src, OutputArray dst, OutputArray jacobian = noArray() );
+
+/** @brief Finds a perspective transformation between two planes.
+
+@param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
+or vector\<Point2f\>.
+@param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
+a vector\<Point2f\>.
+@param method Method used to compute a homography matrix. The following methods are possible:
+- **0** - a regular method using all the points
+- **RANSAC** - RANSAC-based robust method
+- **LMEDS** - Least-Median robust method
+- **RHO** - PROSAC-based robust method
+@param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
+(used in the RANSAC and RHO methods only). That is, if
+\f[\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \| > \texttt{ransacReprojThreshold}\f]
+then the point \f$i\f$ is considered an outlier. If srcPoints and dstPoints are measured in pixels,
+it usually makes sense to set this parameter somewhere in the range of 1 to 10.
+@param mask Optional output mask set by a robust method ( RANSAC or LMEDS ). Note that the input
+mask values are ignored.
+@param maxIters The maximum number of RANSAC iterations; 2000 is the largest value allowed.
+@param confidence Confidence level, between 0 and 1.
+
+The functions find and return the perspective transformation \f$H\f$ between the source and the
+destination planes:
+
+\f[s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\f]
+
+so that the back-projection error
+
+\f[\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\f]
+
+is minimized. If the parameter method is set to the default value 0, the function uses all the point
+pairs to compute an initial homography estimate with a simple least-squares scheme.
+
+However, if not all of the point pairs ( \f$srcPoints_i\f$, \f$dstPoints_i\f$ ) fit the rigid perspective
+transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
+you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
+random subsets of the corresponding point pairs (of four pairs each), estimate the homography matrix
+using this subset and a simple least-square algorithm, and then compute the quality/goodness of the
+computed homography (which is the number of inliers for RANSAC or the median re-projection error for
+LMeDs). The best subset is then used to produce the initial estimate of the homography matrix and
+the mask of inliers/outliers.
+
+Regardless of the method, robust or not, the computed homography matrix is refined further (using
+inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
+re-projection error even more.
+
+The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
+distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
+correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
+noise is rather small, use the default method (method=0).
+
+The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
+determined up to a scale. Thus, it is normalized so that \f$h_{33}=1\f$. Note that whenever an H matrix
+cannot be estimated, an empty one will be returned.
+
+@sa
+ getAffineTransform, getPerspectiveTransform, estimateRigidTransform, warpPerspective,
+ perspectiveTransform
+
+@note
+ - An example of calculating a homography for image matching can be found at
+ opencv_source_code/samples/cpp/video_homography.cpp
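+
+A minimal sketch of robust estimation (the point sets are assumed to be filled elsewhere):
+@code
+    std::vector<Point2f> srcPts, dstPts;
+    // ... fill srcPts and dstPts with matched points (at least 4 pairs) ...
+    Mat inlierMask;
+    Mat H = findHomography(srcPts, dstPts, RANSAC, 3, inlierMask);
+@endcode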
+
+ */
+CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
+ int method = 0, double ransacReprojThreshold = 3,
+ OutputArray mask=noArray(), const int maxIters = 2000,
+ const double confidence = 0.995);
+
+/** @overload */
+CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,
+ OutputArray mask, int method = 0, double ransacReprojThreshold = 3 );
+
+/** @brief Computes an RQ decomposition of 3x3 matrices.
+
+@param src 3x3 input matrix.
+@param mtxR Output 3x3 upper-triangular matrix.
+@param mtxQ Output 3x3 orthogonal matrix.
+@param Qx Optional output 3x3 rotation matrix around x-axis.
+@param Qy Optional output 3x3 rotation matrix around y-axis.
+@param Qz Optional output 3x3 rotation matrix around z-axis.
+
+The function computes a RQ decomposition using the given rotations. This function is used in
+decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
+and a rotation matrix.
+
+It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
+degrees (as the return value) that could be used in OpenGL. Note that there is always more than one
+sequence of rotations about the three principal axes that results in the same orientation of an
+object, e.g. see @cite Slabaugh . The returned three rotation matrices and the corresponding three
+Euler angles are only one of the possible solutions.
+ */
+CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
+ OutputArray Qx = noArray(),
+ OutputArray Qy = noArray(),
+ OutputArray Qz = noArray());
+
+/** @brief Decomposes a projection matrix into a rotation matrix and a camera matrix.
+
+@param projMatrix 3x4 input projection matrix P.
+@param cameraMatrix Output 3x3 camera matrix K.
+@param rotMatrix Output 3x3 external rotation matrix R.
+@param transVect Output 4x1 translation vector T.
+@param rotMatrixX Optional 3x3 rotation matrix around x-axis.
+@param rotMatrixY Optional 3x3 rotation matrix around y-axis.
+@param rotMatrixZ Optional 3x3 rotation matrix around z-axis.
+@param eulerAngles Optional three-element vector containing three Euler angles of rotation in
+degrees.
+
+The function computes a decomposition of a projection matrix into a calibration and a rotation
+matrix and the position of a camera.
+
+It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
+be used in OpenGL. Note that there is always more than one sequence of rotations about the three
+principal axes that results in the same orientation of an object, e.g. see @cite Slabaugh . The
+returned three rotation matrices and the corresponding three Euler angles are only one of the
+possible solutions.
+
+The function is based on RQDecomp3x3 .
+ */
+CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix,
+ OutputArray rotMatrix, OutputArray transVect,
+ OutputArray rotMatrixX = noArray(),
+ OutputArray rotMatrixY = noArray(),
+ OutputArray rotMatrixZ = noArray(),
+ OutputArray eulerAngles =noArray() );
+
+/** @brief Computes partial derivatives of the matrix product for each multiplied matrix.
+
+@param A First multiplied matrix.
+@param B Second multiplied matrix.
+@param dABdA First output derivative matrix d(A\*B)/dA of size
+\f$\texttt{A.rows*B.cols} \times {A.rows*A.cols}\f$ .
+@param dABdB Second output derivative matrix d(A\*B)/dB of size
+\f$\texttt{A.rows*B.cols} \times {B.rows*B.cols}\f$ .
+
+The function computes partial derivatives of the elements of the matrix product \f$A*B\f$ with regard to
+the elements of each of the two input matrices. The function is used to compute the Jacobian
+matrices in stereoCalibrate but can also be used in any other similar optimization function.
+ */
+CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB );
+
+/** @brief Combines two rotation-and-shift transformations.
+
+@param rvec1 First rotation vector.
+@param tvec1 First translation vector.
+@param rvec2 Second rotation vector.
+@param tvec2 Second translation vector.
+@param rvec3 Output rotation vector of the superposition.
+@param tvec3 Output translation vector of the superposition.
+@param dr3dr1
+@param dr3dt1
+@param dr3dr2
+@param dr3dt2
+@param dt3dr1
+@param dt3dt1
+@param dt3dr2
+@param dt3dt2 Optional output derivatives of rvec3 or tvec3 with regard to rvec1, rvec2, tvec1 and
+tvec2, respectively.
+
+The functions compute:
+
+\f[\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\f]
+
+where \f$\mathrm{rodrigues}\f$ denotes a rotation vector to a rotation matrix transformation, and
+\f$\mathrm{rodrigues}^{-1}\f$ denotes the inverse transformation. See Rodrigues for details.
+
+Also, the functions can compute the derivatives of the output vectors with regards to the input
+vectors (see matMulDeriv ). The functions are used inside stereoCalibrate but can also be used in
+your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
+function that contains a matrix multiplication.
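+
+A minimal sketch of chaining two poses (the pose values are illustrative):
+@code
+    Vec3d rvec1(0, 0, 0.1), tvec1(1, 0, 0);
+    Vec3d rvec2(0.2, 0, 0), tvec2(0, 0.5, 0);
+    Mat rvec3, tvec3;
+    composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3);  // apply pose 1, then pose 2
+@endcode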
+ */
+CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1,
+ InputArray rvec2, InputArray tvec2,
+ OutputArray rvec3, OutputArray tvec3,
+ OutputArray dr3dr1 = noArray(), OutputArray dr3dt1 = noArray(),
+ OutputArray dr3dr2 = noArray(), OutputArray dr3dt2 = noArray(),
+ OutputArray dt3dr1 = noArray(), OutputArray dt3dt1 = noArray(),
+ OutputArray dt3dr2 = noArray(), OutputArray dt3dt2 = noArray() );
+
+/** @brief Projects 3D points to an image plane.
+
+@param objectPoints Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel (or
+vector\<Point3f\> ), where N is the number of points in the view.
+@param rvec Rotation vector. See Rodrigues for details.
+@param tvec Translation vector.
+@param cameraMatrix Camera matrix \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
+@param distCoeffs Input vector of distortion coefficients
+\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
+4, 5, 8, 12 or 14 elements. If the vector is empty, the zero distortion coefficients are assumed.
+@param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or
+vector\<Point2f\>.
+@param jacobian Optional output 2Nx(10+\<numDistCoeffs\>) jacobian matrix of derivatives of image
+points with respect to components of the rotation vector, translation vector, focal lengths,
+coordinates of the principal point and the distortion coefficients. In the old interface different
+components of the jacobian are returned via different output parameters.
+@param aspectRatio Optional "fixed aspect ratio" parameter. If the parameter is not 0, the
+function assumes that the aspect ratio (*fx/fy*) is fixed and correspondingly adjusts the jacobian
+matrix.
+
+The function computes projections of 3D points to the image plane given intrinsic and extrinsic
+camera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of
+image points coordinates (as functions of all the input parameters) with respect to the particular
+parameters, intrinsic and/or extrinsic. The Jacobians are used during the global optimization in
+calibrateCamera, solvePnP, and stereoCalibrate . The function itself can also be used to compute a
+re-projection error given the current intrinsic and extrinsic parameters.
+
+@note By setting rvec=tvec=(0,0,0) or by setting cameraMatrix to a 3x3 identity matrix, or by
+passing zero distortion coefficients, you can get various useful partial cases of the function. This
+means that you can compute the distorted coordinates for a sparse set of points or apply a
+perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
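+
+A minimal sketch (the camera matrix and the 3D points are illustrative; distortion is left empty):
+@code
+    std::vector<Point3f> objectPts;
+    objectPts.push_back(Point3f(0, 0, 1));
+    objectPts.push_back(Point3f(0.1f, 0, 1));
+    Mat K = (Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
+    std::vector<Point2f> imagePts;
+    projectPoints(objectPts, Vec3d(0, 0, 0), Vec3d(0, 0, 0), K, noArray(), imagePts);
+@endcode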
+ */
+CV_EXPORTS_W void projectPoints( InputArray objectPoints,
+ InputArray rvec, InputArray tvec,
+ InputArray cameraMatrix, InputArray distCoeffs,
+ OutputArray imagePoints,
+ OutputArray jacobian = noArray(),
+ double aspectRatio = 0 );
+
+/** @brief Finds an object pose from 3D-2D point correspondences.
+
+@param objectPoints Array of object points in the object coordinate space, 3xN/Nx3 1-channel or
+1xN/Nx1 3-channel, where N is the number of points. vector\<Point3f\> can also be passed here.
+@param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel,
+where N is the number of points. vector\<Point2f\> can also be passed here.
+@param cameraMatrix Input camera matrix \f$A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\f$ .
+@param distCoeffs Input vector of distortion coefficients
+\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
+4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
+assumed.
+@param rvec Output rotation vector (see Rodrigues ) that, together with tvec , brings points from
+the model coordinate system to the camera coordinate system.
+@param tvec Output translation vector.
+@param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses
+the provided rvec and tvec values as initial approximations of the rotation and translation
+vectors, respectively, and further optimizes them.
+@param flags Method for solving a PnP problem:
+- **SOLVEPNP_ITERATIVE** Iterative method is based on Levenberg-Marquardt optimization. In
+this case the function finds such a pose that minimizes reprojection error, that is the sum
+of squared distances between the observed projections imagePoints and the projected (using
+projectPoints ) objectPoints .
+- **SOLVEPNP_P3P** Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
+"Complete Solution Classification for the Perspective-Three-Point Problem". In this case the
+function requires exactly four object and image points.
+- **SOLVEPNP_EPNP** Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the
+paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation".
+- **SOLVEPNP_DLS** Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.
+"A Direct Least-Squares (DLS) Method for PnP".
+- **SOLVEPNP_UPNP** Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto,
+F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
+Estimation". In this case the function also estimates the parameters \f$f_x\f$ and \f$f_y\f$
+assuming that both have the same value. Then the cameraMatrix is updated with the estimated
+focal length.
+
+The function estimates the object pose given a set of object points, their corresponding image
+projections, as well as the camera matrix and the distortion coefficients.
+
+@note
+ - An example of how to use solvePnP for planar augmented reality can be found at
+ opencv_source_code/samples/python/plane_ar.py
+ - If you are using Python:
+ - Numpy array slices won't work as input because solvePnP requires contiguous
+ arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
+ modules/calib3d/src/solvepnp.cpp version 2.4.9)
+ - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
+ to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
+ which requires 2-channel information.
+ - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
+ it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
+ np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
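+
+A minimal C++ sketch (the correspondences and the camera matrix are illustrative):
+@code
+    std::vector<Point3f> objectPts;   // known 3D model points
+    std::vector<Point2f> imagePts;    // their detected 2D projections
+    // ... fill objectPts and imagePts with at least 4 correspondences ...
+    Mat K = (Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
+    Mat rvec, tvec;
+    bool found = solvePnP(objectPts, imagePts, K, noArray(), rvec, tvec);
+@endcode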
+ */
+CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
+ InputArray cameraMatrix, InputArray distCoeffs,
+ OutputArray rvec, OutputArray tvec,
+ bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE );
+
+/** @brief Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
+
+@param objectPoints Array of object points in the object coordinate space, 3xN/Nx3 1-channel or
+1xN/Nx1 3-channel, where N is the number of points. vector\<Point3f\> can also be passed here.
+@param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel,
+where N is the number of points. vector\<Point2f\> can also be passed here.
+@param cameraMatrix Input camera matrix \f$A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\f$ .
+@param distCoeffs Input vector of distortion coefficients
+\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
+4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
+assumed.
+@param rvec Output rotation vector (see Rodrigues ) that, together with tvec , brings points from
+the model coordinate system to the camera coordinate system.
+@param tvec Output translation vector.
+@param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses
+the provided rvec and tvec values as initial approximations of the rotation and translation
+vectors, respectively, and further optimizes them.
+@param iterationsCount Number of iterations.
+@param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
+is the maximum allowed distance between the observed and computed point projections to consider it
+an inlier.
+@param confidence The probability that the algorithm produces a useful result.
+@param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .
+@param flags Method for solving a PnP problem (see solvePnP ).
+
+The function estimates an object pose given a set of object points, their corresponding image
+projections, as well as the camera matrix and the distortion coefficients. This function finds such
+a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
+projections imagePoints and the projected (using projectPoints ) objectPoints. The use of RANSAC
+makes the function resistant to outliers.
+
+@note
+ - An example of how to use solvePNPRansac for object detection can be found at
+ opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
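+
+A minimal sketch (objectPts, imagePts and K are assumed to be prepared as in the solvePnP example):
+@code
+    Mat rvec, tvec;
+    std::vector<int> inlierIdx;
+    solvePnPRansac(objectPts, imagePts, K, noArray(), rvec, tvec,
+                   false, 100, 8.0f, 0.99, inlierIdx);
+@endcode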
+ */
+CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints,
+ InputArray cameraMatrix, InputArray distCoeffs,
+ OutputArray rvec, OutputArray tvec,
+ bool useExtrinsicGuess = false, int iterationsCount = 100,
+ float reprojectionError = 8.0, double confidence = 0.99,
+ OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE );
+
+/** @brief Finds an initial camera matrix from 3D-2D point correspondences.
+
+@param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
+coordinate space. In the old interface all the per-view vectors are concatenated. See
+calibrateCamera for details.
+@param imagePoints Vector of vectors of the projections of the calibration pattern points. In the
+old interface all the per-view vectors are concatenated.
+@param imageSize Image size in pixels used to initialize the principal point.
+@param aspectRatio If it is zero or negative, both \f$f_x\f$ and \f$f_y\f$ are estimated independently.
+Otherwise, \f$f_x = f_y * \texttt{aspectRatio}\f$ .
+
+The function estimates and returns an initial camera matrix for the camera calibration process.
+Currently, the function only supports planar calibration patterns, which are patterns where each
+object point has z-coordinate =0.
+ */
+CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
+ InputArrayOfArrays imagePoints,
+ Size imageSize, double aspectRatio = 1.0 );
+
+/** @brief Finds the positions of internal corners of the chessboard.
+
+@param image Source chessboard view. It must be an 8-bit grayscale or color image.
+@param patternSize Number of inner corners per a chessboard row and column
+( patternSize = cvSize(points_per_row,points_per_column) = cvSize(columns,rows) ).
+@param corners Output array of detected corners.
+@param flags Various operation flags that can be zero or a combination of the following values:
+- **CV_CALIB_CB_ADAPTIVE_THRESH** Use adaptive thresholding to convert the image to black
+and white, rather than a fixed threshold level (computed from the average image brightness).
+- **CV_CALIB_CB_NORMALIZE_IMAGE** Normalize the image gamma with equalizeHist before
+applying fixed or adaptive thresholding.
+- **CV_CALIB_CB_FILTER_QUADS** Use additional criteria (like contour area, perimeter,
+square-like shape) to filter out false quads extracted at the contour retrieval stage.
+- **CALIB_CB_FAST_CHECK** Run a fast check on the image that looks for chessboard corners,
+and shortcut the call if none is found. This can drastically speed up the call in the
+degenerate condition when no chessboard is observed.
+
+The function attempts to determine whether the input image is a view of the chessboard pattern and
+locate the internal chessboard corners. The function returns a non-zero value if all of the corners
+are found and they are placed in a certain order (row by row, left to right in every row).
+Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
+a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
+squares touch each other. The detected coordinates are approximate, and to determine their positions
+more accurately, the function calls cornerSubPix. You also may use the function cornerSubPix with
+different parameters if returned coordinates are not accurate enough.
+
+Sample usage of detecting and drawing chessboard corners:
+@code
+ Size patternsize(8,6); //interior number of corners
+ Mat gray = ....; //source image
+    vector<Point2f> corners; //this will be filled by the detected corners
+
+ //CALIB_CB_FAST_CHECK saves a lot of time on images
+ //that do not contain any chessboard corners
+ bool patternfound = findChessboardCorners(gray, patternsize, corners,
+ CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
+ + CALIB_CB_FAST_CHECK);
+
+ if(patternfound)
+ cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
+ TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
+
+ drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
+@endcode
+@note The function requires white space (like a square-thick border, the wider the better) around
+the board to make the detection more robust in various environments. Otherwise, if there is no
+border and the background is dark, the outer black squares cannot be segmented properly and so the
+square grouping and ordering algorithm fails.
+ */
+CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners,
+ int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE );
+
+//! finds subpixel-accurate positions of the chessboard corners
+CV_EXPORTS bool find4QuadCornerSubpix( InputArray img, InputOutputArray corners, Size region_size );
+
+/** @brief Renders the detected chessboard corners.
+
+@param image Destination image. It must be an 8-bit color image.
+@param patternSize Number of inner corners per a chessboard row and column
+(patternSize = cv::Size(points_per_row,points_per_column)).
+@param corners Array of detected corners, the output of findChessboardCorners.
+@param patternWasFound Parameter indicating whether the complete board was found or not. The
+return value of findChessboardCorners should be passed here.
+
+The function draws individual chessboard corners detected either as red circles if the board was not
+found, or as colored corners connected with lines if the board was found.
+ */
+CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
+ InputArray corners, bool patternWasFound );
+
+/** @brief Finds centers in the grid of circles.
+
+@param image grid view of input circles; it must be an 8-bit grayscale or color image.
+@param patternSize number of circles per row and column
+( patternSize = Size(points_per_row, points_per_column) ).
+@param centers output array of detected centers.
+@param flags various operation flags that can be one of the following values:
+- **CALIB_CB_SYMMETRIC_GRID** uses symmetric pattern of circles.
+- **CALIB_CB_ASYMMETRIC_GRID** uses asymmetric pattern of circles.
+- **CALIB_CB_CLUSTERING** uses a special algorithm for grid detection. It is more robust to
+perspective distortions but much more sensitive to background clutter.
+@param blobDetector feature detector that finds blobs like dark circles on light background.
+
+The function attempts to determine whether the input image contains a grid of circles. If it does,
+the function locates the centers of the circles. The function returns a non-zero value if all of the centers
+have been found and they have been placed in a certain order (row by row, left to right in every
+row). Otherwise, if the function fails to find all the corners or reorder them, it returns 0.
+
+Sample usage of detecting and drawing the centers of circles:
+@code
+ Size patternsize(7,7); //number of centers
+ Mat gray = ....; //source image
+    vector<Point2f> centers; //this will be filled by the detected centers
+
+ bool patternfound = findCirclesGrid(gray, patternsize, centers);
+
+ drawChessboardCorners(img, patternsize, Mat(centers), patternfound);
+@endcode
+@note The function requires white space (like a square-thick border, the wider the better) around
+the board to make the detection more robust in various environments.
+ */
+CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
+ OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID,
+                                   const Ptr<FeatureDetector> &blobDetector = SimpleBlobDetector::create());
+
+/** @brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
+
+@param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
+the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
+vector contains as many elements as the number of the pattern views. If the same calibration pattern
+is shown in each view and it is fully visible, all the vectors will be the same. However, it is
+possible to use partially occluded patterns, or even different patterns in different views. Then,
+the vectors will be different. The points are 3D, but since they are in a pattern coordinate system,
+then, if the rig is planar, it may make sense to place the model on the XY coordinate plane so that
+the Z-coordinate of each input object point is 0.
+In the old interface all the vectors of object points from different views are concatenated
+together.
+@param imagePoints In the new interface it is a vector of vectors of the projections of calibration
+pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and objectPoints.size()
+must be equal, and imagePoints[i].size() must be equal to objectPoints[i].size() for each i.
+In the old interface all the vectors of object points from different views are concatenated
+together.
+@param imageSize Size of the image used only to initialize the intrinsic camera matrix.
+@param cameraMatrix Output 3x3 floating-point camera matrix
+\f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
+and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
+initialized before calling the function.
+@param distCoeffs Output vector of distortion coefficients
+\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
+4, 5, 8, 12 or 14 elements.
+@param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view
+(e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
+k-th translation vector (see the next output parameter description) brings the calibration pattern
+from the model coordinate space (in which object points are specified) to the world coordinate
+space, that is, a real position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
+@param tvecs Output vector of translation vectors estimated for each pattern view.
+@param flags Different flags that may be zero or a combination of the following values:
+- **CV_CALIB_USE_INTRINSIC_GUESS** cameraMatrix contains valid initial values of
+fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
+center ( imageSize is used), and focal distances are computed in a least-squares fashion.
+Note, that if intrinsic parameters are known, there is no need to use this function just to
+estimate extrinsic parameters. Use solvePnP instead.
+- **CV_CALIB_FIX_PRINCIPAL_POINT** The principal point is not changed during the global
+optimization. It stays at the center or at a different location specified when
+CV_CALIB_USE_INTRINSIC_GUESS is set too.
+- **CV_CALIB_FIX_ASPECT_RATIO** The function considers only fy as a free parameter. The
+ratio fx/fy stays the same as in the input cameraMatrix . When
+CV_CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
+ignored, only their ratio is computed and used further.
+- **CV_CALIB_ZERO_TANGENT_DIST** Tangential distortion coefficients \f$(p_1, p_2)\f$ are set
+to zeros and stay zero.
+- **CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6** The corresponding radial distortion
+coefficient is not changed during the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is
+set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
+- **CV_CALIB_RATIONAL_MODEL** Coefficients k4, k5, and k6 are enabled. To provide the
+backward compatibility, this extra flag should be explicitly specified to make the
+calibration function use the rational model and return 8 coefficients. If the flag is not
+set, the function computes and returns only 5 distortion coefficients.
+- **CALIB_THIN_PRISM_MODEL** Coefficients s1, s2, s3 and s4 are enabled. To provide the
+backward compatibility, this extra flag should be explicitly specified to make the
+calibration function use the thin prism model and return 12 coefficients. If the flag is not
+set, the function computes and returns only 5 distortion coefficients.
+- **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion coefficients are not changed during
+the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
+supplied distCoeffs matrix is used. Otherwise, it is set to 0.
+- **CALIB_TILTED_MODEL** Coefficients tauX and tauY are enabled. To provide the
+backward compatibility, this extra flag should be explicitly specified to make the
+calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
+set, the function computes and returns only 5 distortion coefficients.
+- **CALIB_FIX_TAUX_TAUY** The coefficients of the tilted sensor model are not changed during
+the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
+supplied distCoeffs matrix is used. Otherwise, it is set to 0.
+@param criteria Termination criteria for the iterative optimization algorithm.
+
+The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
+views. The algorithm is based on @cite Zhang2000 and @cite BouguetMCT . The coordinates of 3D object
+points and their corresponding 2D projections in each view must be specified. That may be achieved
+by using an object with a known geometry and easily detectable feature points. Such an object is
+called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
+a calibration rig (see findChessboardCorners ). Currently, initialization of intrinsic parameters
+(when CV_CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
+patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
+be used as long as initial cameraMatrix is provided.
+
+The algorithm performs the following steps:
+
+- Compute the initial intrinsic parameters (the option only available for planar calibration
+ patterns) or read them from the input parameters. The distortion coefficients are all set to
+ zeros initially unless some of CV_CALIB_FIX_K? are specified.
+
+- Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
+ done using solvePnP .
+
+- Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
+ that is, the total sum of squared distances between the observed feature points imagePoints and
+ the projected (using the current estimates for camera parameters and the poses) object points
+ objectPoints. See projectPoints for details.
+
+The function returns the final re-projection error.
+
+@note
+ If you use a non-square (=non-NxN) grid and findChessboardCorners for calibration, and
+ calibrateCamera returns bad values (zero distortion coefficients, an image center very far from
+ (w/2-0.5,h/2-0.5), and/or large differences between \f$f_x\f$ and \f$f_y\f$ (ratios of 10:1 or more)),
+ then you have probably used patternSize=cvSize(rows,cols) instead of using
+ patternSize=cvSize(cols,rows) in findChessboardCorners .
+
+@sa
+ findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
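+
+A minimal calibration sketch (the image size and the per-view data are illustrative):
+@code
+    std::vector<std::vector<Point3f> > objectPoints;  // per-view 3D pattern points
+    std::vector<std::vector<Point2f> > imagePoints;   // per-view detected corners
+    // ... add one entry to each vector per captured view ...
+    Mat cameraMatrix, distCoeffs;
+    std::vector<Mat> rvecs, tvecs;
+    double rms = calibrateCamera(objectPoints, imagePoints, Size(640, 480),
+                                 cameraMatrix, distCoeffs, rvecs, tvecs);
+@endcode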
+ */
+CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
+ InputArrayOfArrays imagePoints, Size imageSize,
+ InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
+ OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
+ int flags = 0, TermCriteria criteria = TermCriteria(
+ TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
+
+/** @brief Computes useful camera characteristics from the camera matrix.
+
+@param cameraMatrix Input camera matrix that can be estimated by calibrateCamera or
+stereoCalibrate .
+@param imageSize Input image size in pixels.
+@param apertureWidth Physical width in mm of the sensor.
+@param apertureHeight Physical height in mm of the sensor.
+@param fovx Output field of view in degrees along the horizontal sensor axis.
+@param fovy Output field of view in degrees along the vertical sensor axis.
+@param focalLength Focal length of the lens in mm.
+@param principalPoint Principal point in mm.
+@param aspectRatio \f$f_y/f_x\f$
+
+The function computes various useful camera characteristics from the previously estimated camera
+matrix.
+
+@note
+    Keep in mind that the unit 'mm' here stands for whatever unit of measure one chooses for
+    the chessboard pitch (it can thus be any value).
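+
+A minimal sketch (the sensor dimensions are illustrative; cameraMatrix is assumed to come from calibrateCamera):
+@code
+    double fovx, fovy, focalLength, aspectRatio;
+    Point2d principalPoint;
+    calibrationMatrixValues(cameraMatrix, Size(640, 480), 4.8, 3.6,
+                            fovx, fovy, focalLength, principalPoint, aspectRatio);
+@endcode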
+ */
+CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix, Size imageSize,
+ double apertureWidth, double apertureHeight,
+ CV_OUT double& fovx, CV_OUT double& fovy,
+ CV_OUT double& focalLength, CV_OUT Point2d& principalPoint,
+ CV_OUT double& aspectRatio );
+
+/** @brief Calibrates the stereo camera.
+
+@param objectPoints Vector of vectors of the calibration pattern points.
+@param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
+observed by the first camera.
+@param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
+observed by the second camera.
+@param cameraMatrix1 Input/output first camera matrix:
+\f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If
+any of CV_CALIB_USE_INTRINSIC_GUESS , CV_CALIB_FIX_ASPECT_RATIO ,
+CV_CALIB_FIX_INTRINSIC , or CV_CALIB_FIX_FOCAL_LENGTH are specified, some or all of the
+matrix components must be initialized. See the flags description for details.
+@param distCoeffs1 Input/output vector of distortion coefficients
+\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
+4, 5, 8, 12 or 14 elements. The output vector length depends on the flags.
+@param cameraMatrix2 Input/output second camera matrix. The parameter is similar to cameraMatrix1
+@param distCoeffs2 Input/output lens distortion coefficients for the second camera. The parameter
+is similar to distCoeffs1 .
+@param imageSize Size of the image used only to initialize intrinsic camera matrix.
+@param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
+@param T Output translation vector between the coordinate systems of the cameras.
+@param E Output essential matrix.
+@param F Output fundamental matrix.
+@param flags Different flags that may be zero or a combination of the following values:
+- **CV_CALIB_FIX_INTRINSIC** Fix cameraMatrix? and distCoeffs? so that only R, T, E , and F
+matrices are estimated.
+- **CV_CALIB_USE_INTRINSIC_GUESS** Optimize some or all of the intrinsic parameters
+according to the specified flags. Initial values are provided by the user.
+- **CV_CALIB_FIX_PRINCIPAL_POINT** Fix the principal points during the optimization.
+- **CV_CALIB_FIX_FOCAL_LENGTH** Fix \f$f^{(j)}_x\f$ and \f$f^{(j)}_y\f$ .
+- **CV_CALIB_FIX_ASPECT_RATIO** Optimize \f$f^{(j)}_y\f$ . Fix the ratio \f$f^{(j)}_x/f^{(j)}_y\f$ .
+- **CV_CALIB_SAME_FOCAL_LENGTH** Enforce \f$f^{(0)}_x=f^{(1)}_x\f$ and \f$f^{(0)}_y=f^{(1)}_y\f$ .
+- **CV_CALIB_ZERO_TANGENT_DIST** Set tangential distortion coefficients for each camera to
+zeros and fix there.
+- **CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6** Do not change the corresponding radial
+distortion coefficient during the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set,
+the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
+- **CV_CALIB_RATIONAL_MODEL** Enable coefficients k4, k5, and k6. To provide the backward
+compatibility, this extra flag should be explicitly specified to make the calibration
+function use the rational model and return 8 coefficients. If the flag is not set, the
+function computes and returns only 5 distortion coefficients.
+- **CALIB_THIN_PRISM_MODEL** Coefficients s1, s2, s3 and s4 are enabled. To provide the
+backward compatibility, this extra flag should be explicitly specified to make the
+calibration function use the thin prism model and return 12 coefficients. If the flag is not
+set, the function computes and returns only 5 distortion coefficients.
+- **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion coefficients are not changed during
+the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
+supplied distCoeffs matrix is used. Otherwise, it is set to 0.
+- **CALIB_TILTED_MODEL** Coefficients tauX and tauY are enabled. To provide the
+backward compatibility, this extra flag should be explicitly specified to make the
+calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
+set, the function computes and returns only 5 distortion coefficients.
+- **CALIB_FIX_TAUX_TAUY** The coefficients of the tilted sensor model are not changed during
+the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
+supplied distCoeffs matrix is used. Otherwise, it is set to 0.
+@param criteria Termination criteria for the iterative optimization algorithm.
+
+The function estimates transformation between two cameras making a stereo pair. If you have a stereo
+camera where the relative position and orientation of two cameras is fixed, and if you computed
+poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2),
+respectively (this can be done with solvePnP ), then those poses definitely relate to each other.
+This means that, given ( \f$R_1\f$,\f$T_1\f$ ), it should be possible to compute ( \f$R_2\f$,\f$T_2\f$ ). You only
+need to know the position and orientation of the second camera relative to the first camera. This is
+what the described function does. It computes ( \f$R\f$,\f$T\f$ ) so that:
+
+\f[R_2=R*R_1 \\ T_2=R*T_1 + T,\f]
+
+Optionally, it computes the essential matrix E:
+
+\f[E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} *R\f]
+
+where \f$T_i\f$ are components of the translation vector \f$T\f$ : \f$T=[T_0, T_1, T_2]^T\f$ . And the function
+can also compute the fundamental matrix F:
+
+\f[F = cameraMatrix2^{-T} E cameraMatrix1^{-1}\f]
+
+Besides the stereo-related information, the function can also perform a full calibration of each of
+two cameras. However, due to the high dimensionality of the parameter space and noise in the input
+data, the function can diverge from the correct solution. If the intrinsic parameters can be
+estimated with high accuracy for each of the cameras individually (for example, using
+calibrateCamera ), you are recommended to do so and then pass CV_CALIB_FIX_INTRINSIC flag to the
+function along with the computed intrinsic parameters. Otherwise, if all the parameters are
+estimated at once, it makes sense to restrict some parameters, for example, pass
+CV_CALIB_SAME_FOCAL_LENGTH and CV_CALIB_ZERO_TANGENT_DIST flags, which is usually a
+reasonable assumption.
+
+Similarly to calibrateCamera , the function minimizes the total re-projection error for all the
+points in all the available views from both cameras. The function returns the final value of the
+re-projection error.
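+
+A minimal sketch (the per-view point vectors are assumed to be prepared as for calibrateCamera, and
+K1, D1, K2, D2 are assumed to be per-camera intrinsics estimated beforehand with calibrateCamera):
+@code
+    Mat R, T, E, F;
+    double rms = stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
+                                 K1, D1, K2, D2, Size(640, 480),
+                                 R, T, E, F, CALIB_FIX_INTRINSIC);
+@endcode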
+ */
+CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
+ InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
+ InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1,
+ InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2,
+ Size imageSize, OutputArray R,OutputArray T, OutputArray E, OutputArray F,
+ int flags = CALIB_FIX_INTRINSIC,
+ TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) );
+
+
+/** @brief Computes rectification transforms for each head of a calibrated stereo camera.
+
+@param cameraMatrix1 First camera matrix.
+@param distCoeffs1 First camera distortion parameters.
+@param cameraMatrix2 Second camera matrix.
+@param distCoeffs2 Second camera distortion parameters.
+@param imageSize Size of the image used for stereo calibration.
+@param R Rotation matrix between the coordinate systems of the first and the second cameras.
+@param T Translation vector between coordinate systems of the cameras.
+@param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
+@param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
+@param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
+camera.
+@param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
+camera.
+@param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).
+@param flags Operation flags that may be zero or CV_CALIB_ZERO_DISPARITY . If the flag is set,
+the function makes the principal points of each camera have the same pixel coordinates in the
+rectified views. And if the flag is not set, the function may still shift the images in the
+horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
+useful image area.
+@param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
+scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
+images are zoomed and shifted so that only valid pixels are visible (no black areas after
+rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
+pixels from the original images from the cameras are retained in the rectified images (no source
+image pixels are lost). Obviously, any intermediate value yields an intermediate result between
+those two extreme cases.
+@param newImageSize New image resolution after rectification. The same size should be passed to
+initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
+is passed (default), it is set to the original imageSize . Setting it to larger value can help you
+preserve details in the original image, especially when there is a big radial distortion.
+@param validPixROI1 Optional output rectangles inside the rectified images where all the pixels
+are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
+(see the picture below).
+@param validPixROI2 Optional output rectangles inside the rectified images where all the pixels
+are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
+(see the picture below).
+
+The function computes the rotation matrices for each camera that (virtually) make both camera image
+planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
+the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate
+as input. As output, it provides two rotation matrices and also two projection matrices in the new
+coordinates. The function distinguishes the following two cases:
+
+- **Horizontal stereo**: the first and the second camera views are shifted relative to each other
+ mainly along the x axis (with possible small vertical shift). In the rectified images, the
+ corresponding epipolar lines in the left and right cameras are horizontal and have the same
+ y-coordinate. P1 and P2 look like:
+
+ \f[\texttt{P1} = \begin{bmatrix} f & 0 & cx_1 & 0 \\ 0 & f & cy & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix}\f]
+
+ \f[\texttt{P2} = \begin{bmatrix} f & 0 & cx_2 & T_x*f \\ 0 & f & cy & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix} ,\f]
+
+ where \f$T_x\f$ is a horizontal shift between the cameras and \f$cx_1=cx_2\f$ if
+ CV_CALIB_ZERO_DISPARITY is set.
+
+- **Vertical stereo**: the first and the second camera views are shifted relative to each other
+ mainly in vertical direction (and probably a bit in the horizontal direction too). The epipolar
+ lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
+
+ \f[\texttt{P1} = \begin{bmatrix} f & 0 & cx & 0 \\ 0 & f & cy_1 & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix}\f]
+
+ \f[\texttt{P2} = \begin{bmatrix} f & 0 & cx & 0 \\ 0 & f & cy_2 & T_y*f \\ 0 & 0 & 1 & 0 \end{bmatrix} ,\f]
+
+ where \f$T_y\f$ is a vertical shift between the cameras and \f$cy_1=cy_2\f$ if CALIB_ZERO_DISPARITY is
+ set.
+
+As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
+matrices. The matrices, together with R1 and R2 , can then be passed to initUndistortRectifyMap to
+initialize the rectification map for each camera.
+
+See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
+the corresponding image regions. This means that the images are well rectified, which is what most
+stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
+their interiors are all valid pixels.
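+
+A minimal sketch (K1, D1, K2, D2, R, T are assumed to come from stereoCalibrate; the image size is illustrative):
+@code
+    Mat R1, R2, P1, P2, Q;
+    Rect roi1, roi2;
+    stereoRectify(K1, D1, K2, D2, Size(640, 480), R, T,
+                  R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, Size(), &roi1, &roi2);
+@endcode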
+
+
+ */
+CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,
+ InputArray cameraMatrix2, InputArray distCoeffs2,
+ Size imageSize, InputArray R, InputArray T,
+ OutputArray R1, OutputArray R2,
+ OutputArray P1, OutputArray P2,
+ OutputArray Q, int flags = CALIB_ZERO_DISPARITY,
+ double alpha = -1, Size newImageSize = Size(),
+ CV_OUT Rect* validPixROI1 = 0, CV_OUT Rect* validPixROI2 = 0 );
+
+/** @brief Computes a rectification transform for an uncalibrated stereo camera.
+
+@param points1 Array of feature points in the first image.
+@param points2 The corresponding points in the second image. The same formats as in
+findFundamentalMat are supported.
+@param F Input fundamental matrix. It can be computed from the same set of point pairs using
+findFundamentalMat .
+@param imgSize Size of the image.
+@param H1 Output rectification homography matrix for the first image.
+@param H2 Output rectification homography matrix for the second image.
+@param threshold Optional threshold used to filter out the outliers. If the parameter is greater
+than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points
+for which \f$|\texttt{points2[i]}^T*\texttt{F}*\texttt{points1[i]}|>\texttt{threshold}\f$ ) are
+rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
+
+The function computes the rectification transformations without knowing intrinsic parameters of the
+cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
+related difference from stereoRectify is that the function outputs not the rectification
+transformations in the object (3D) space, but the planar perspective transformations encoded by the
+homography matrices H1 and H2 . The function implements the algorithm @cite Hartley99 .
+
+@note
+ While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
+ depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
+ it would be better to correct it before computing the fundamental matrix and calling this
+ function. For example, distortion coefficients can be estimated for each head of stereo camera
+ separately by using calibrateCamera . Then, the images can be corrected using undistort , or
+ just the point coordinates can be corrected with undistortPoints .
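+
+A minimal sketch (pts1, pts2 are matched image points and imgSize is the image size, both assumed available):
+@code
+    Mat F = findFundamentalMat(pts1, pts2, FM_RANSAC, 3., 0.99);
+    Mat H1, H2;
+    stereoRectifyUncalibrated(pts1, pts2, F, imgSize, H1, H2);
+@endcode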
+ */
+CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2,
+ InputArray F, Size imgSize,
+ OutputArray H1, OutputArray H2,
+ double threshold = 5 );
+
+//! computes the rectification transformations for 3-head camera, where all the heads are on the same line.
+CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1,
+ InputArray cameraMatrix2, InputArray distCoeffs2,
+ InputArray cameraMatrix3, InputArray distCoeffs3,
+ InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3,
+ Size imageSize, InputArray R12, InputArray T12,
+ InputArray R13, InputArray T13,
+ OutputArray R1, OutputArray R2, OutputArray R3,
+ OutputArray P1, OutputArray P2, OutputArray P3,
+ OutputArray Q, double alpha, Size newImgSize,
+ CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags );
+
+/** @brief Returns the new camera matrix based on the free scaling parameter.
+
+@param cameraMatrix Input camera matrix.
+@param distCoeffs Input vector of distortion coefficients
+\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
+4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
+assumed.
+@param imageSize Original image size.
+@param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
+valid) and 1 (when all the source image pixels are retained in the undistorted image). See
+stereoRectify for details.
+@param newImgSize Image size after rectification. By default, it is set to imageSize.
+@param validPixROI Optional output rectangle that outlines all-good-pixels region in the
+undistorted image. See roi1, roi2 description in stereoRectify .
+@param centerPrincipalPoint Optional flag that indicates whether in the new camera matrix the
+principal point should be at the image center or not. By default, the principal point is chosen to
+best fit a subset of the source image (determined by alpha) to the corrected image.
+@return new_camera_matrix Output new camera matrix.
+
+The function computes and returns the optimal new camera matrix based on the free scaling parameter.
+By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
+image pixels if there is valuable information in the corners alpha=1 , or get something in between.
+When alpha\>0 , the undistortion result is likely to have some black pixels corresponding to
+"virtual" pixels outside of the captured distorted image. The original camera matrix, distortion
+coefficients, the computed new camera matrix, and newImageSize should be passed to
+initUndistortRectifyMap to produce the maps for remap .
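+
+A minimal undistortion sketch (cameraMatrix, distCoeffs, imgSize and the distorted input image are
+assumed to exist already):
+@code
+    Mat newK = getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imgSize, 1.0);
+    Mat map1, map2, undistorted;
+    initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(), newK, imgSize, CV_16SC2, map1, map2);
+    remap(distorted, undistorted, map1, map2, INTER_LINEAR);
+@endcode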
+ */
+CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs,
+ Size imageSize, double alpha, Size newImgSize = Size(),
+ CV_OUT Rect* validPixROI = 0,
+ bool centerPrincipalPoint = false);
+
+/** @brief Converts points from Euclidean to homogeneous space.
+
+@param src Input vector of N-dimensional points.
+@param dst Output vector of N+1-dimensional points.
+
+The function converts points from Euclidean to homogeneous space by appending 1's to the tuple of
+point coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1).
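+
+A minimal sketch (the input point is illustrative):
+@code
+    std::vector<Point2f> pts2d;
+    pts2d.push_back(Point2f(1.f, 2.f));
+    std::vector<Point3f> pts3d;
+    convertPointsToHomogeneous(pts2d, pts3d);  // (1,2) -> (1,2,1)
+@endcode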
+ */
+CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst );
+
+/** @brief Converts points from homogeneous to Euclidean space.
+
+@param src Input vector of N-dimensional points.
+@param dst Output vector of N-1-dimensional points.
+
+The function converts points from homogeneous to Euclidean space using perspective projection. That is,
+each point (x1, x2, ... x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). When xn=0, the
+output point coordinates will be (0,0,0,...).
+ */
+CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst );
+
+/** @brief Converts points to/from homogeneous coordinates.
+
+@param src Input array or vector of 2D, 3D, or 4D points.
+@param dst Output vector of 2D, 3D, or 4D points.
+
+The function converts 2D or 3D points from/to homogeneous coordinates by calling either
+convertPointsToHomogeneous or convertPointsFromHomogeneous.
+
+@note The function is obsolete. Use one of the previous two functions instead.
+ */
+CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
+
+/** @brief Calculates a fundamental matrix from the corresponding points in two images.
+
+@param points1 Array of N points from the first image. The point coordinates should be
+floating-point (single or double precision).
+@param points2 Array of the second image points of the same size and format as points1 .
+@param method Method for computing a fundamental matrix.
+- **CV_FM_7POINT** for a 7-point algorithm. \f$N = 7\f$
+- **CV_FM_8POINT** for an 8-point algorithm. \f$N \ge 8\f$
+- **CV_FM_RANSAC** for the RANSAC algorithm. \f$N \ge 8\f$
+- **CV_FM_LMEDS** for the LMedS algorithm. \f$N \ge 8\f$
+@param param1 Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
+line in pixels, beyond which the point is considered an outlier and is not used for computing the
+final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
+point localization, image resolution, and the image noise.
+@param param2 Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level
+of confidence (probability) that the estimated matrix is correct.
+@param mask
+
+The epipolar geometry is described by the following equation:
+
+\f[[p_2; 1]^T F [p_1; 1] = 0\f]
+
+where \f$F\f$ is a fundamental matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the
+second images, respectively.
+
+The function calculates the fundamental matrix using one of four methods listed above and returns
+the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
+algorithm, the function may return up to 3 solutions ( \f$9 \times 3\f$ matrix that stores all 3
+matrices sequentially).
+
+The calculated fundamental matrix may be passed further to computeCorrespondEpilines that finds the
+epipolar lines corresponding to the specified points. It can also be passed to
+stereoRectifyUncalibrated to compute the rectification transformation:
+@code
+ // Example. Estimation of fundamental matrix using the RANSAC algorithm
+ int point_count = 100;
+    vector<Point2f> points1(point_count);
+    vector<Point2f> points2(point_count);
+
+ // initialize the points here ...
+ for( int i = 0; i < point_count; i++ )
+ {
+ points1[i] = ...;
+ points2[i] = ...;
+ }
+
+ Mat fundamental_matrix =
+ findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
+@endcode
+ */
+CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
+ int method = FM_RANSAC,
+ double param1 = 3., double param2 = 0.99,
+ OutputArray mask = noArray() );
+
+/** @overload */
+CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
+ OutputArray mask, int method = FM_RANSAC,
+ double param1 = 3., double param2 = 0.99 );
+
+/** @brief Calculates an essential matrix from the corresponding points in two images.
+
+@param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
+be floating-point (single or double precision).
+@param points2 Array of the second image points of the same size and format as points1 .
+@param cameraMatrix Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
+Note that this function assumes that points1 and points2 are feature points from cameras with the
+same camera matrix.
+@param method Method for computing an essential matrix.
+- **RANSAC** for the RANSAC algorithm.
+- **LMEDS** for the LMedS algorithm.
+@param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
+line in pixels, beyond which the point is considered an outlier and is not used for computing the
+final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
+point localization, image resolution, and the image noise.
+@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
+confidence (probability) that the estimated matrix is correct.
+@param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
+for the other points. The array is computed only in the RANSAC and LMedS methods.
+
+This function estimates the essential matrix based on the five-point algorithm solver in @cite Nister03 .
+@cite SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
+
+\f[[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\f]
+
+where \f$E\f$ is an essential matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the
+second images, respectively. The result of this function may be passed further to
+decomposeEssentialMat or recoverPose to recover the relative pose between cameras.
+ */
+CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2,
+ InputArray cameraMatrix, int method = RANSAC,
+ double prob = 0.999, double threshold = 1.0,
+ OutputArray mask = noArray() );
+
+/** @overload
+@param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
+be floating-point (single or double precision).
+@param points2 Array of the second image points of the same size and format as points1 .
+@param focal focal length of the camera. Note that this function assumes that points1 and points2
+are feature points from cameras with the same focal length and principal point.
+@param pp principal point of the camera.
+@param method Method for computing an essential matrix.
+- **RANSAC** for the RANSAC algorithm.
+- **LMEDS** for the LMedS algorithm.
+@param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
+line in pixels, beyond which the point is considered an outlier and is not used for computing the
+final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
+point localization, image resolution, and the image noise.
+@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
+confidence (probability) that the estimated matrix is correct.
+@param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
+for the other points. The array is computed only in the RANSAC and LMedS methods.
+
+This function differs from the one above in that it computes the camera matrix from the focal length and
+principal point:
+
+\f[K =
+\begin{bmatrix}
+f & 0 & x_{pp} \\
+0 & f & y_{pp} \\
+0 & 0 & 1
+\end{bmatrix}\f]
+ */
+CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2,
+ double focal = 1.0, Point2d pp = Point2d(0, 0),
+ int method = RANSAC, double prob = 0.999,
+ double threshold = 1.0, OutputArray mask = noArray() );
+
+/** @brief Decompose an essential matrix to possible rotations and translation.
+
+@param E The input essential matrix.
+@param R1 One possible rotation matrix.
+@param R2 Another possible rotation matrix.
+@param t One possible translation.
+
+This function decomposes an essential matrix E using SVD decomposition @cite HartleyZ00 . In general, four
+possible poses exist for a given E. They are \f$[R_1, t]\f$, \f$[R_1, -t]\f$, \f$[R_2, t]\f$, \f$[R_2, -t]\f$. By
+decomposing E, you can only get the direction of the translation, so the function returns unit t.
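+
+A minimal usage sketch (illustrative; E is assumed to come from findEssentialMat):
+@code
+    Mat R1, R2, t;
+    decomposeEssentialMat(E, R1, R2, t);
+    // The four pose candidates are [R1, t], [R1, -t], [R2, t], [R2, -t];
+    // recoverPose can be used to select the one with positive point depth.
+@endcode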
+ */
+CV_EXPORTS_W void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t );
+
+/** @brief Recover relative camera rotation and translation from an estimated essential matrix and the
+corresponding points in two images, using the cheirality check. Returns the number of inliers which pass
+the check.
+
+@param E The input essential matrix.
+@param points1 Array of N 2D points from the first image. The point coordinates should be
+floating-point (single or double precision).
+@param points2 Array of the second image points of the same size and format as points1 .
+@param cameraMatrix Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
+Note that this function assumes that points1 and points2 are feature points from cameras with the
+same camera matrix.
+@param R Recovered relative rotation.
+@param t Recovered relative translation.
+@param mask Input/output mask for inliers in points1 and points2.
+If it is not empty, then it marks inliers in points1 and points2 for the given essential
+matrix E. Only these inliers will be used to recover pose. In the output mask only inliers
+which pass the cheirality check are kept.
+
+This function decomposes an essential matrix using decomposeEssentialMat and then verifies possible
+pose hypotheses by doing cheirality check. The cheirality check basically means that the
+triangulated 3D points should have positive depth. Some details can be found in @cite Nister03 .
+
+This function can be used to process output E and mask from findEssentialMat. In this scenario,
+points1 and points2 are the same input for findEssentialMat. :
+@code
+    // Example. Estimation of essential matrix using the RANSAC algorithm
+ int point_count = 100;
+    vector<Point2f> points1(point_count);
+    vector<Point2f> points2(point_count);
+
+ // initialize the points here ...
+ for( int i = 0; i < point_count; i++ )
+ {
+ points1[i] = ...;
+ points2[i] = ...;
+ }
+
+    // camera matrix with both focal lengths = 1, and principal point = (0, 0)
+ Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
+
+ Mat E, R, t, mask;
+
+ E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
+ recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
+@endcode
+ */
+CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2,
+ InputArray cameraMatrix, OutputArray R, OutputArray t,
+ InputOutputArray mask = noArray() );
+
+/** @overload
+@param E The input essential matrix.
+@param points1 Array of N 2D points from the first image. The point coordinates should be
+floating-point (single or double precision).
+@param points2 Array of the second image points of the same size and format as points1 .
+@param R Recovered relative rotation.
+@param t Recovered relative translation.
+@param focal Focal length of the camera. Note that this function assumes that points1 and points2
+are feature points from cameras with the same focal length and principal point.
+@param pp Principal point of the camera.
+@param mask Input/output mask for inliers in points1 and points2.
+If it is not empty, then it marks inliers in points1 and points2 for the given essential
+matrix E. Only these inliers will be used to recover pose. In the output mask only inliers
+which pass the cheirality check are kept.
+
+This function differs from the one above in that it computes the camera matrix from the focal length and
+principal point:
+
+\f[K =
+\begin{bmatrix}
+f & 0 & x_{pp} \\
+0 & f & y_{pp} \\
+0 & 0 & 1
+\end{bmatrix}\f]
+ */
+CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2,
+ OutputArray R, OutputArray t,
+ double focal = 1.0, Point2d pp = Point2d(0, 0),
+ InputOutputArray mask = noArray() );
+
+/** @brief For points in an image of a stereo pair, computes the corresponding epilines in the other image.
+
+@param points Input points. \f$N \times 1\f$ or \f$1 \times N\f$ matrix of type CV_32FC2 or
+vector\<Point2f\> .
+@param whichImage Index of the image (1 or 2) that contains the points .
+@param F Fundamental matrix that can be estimated using findFundamentalMat or stereoRectify .
+@param lines Output vector of the epipolar lines corresponding to the points in the other image.
+Each line \f$ax + by + c=0\f$ is encoded by 3 numbers \f$(a, b, c)\f$ .
+
+For every point in one of the two images of a stereo pair, the function finds the equation of the
+corresponding epipolar line in the other image.
+
+From the fundamental matrix definition (see findFundamentalMat ), line \f$l^{(2)}_i\f$ in the second
+image for the point \f$p^{(1)}_i\f$ in the first image (when whichImage=1 ) is computed as:
+
+\f[l^{(2)}_i = F p^{(1)}_i\f]
+
+And vice versa, when whichImage=2, \f$l^{(1)}_i\f$ is computed from \f$p^{(2)}_i\f$ as:
+
+\f[l^{(1)}_i = F^T p^{(2)}_i\f]
+
+Line coefficients are defined up to a scale. They are normalized so that \f$a_i^2+b_i^2=1\f$ .
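+
+A minimal usage sketch (illustrative; F is assumed to come from findFundamentalMat):
+@code
+    std::vector<Point2f> points1;   // ... fill with points detected in the first image
+    std::vector<Vec3f> lines2;      // epilines in the second image
+    computeCorrespondEpilines(points1, 1, F, lines2);
+    // each lines2[i] = (a, b, c) encodes the line a*x + b*y + c = 0 in the second image
+@endcode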
+ */
+CV_EXPORTS_W void computeCorrespondEpilines( InputArray points, int whichImage,
+ InputArray F, OutputArray lines );
+
+/** @brief Reconstructs points by triangulation.
+
+@param projMatr1 3x4 projection matrix of the first camera.
+@param projMatr2 3x4 projection matrix of the second camera.
+@param projPoints1 2xN array of feature points in the first image. In the case of the C++ version,
+it can also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.
+@param projPoints2 2xN array of corresponding points in the second image. In the case of the C++ version,
+it can also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.
+@param points4D 4xN array of reconstructed points in homogeneous coordinates.
+
+The function reconstructs 3-dimensional points (in homogeneous coordinates) by using their
+observations with a stereo camera. Projection matrices can be obtained from stereoRectify.
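+
+A minimal usage sketch (illustrative; P1 and P2 are assumed to be the 3x4 projection matrices
+returned by stereoRectify, and pts1/pts2 matched points in the rectified images):
+@code
+    std::vector<Point2f> pts1, pts2;   // ... fill with matched feature points (float)
+    Mat points4D;
+    triangulatePoints(P1, P2, pts1, pts2, points4D);
+    Mat points3D;
+    convertPointsFromHomogeneous(points4D.t(), points3D);   // Nx1 3-channel Euclidean points
+@endcode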
+
+@note
+ Keep in mind that all input data should be of float type in order for this function to work.
+
+@sa
+ reprojectImageTo3D
+ */
+CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
+ InputArray projPoints1, InputArray projPoints2,
+ OutputArray points4D );
+
+/** @brief Refines coordinates of corresponding points.
+
+@param F 3x3 fundamental matrix.
+@param points1 1xN array containing the first set of points.
+@param points2 1xN array containing the second set of points.
+@param newPoints1 The optimized points1.
+@param newPoints2 The optimized points2.
+
+The function implements the Optimal Triangulation Method (see Multiple View Geometry for details).
+For each given point correspondence points1[i] \<-\> points2[i], and a fundamental matrix F, it
+computes the corrected correspondences newPoints1[i] \<-\> newPoints2[i] that minimize the geometric
+error \f$d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2\f$ (where \f$d(a,b)\f$ is the
+geometric distance between points \f$a\f$ and \f$b\f$ ) subject to the epipolar constraint
+\f$newPoints2^T * F * newPoints1 = 0\f$ .
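+
+A minimal usage sketch (illustrative; F and the point count are assumed to be available):
+@code
+    int N = 100;                                        // number of correspondences (hypothetical)
+    Mat pts1(1, N, CV_64FC2), pts2(1, N, CV_64FC2);     // ... fill with matched points
+    Mat newPts1, newPts2;
+    correctMatches(F, pts1, pts2, newPts1, newPts2);
+@endcode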
+ */
+CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2,
+ OutputArray newPoints1, OutputArray newPoints2 );
+
+/** @brief Filters off small noise blobs (speckles) in the disparity map
+
+@param img The input 16-bit signed disparity image
+@param newVal The disparity value used to paint-off the speckles
+@param maxSpeckleSize The maximum speckle size to consider it a speckle. Larger blobs are not
+affected by the algorithm
+@param maxDiff Maximum difference between neighbor disparity pixels to put them into the same
+blob. Note that since StereoBM, StereoSGBM, and possibly other algorithms return a fixed-point
+disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
+account when specifying this parameter value.
+@param buf The optional temporary buffer to avoid memory allocation within the function.
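+
+A minimal usage sketch (illustrative; disp is assumed to be a 16-bit fixed-point disparity map
+produced by StereoBM or StereoSGBM, i.e. values scaled by 16):
+@code
+    filterSpeckles(disp, 0, 200, 2 * 16);   // paint speckles up to 200 pixels with 0, maxDiff of 2 disparities
+@endcode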
+ */
+CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal,
+ int maxSpeckleSize, double maxDiff,
+ InputOutputArray buf = noArray() );
+
+//! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify())
+CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
+ int minDisparity, int numberOfDisparities,
+ int SADWindowSize );
+
+//! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
+CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
+ int minDisparity, int numberOfDisparities,
+ int disp12MaxDisp = 1 );
+
+/** @brief Reprojects a disparity image to 3D space.
+
+@param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
+floating-point disparity image. If 16-bit signed format is used, the values are assumed to have no
+fractional bits.
+@param _3dImage Output 3-channel floating-point image of the same size as disparity . Each
+element of _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity
+map.
+@param Q \f$4 \times 4\f$ perspective transformation matrix that can be obtained with stereoRectify.
+@param handleMissingValues Indicates, whether the function should handle missing values (i.e.
+points where the disparity was not computed). If handleMissingValues=true, then pixels with the
+minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
+to 3D points with a very large Z value (currently set to 10000).
+@param ddepth The optional output array depth. If it is -1, the output image will have CV_32F
+depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
+
+The function transforms a single-channel disparity map to a 3-channel image representing a 3D
+surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y), it
+computes:
+
+\f[\begin{array}{l} [X \; Y \; Z \; W]^T = \texttt{Q} *[x \; y \; \texttt{disparity} (x,y) \; 1]^T \\ \texttt{\_3dImage} (x,y) = (X/W, \; Y/W, \; Z/W) \end{array}\f]
+
+The matrix Q can be an arbitrary \f$4 \times 4\f$ matrix (for example, the one computed by
+stereoRectify). To reproject a sparse set of points {(x,y,d),...} to 3D space, use
+perspectiveTransform .
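+
+A minimal usage sketch (illustrative; disp is assumed to be a 16-bit fixed-point disparity map from
+StereoMatcher::compute and Q to come from stereoRectify):
+@code
+    Mat dispF, xyz;
+    disp.convertTo(dispF, CV_32F, 1.0 / 16.0);      // undo the 4 fractional bits
+    reprojectImageTo3D(dispF, xyz, Q, true);        // xyz: CV_32FC3 image of 3D points
+@endcode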
+ */
+CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
+ OutputArray _3dImage, InputArray Q,
+ bool handleMissingValues = false,
+ int ddepth = -1 );
+
+/** @brief Calculates the Sampson Distance between two points.
+
+The function sampsonDistance calculates and returns the first order approximation of the geometric error as:
+\f[sd( \texttt{pt1} , \texttt{pt2} )= \frac{(\texttt{pt2}^t \cdot \texttt{F} \cdot \texttt{pt1})^2}{((\texttt{F} \cdot \texttt{pt1})(0))^2 + ((\texttt{F} \cdot \texttt{pt1})(1))^2 + ((\texttt{F}^t \cdot \texttt{pt2})(0))^2 + ((\texttt{F}^t \cdot \texttt{pt2})(1))^2}\f]
+The fundamental matrix may be calculated using the cv::findFundamentalMat function. See HZ 11.4.3 for details.
+@param pt1 first homogeneous 2d point
+@param pt2 second homogeneous 2d point
+@param F fundamental matrix
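+
+A minimal usage sketch (illustrative; F is assumed to be a 3x3 CV_64F fundamental matrix):
+@code
+    Mat p1 = (Mat_<double>(3, 1) << 100.0, 50.0, 1.0);   // homogeneous point in image 1
+    Mat p2 = (Mat_<double>(3, 1) << 103.0, 52.0, 1.0);   // homogeneous point in image 2
+    double err = sampsonDistance(p1, p2, F);
+@endcode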
+*/
+CV_EXPORTS_W double sampsonDistance(InputArray pt1, InputArray pt2, InputArray F);
+
+/** @brief Computes an optimal affine transformation between two 3D point sets.
+
+@param src First input 3D point set.
+@param dst Second input 3D point set.
+@param out Output 3D affine transformation matrix \f$3 \times 4\f$ .
+@param inliers Output vector indicating which points are inliers.
+@param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
+an inlier.
+@param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
+between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
+significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
+
+The function estimates an optimal 3D affine transformation between two 3D point sets using the
+RANSAC algorithm.
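+
+A minimal usage sketch (illustrative; the two point sets are assumed to be in correspondence):
+@code
+    std::vector<Point3f> src, dst;      // ... fill with corresponding 3D points
+    Mat affine;                         // resulting 3x4 affine transform
+    std::vector<uchar> inliers;
+    int ok = estimateAffine3D(src, dst, affine, inliers, 3.0, 0.99);
+@endcode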
+ */
+CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
+ OutputArray out, OutputArray inliers,
+ double ransacThreshold = 3, double confidence = 0.99);
+
+/** @brief Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).
+
+@param H The input homography matrix between two images.
+@param K The input intrinsic camera calibration matrix.
+@param rotations Array of rotation matrices.
+@param translations Array of translation matrices.
+@param normals Array of plane normal matrices.
+
+This function extracts relative camera motion between two views observing a planar object from the
+homography H induced by the plane. The intrinsic camera matrix K must also be provided. The function
+may return up to four mathematical solution sets. At least two of the solutions may further be
+invalidated if point correspondences are available by applying positive depth constraint (all points
+must be in front of the camera). The decomposition method is described in detail in @cite Malis .
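+
+A minimal usage sketch (illustrative; H and K are assumed to be available, e.g. from findHomography
+and camera calibration):
+@code
+    std::vector<Mat> rotations, translations, normals;
+    int solutions = decomposeHomographyMat(H, K, rotations, translations, normals);
+    // up to 4 (R, t, n) candidates; prune them with the positive depth constraint
+@endcode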
+ */
+CV_EXPORTS_W int decomposeHomographyMat(InputArray H,
+ InputArray K,
+ OutputArrayOfArrays rotations,
+ OutputArrayOfArrays translations,
+ OutputArrayOfArrays normals);
+
+/** @brief The base class for stereo correspondence algorithms.
+ */
+class CV_EXPORTS_W StereoMatcher : public Algorithm
+{
+public:
+ enum { DISP_SHIFT = 4,
+ DISP_SCALE = (1 << DISP_SHIFT)
+ };
+
+ /** @brief Computes disparity map for the specified stereo pair
+
+ @param left Left 8-bit single-channel image.
+ @param right Right image of the same size and the same type as the left one.
+ @param disparity Output disparity map. It has the same size as the input images. Some algorithms,
+ like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value
+ has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map.
+ */
+ CV_WRAP virtual void compute( InputArray left, InputArray right,
+ OutputArray disparity ) = 0;
+
+ CV_WRAP virtual int getMinDisparity() const = 0;
+ CV_WRAP virtual void setMinDisparity(int minDisparity) = 0;
+
+ CV_WRAP virtual int getNumDisparities() const = 0;
+ CV_WRAP virtual void setNumDisparities(int numDisparities) = 0;
+
+ CV_WRAP virtual int getBlockSize() const = 0;
+ CV_WRAP virtual void setBlockSize(int blockSize) = 0;
+
+ CV_WRAP virtual int getSpeckleWindowSize() const = 0;
+ CV_WRAP virtual void setSpeckleWindowSize(int speckleWindowSize) = 0;
+
+ CV_WRAP virtual int getSpeckleRange() const = 0;
+ CV_WRAP virtual void setSpeckleRange(int speckleRange) = 0;
+
+ CV_WRAP virtual int getDisp12MaxDiff() const = 0;
+ CV_WRAP virtual void setDisp12MaxDiff(int disp12MaxDiff) = 0;
+};
+
+
+/** @brief Class for computing stereo correspondence using the block matching algorithm, introduced and
+contributed to OpenCV by K. Konolige.
+ */
+class CV_EXPORTS_W StereoBM : public StereoMatcher
+{
+public:
+ enum { PREFILTER_NORMALIZED_RESPONSE = 0,
+ PREFILTER_XSOBEL = 1
+ };
+
+ CV_WRAP virtual int getPreFilterType() const = 0;
+ CV_WRAP virtual void setPreFilterType(int preFilterType) = 0;
+
+ CV_WRAP virtual int getPreFilterSize() const = 0;
+ CV_WRAP virtual void setPreFilterSize(int preFilterSize) = 0;
+
+ CV_WRAP virtual int getPreFilterCap() const = 0;
+ CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0;
+
+ CV_WRAP virtual int getTextureThreshold() const = 0;
+ CV_WRAP virtual void setTextureThreshold(int textureThreshold) = 0;
+
+ CV_WRAP virtual int getUniquenessRatio() const = 0;
+ CV_WRAP virtual void setUniquenessRatio(int uniquenessRatio) = 0;
+
+ CV_WRAP virtual int getSmallerBlockSize() const = 0;
+ CV_WRAP virtual void setSmallerBlockSize(int blockSize) = 0;
+
+ CV_WRAP virtual Rect getROI1() const = 0;
+ CV_WRAP virtual void setROI1(Rect roi1) = 0;
+
+ CV_WRAP virtual Rect getROI2() const = 0;
+ CV_WRAP virtual void setROI2(Rect roi2) = 0;
+
+ /** @brief Creates StereoBM object
+
+    @param numDisparities the disparity search range. For each pixel the algorithm will find the best
+    disparity from 0 (default minimum disparity) to numDisparities. The search range can then be
+    shifted by changing the minimum disparity.
+    @param blockSize the linear size of the blocks compared by the algorithm. The size should be odd
+    (as the block is centered at the current pixel). A larger block size implies a smoother, though less
+    accurate, disparity map. A smaller block size gives a more detailed disparity map, but there is a
+    higher chance for the algorithm to find a wrong correspondence.
+
+    The function creates a StereoBM object. You can then call StereoBM::compute() to compute the
+    disparity map for a specific stereo pair.
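+
+    A minimal usage sketch (illustrative; leftGray and rightGray are assumed to be rectified 8-bit
+    single-channel images):
+    @code
+        Ptr<StereoBM> bm = StereoBM::create(96, 15);   // 96 disparities, 15x15 blocks
+        Mat disp;
+        bm->compute(leftGray, rightGray, disp);        // disp: CV_16S disparities scaled by 16
+    @endcode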
+ */
+    CV_WRAP static Ptr<StereoBM> create(int numDisparities = 0, int blockSize = 21);
+};
+
+/** @brief The class implements the modified H. Hirschmuller algorithm @cite HH08 that differs from the original
+one as follows:
+
+- By default, the algorithm is single-pass, which means that you consider only 5 directions
+instead of 8. Set mode=StereoSGBM::MODE_HH in createStereoSGBM to run the full variant of the
+algorithm but beware that it may consume a lot of memory.
+- The algorithm matches blocks, not individual pixels, though setting blockSize=1 reduces the
+blocks to single pixels.
+- The mutual information cost function is not implemented. Instead, a simpler Birchfield-Tomasi
+sub-pixel metric from @cite BT98 is used, though color images are supported as well.
+- Some pre- and post- processing steps from K. Konolige algorithm StereoBM are included, for
+example: pre-filtering (StereoBM::PREFILTER_XSOBEL type) and post-filtering (uniqueness
+check, quadratic interpolation and speckle filtering).
+
+@note
+ - (Python) An example illustrating the use of the StereoSGBM matching algorithm can be found
+ at opencv_source_code/samples/python/stereo_match.py
+ */
+class CV_EXPORTS_W StereoSGBM : public StereoMatcher
+{
+public:
+ enum
+ {
+ MODE_SGBM = 0,
+ MODE_HH = 1,
+ MODE_SGBM_3WAY = 2
+ };
+
+ CV_WRAP virtual int getPreFilterCap() const = 0;
+ CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0;
+
+ CV_WRAP virtual int getUniquenessRatio() const = 0;
+ CV_WRAP virtual void setUniquenessRatio(int uniquenessRatio) = 0;
+
+ CV_WRAP virtual int getP1() const = 0;
+ CV_WRAP virtual void setP1(int P1) = 0;
+
+ CV_WRAP virtual int getP2() const = 0;
+ CV_WRAP virtual void setP2(int P2) = 0;
+
+ CV_WRAP virtual int getMode() const = 0;
+ CV_WRAP virtual void setMode(int mode) = 0;
+
+ /** @brief Creates StereoSGBM object
+
+ @param minDisparity Minimum possible disparity value. Normally, it is zero but sometimes
+ rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.
+ @param numDisparities Maximum disparity minus minimum disparity. The value is always greater than
+ zero. In the current implementation, this parameter must be divisible by 16.
+ @param blockSize Matched block size. It must be an odd number \>=1 . Normally, it should be
+ somewhere in the 3..11 range.
+ @param P1 The first parameter controlling the disparity smoothness. See below.
+ @param P2 The second parameter controlling the disparity smoothness. The larger the values are,
+ the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1
+ between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor
+ pixels. The algorithm requires P2 \> P1 . See stereo_match.cpp sample where some reasonably good
+ P1 and P2 values are shown (like 8\*number_of_image_channels\*SADWindowSize\*SADWindowSize and
+ 32\*number_of_image_channels\*SADWindowSize\*SADWindowSize , respectively).
+ @param disp12MaxDiff Maximum allowed difference (in integer pixel units) in the left-right
+ disparity check. Set it to a non-positive value to disable the check.
+ @param preFilterCap Truncation value for the prefiltered image pixels. The algorithm first
+ computes x-derivative at each pixel and clips its value by [-preFilterCap, preFilterCap] interval.
+ The result values are passed to the Birchfield-Tomasi pixel cost function.
+ @param uniquenessRatio Margin in percentage by which the best (minimum) computed cost function
+ value should "win" the second best value to consider the found match correct. Normally, a value
+ within the 5-15 range is good enough.
+ @param speckleWindowSize Maximum size of smooth disparity regions to consider their noise speckles
+ and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the
+ 50-200 range.
+ @param speckleRange Maximum disparity variation within each connected component. If you do speckle
+    filtering, set the parameter to a positive value; it will be implicitly multiplied by 16.
+ Normally, 1 or 2 is good enough.
+ @param mode Set it to StereoSGBM::MODE_HH to run the full-scale two-pass dynamic programming
+ algorithm. It will consume O(W\*H\*numDisparities) bytes, which is large for 640x480 stereo and
+    huge for HD-size pictures. By default, it is set to StereoSGBM::MODE_SGBM.
+
+    The method creates a StereoSGBM object with the given parameters. In most cases you only need to
+    tune numDisparities and blockSize; the remaining parameters can be left at their default values or
+    set to custom ones.
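+
+    A minimal usage sketch (illustrative; leftGray and rightGray are assumed to be rectified
+    single-channel images):
+    @code
+        int blockSize = 9, channels = 1;
+        Ptr<StereoSGBM> sgbm = StereoSGBM::create(0, 96, blockSize,
+            8  * channels * blockSize * blockSize,     // P1
+            32 * channels * blockSize * blockSize);    // P2
+        Mat disp;
+        sgbm->compute(leftGray, rightGray, disp);
+    @endcode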
+ */
+    CV_WRAP static Ptr<StereoSGBM> create(int minDisparity, int numDisparities, int blockSize,
+ int P1 = 0, int P2 = 0, int disp12MaxDiff = 0,
+ int preFilterCap = 0, int uniquenessRatio = 0,
+ int speckleWindowSize = 0, int speckleRange = 0,
+ int mode = StereoSGBM::MODE_SGBM);
+};
+
+//! @} calib3d
+
+/** @brief The methods in this namespace use a so-called fisheye camera model.
+ @ingroup calib3d_fisheye
+*/
+namespace fisheye
+{
+//! @addtogroup calib3d_fisheye
+//! @{
+
+ enum{
+ CALIB_USE_INTRINSIC_GUESS = 1,
+ CALIB_RECOMPUTE_EXTRINSIC = 2,
+ CALIB_CHECK_COND = 4,
+ CALIB_FIX_SKEW = 8,
+ CALIB_FIX_K1 = 16,
+ CALIB_FIX_K2 = 32,
+ CALIB_FIX_K3 = 64,
+ CALIB_FIX_K4 = 128,
+ CALIB_FIX_INTRINSIC = 256
+ };
+
+ /** @brief Projects points using fisheye model
+
+    @param objectPoints Array of object points, 1xN/Nx1 3-channel (or vector\<Point3f\> ), where N is
+    the number of points in the view.
+    @param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or
+    vector\<Point2f\>.
+ @param affine
+    @param K Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
+ @param D Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
+ @param alpha The skew coefficient.
+ @param jacobian Optional output 2Nx15 jacobian matrix of derivatives of image points with respect
+ to components of the focal lengths, coordinates of the principal point, distortion coefficients,
+ rotation vector, translation vector, and the skew. In the old interface different components of
+ the jacobian are returned via different output parameters.
+
+ The function computes projections of 3D points to the image plane given intrinsic and extrinsic
+ camera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of
+ image points coordinates (as functions of all the input parameters) with respect to the particular
+ parameters, intrinsic and/or extrinsic.
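+
+    A minimal usage sketch (illustrative; it uses the rvec/tvec overload, and K, D, rvec, tvec are
+    assumed to come from fisheye::calibrate):
+    @code
+        std::vector<Point3f> objectPoints;   // ... fill with 3D points in the object coordinate space
+        std::vector<Point2f> imagePoints;
+        fisheye::projectPoints(objectPoints, imagePoints, rvec, tvec, K, D);
+    @endcode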
+ */
+ CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine,
+ InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());
+
+ /** @overload */
+ CV_EXPORTS_W void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec,
+ InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());
+
+ /** @brief Distorts 2D points using fisheye model.
+
+    @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is
+    the number of points in the view.
+    @param K Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
+ @param D Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
+ @param alpha The skew coefficient.
+    @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
+ */
+ CV_EXPORTS_W void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0);
+
+ /** @brief Undistorts 2D points using fisheye model
+
+    @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is the
+    number of points in the view.
+    @param K Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
+ @param D Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
+ @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
+ 1-channel or 1x1 3-channel
+ @param P New camera matrix (3x3) or new projection matrix (3x4)
+    @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
+ */
+ CV_EXPORTS_W void undistortPoints(InputArray distorted, OutputArray undistorted,
+ InputArray K, InputArray D, InputArray R = noArray(), InputArray P = noArray());
+
+    /** @brief Computes undistortion and rectification maps for image transform by cv::remap(). If D is empty,
+    zero distortion is used; if R or P is empty, identity matrices are used.
+
+    @param K Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
+ @param D Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
+ @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
+ 1-channel or 1x1 3-channel
+ @param P New camera matrix (3x3) or new projection matrix (3x4)
+ @param size Undistorted image size.
+ @param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . See convertMaps()
+ for details.
+ @param map1 The first output map.
+ @param map2 The second output map.
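+
+    A minimal usage sketch (illustrative; K, D and imageSize are assumed to come from fisheye::calibrate):
+    @code
+        Mat map1, map2, undistortedImg;
+        fisheye::initUndistortRectifyMap(K, D, Mat::eye(3, 3, CV_64F), K, imageSize,
+                                         CV_16SC2, map1, map2);
+        remap(distortedImg, undistortedImg, map1, map2, INTER_LINEAR);
+    @endcode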
+ */
+ CV_EXPORTS_W void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P,
+ const cv::Size& size, int m1type, OutputArray map1, OutputArray map2);
+
+ /** @brief Transforms an image to compensate for fisheye lens distortion.
+
+ @param distorted image with fisheye lens distortion.
+ @param undistorted Output image with compensated fisheye lens distortion.
+    @param K Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
+ @param D Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
+ @param Knew Camera matrix of the distorted image. By default, it is the identity matrix but you
+ may additionally scale and shift the result by using a different matrix.
+ @param new_size
+
+    The function transforms an image to compensate for radial and tangential lens distortion.
+
+ The function is simply a combination of fisheye::initUndistortRectifyMap (with unity R ) and remap
+ (with bilinear interpolation). See the former function for details of the transformation being
+ performed.
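+
+    A minimal usage sketch (illustrative; K and D are assumed to come from fisheye::calibrate):
+    @code
+        Mat undistorted;
+        fisheye::undistortImage(distorted, undistorted, K, D, K);   // reuse K as Knew
+    @endcode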
+
+ See below the results of undistortImage.
+ - a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
+ k_4, k_5, k_6) of distortion were optimized under calibration)
+ - b\) result of fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
+ k_3, k_4) of fisheye distortion were optimized under calibration)
+ - c\) original image was captured with fisheye lens
+
+    Pictures a) and b) are almost the same. But if we consider points of the image located far from the
+    center of the image, we can notice that in image a) these points are distorted.
+
+ 
+ */
+ CV_EXPORTS_W void undistortImage(InputArray distorted, OutputArray undistorted,
+ InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size());
+
+ /** @brief Estimates new camera matrix for undistortion or rectification.
+
+    @param K Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
+ @param image_size
+ @param D Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
+ @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
+ 1-channel or 1x1 3-channel
+ @param P New camera matrix (3x3) or new projection matrix (3x4)
+ @param balance Sets the new focal length in range between the min focal length and the max focal
+ length. Balance is in range of [0, 1].
+ @param new_size
+ @param fov_scale Divisor for new focal length.
+ */
+ CV_EXPORTS_W void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R,
+ OutputArray P, double balance = 0.0, const Size& new_size = Size(), double fov_scale = 1.0);
+
+    /** @brief Performs camera calibration
+
+ @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
+ coordinate space.
+    @param imagePoints vector of vectors of the projections of calibration pattern points.
+    imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be equal to
+    objectPoints[i].size() for each i.
+ @param image_size Size of the image used only to initialize the intrinsic camera matrix.
+ @param K Output 3x3 floating-point camera matrix
+ \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If
+    fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
+ initialized before calling the function.
+ @param D Output vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
+ @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
+ That is, each k-th rotation vector together with the corresponding k-th translation vector (see
+ the next output parameter description) brings the calibration pattern from the model coordinate
+ space (in which object points are specified) to the world coordinate space, that is, a real
+ position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
+ @param tvecs Output vector of translation vectors estimated for each pattern view.
+ @param flags Different flags that may be zero or a combination of the following values:
+ - **fisheye::CALIB_USE_INTRINSIC_GUESS** cameraMatrix contains valid initial values of
+ fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
+ center ( imageSize is used), and focal distances are computed in a least-squares fashion.
+ - **fisheye::CALIB_RECOMPUTE_EXTRINSIC** Extrinsic will be recomputed after each iteration
+ of intrinsic optimization.
+ - **fisheye::CALIB_CHECK_COND** The functions will check validity of condition number.
+    - **fisheye::CALIB_FIX_SKEW** Skew coefficient (alpha) is set to zero and stays zero.
+ - **fisheye::CALIB_FIX_K1..4** Selected distortion coefficients are set to zeros and stay
+ zero.
+ @param criteria Termination criteria for the iterative optimization algorithm.
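+
+    A minimal usage sketch (illustrative; the pattern points, the detected corners, and imageSize are
+    assumed to be collected beforehand, e.g. with findChessboardCorners):
+    @code
+        std::vector<std::vector<Point3f> > objectPoints;   // pattern points per view
+        std::vector<std::vector<Point2f> > imagePoints;    // detected corners per view
+        Mat K, D;
+        std::vector<Mat> rvecs, tvecs;
+        double rms = fisheye::calibrate(objectPoints, imagePoints, imageSize, K, D, rvecs, tvecs,
+                                        fisheye::CALIB_RECOMPUTE_EXTRINSIC | fisheye::CALIB_FIX_SKEW);
+    @endcode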
+ */
+ CV_EXPORTS_W double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,
+ InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0,
+ TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
+
+ /** @brief Stereo rectification for fisheye camera model
+
+ @param K1 First camera matrix.
+ @param D1 First camera distortion parameters.
+ @param K2 Second camera matrix.
+ @param D2 Second camera distortion parameters.
+ @param imageSize Size of the image used for stereo calibration.
+ @param R Rotation matrix between the coordinate systems of the first and the second
+ cameras.
+ @param tvec Translation vector between coordinate systems of the cameras.
+ @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
+ @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
+ @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
+ camera.
+ @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
+ camera.
+ @param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).
+ @param flags Operation flags that may be zero or CV_CALIB_ZERO_DISPARITY . If the flag is set,
+ the function makes the principal points of each camera have the same pixel coordinates in the
+ rectified views. And if the flag is not set, the function may still shift the images in the
+ horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
+ useful image area.
+ @param newImageSize New image resolution after rectification. The same size should be passed to
+ initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
+ is passed (default), it is set to the original imageSize . Setting it to larger value can help you
+ preserve details in the original image, especially when there is a big radial distortion.
+ @param balance Sets the new focal length in range between the min focal length and the max focal
+ length. Balance is in range of [0, 1].
+ @param fov_scale Divisor for new focal length.
+ */
+ CV_EXPORTS_W void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec,
+ OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize = Size(),
+ double balance = 0.0, double fov_scale = 1.0);
+
+ /** @brief Performs stereo calibration
+
+ @param objectPoints Vector of vectors of the calibration pattern points.
+ @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
+ observed by the first camera.
+ @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
+ observed by the second camera.
+ @param K1 Input/output first camera matrix:
+ \f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If
+ any of fisheye::CALIB_USE_INTRINSIC_GUESS , fisheye::CV_CALIB_FIX_INTRINSIC are specified,
+ some or all of the matrix components must be initialized.
+ @param D1 Input/output vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$ of 4 elements.
+ @param K2 Input/output second camera matrix. The parameter is similar to K1 .
+ @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
+ similar to D1 .
+ @param imageSize Size of the image used only to initialize intrinsic camera matrix.
+ @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
+ @param T Output translation vector between the coordinate systems of the cameras.
+ @param flags Different flags that may be zero or a combination of the following values:
+    - **fisheye::CV_CALIB_FIX_INTRINSIC** Fix K1, K2 and D1, D2 so that only R, T matrices
+ are estimated.
+ - **fisheye::CALIB_USE_INTRINSIC_GUESS** K1, K2 contains valid initial values of
+ fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
+ center (imageSize is used), and focal distances are computed in a least-squares fashion.
+ - **fisheye::CALIB_RECOMPUTE_EXTRINSIC** Extrinsic will be recomputed after each iteration
+ of intrinsic optimization.
+ - **fisheye::CALIB_CHECK_COND** The functions will check validity of condition number.
+    - **fisheye::CALIB_FIX_SKEW** Skew coefficient (alpha) is set to zero and stays zero.
+ - **fisheye::CALIB_FIX_K1..4** Selected distortion coefficients are set to zeros and stay
+ zero.
+ @param criteria Termination criteria for the iterative optimization algorithm.
+ */
+ CV_EXPORTS_W double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
+ InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize,
+ OutputArray R, OutputArray T, int flags = fisheye::CALIB_FIX_INTRINSIC,
+ TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
+
+//! @} calib3d_fisheye
+}
+
+} // cv
+
+#ifndef DISABLE_OPENCV_24_COMPATIBILITY
+#include "opencv2/calib3d/calib3d_c.h"
+#endif
+
+#endif
diff --git a/lib/opencv/include/opencv2/calib3d/calib3d.hpp b/lib/opencv/include/opencv2/calib3d/calib3d.hpp
new file mode 100644
index 000000000..b3da45edd
--- /dev/null
+++ b/lib/opencv/include/opencv2/calib3d/calib3d.hpp
@@ -0,0 +1,48 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifdef __OPENCV_BUILD
+#error this is a compatibility header which should not be used inside the OpenCV library
+#endif
+
+#include "opencv2/calib3d.hpp"
diff --git a/lib/opencv/include/opencv2/calib3d/calib3d_c.h b/lib/opencv/include/opencv2/calib3d/calib3d_c.h
new file mode 100644
index 000000000..0e77aa883
--- /dev/null
+++ b/lib/opencv/include/opencv2/calib3d/calib3d_c.h
@@ -0,0 +1,425 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CALIB3D_C_H__
+#define __OPENCV_CALIB3D_C_H__
+
+#include "opencv2/core/core_c.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @addtogroup calib3d_c
+ @{
+ */
+
+/****************************************************************************************\
+* Camera Calibration, Pose Estimation and Stereo *
+\****************************************************************************************/
+
+typedef struct CvPOSITObject CvPOSITObject;
+
+/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */
+CVAPI(CvPOSITObject*) cvCreatePOSITObject( CvPoint3D32f* points, int point_count );
+
+
+/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of
+ an object given its model and projection in a weak-perspective case */
+CVAPI(void) cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points,
+ double focal_length, CvTermCriteria criteria,
+ float* rotation_matrix, float* translation_vector);
+
+/* Releases CvPOSITObject structure */
+CVAPI(void) cvReleasePOSITObject( CvPOSITObject** posit_object );
+
+/* updates the number of RANSAC iterations */
+CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob,
+ int model_points, int max_iters );
+
+CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst );
+
+/* Calculates fundamental matrix given a set of corresponding points */
+#define CV_FM_7POINT 1
+#define CV_FM_8POINT 2
+
+#define CV_LMEDS 4
+#define CV_RANSAC 8
+
+#define CV_FM_LMEDS_ONLY CV_LMEDS
+#define CV_FM_RANSAC_ONLY CV_RANSAC
+#define CV_FM_LMEDS CV_LMEDS
+#define CV_FM_RANSAC CV_RANSAC
+
+enum
+{
+ CV_ITERATIVE = 0,
+ CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
+ CV_P3P = 2, // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
+ CV_DLS = 3 // Joel A. Hesch and Stergios I. Roumeliotis. "A Direct Least-Squares (DLS) Method for PnP"
+};
+
+CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
+ CvMat* fundamental_matrix,
+ int method CV_DEFAULT(CV_FM_RANSAC),
+ double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99),
+ CvMat* status CV_DEFAULT(NULL) );
+
+/* For each input point on one of images
+ computes parameters of the corresponding
+ epipolar line on the other image */
+CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points,
+ int which_image,
+ const CvMat* fundamental_matrix,
+ CvMat* correspondent_lines );
+
+/* Triangulation functions */
+
+CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,
+ CvMat* projPoints1, CvMat* projPoints2,
+ CvMat* points4D);
+
+CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,
+ CvMat* new_points1, CvMat* new_points2);
+
+
+/* Computes the optimal new camera matrix according to the free scaling parameter alpha:
+ alpha=0 - only valid pixels will be retained in the undistorted image
+ alpha=1 - all the source image pixels will be retained in the undistorted image
+*/
+CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix,
+ const CvMat* dist_coeffs,
+ CvSize image_size, double alpha,
+ CvMat* new_camera_matrix,
+ CvSize new_imag_size CV_DEFAULT(cvSize(0,0)),
+ CvRect* valid_pixel_ROI CV_DEFAULT(0),
+ int center_principal_point CV_DEFAULT(0));
+
+/* Converts rotation vector to rotation matrix or vice versa */
+CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst,
+ CvMat* jacobian CV_DEFAULT(0) );
+
+/* Finds perspective transformation between the object plane and image (view) plane */
+CVAPI(int) cvFindHomography( const CvMat* src_points,
+ const CvMat* dst_points,
+ CvMat* homography,
+ int method CV_DEFAULT(0),
+ double ransacReprojThreshold CV_DEFAULT(3),
+ CvMat* mask CV_DEFAULT(0),
+ int maxIters CV_DEFAULT(2000),
+ double confidence CV_DEFAULT(0.995));
+
+/* Computes RQ decomposition for 3x3 matrices */
+CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
+ CvMat *matrixQx CV_DEFAULT(NULL),
+ CvMat *matrixQy CV_DEFAULT(NULL),
+ CvMat *matrixQz CV_DEFAULT(NULL),
+ CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
+
+/* Computes projection matrix decomposition */
+CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
+ CvMat *rotMatr, CvMat *posVect,
+ CvMat *rotMatrX CV_DEFAULT(NULL),
+ CvMat *rotMatrY CV_DEFAULT(NULL),
+ CvMat *rotMatrZ CV_DEFAULT(NULL),
+ CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
+
+/* Computes d(AB)/dA and d(AB)/dB */
+CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB );
+
+/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)),
+ t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */
+CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
+ const CvMat* _rvec2, const CvMat* _tvec2,
+ CvMat* _rvec3, CvMat* _tvec3,
+ CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0),
+ CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0),
+ CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0),
+ CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) );
+
+/* Projects object points to the view plane using
+ the specified extrinsic and intrinsic camera parameters */
+CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector,
+ const CvMat* translation_vector, const CvMat* camera_matrix,
+ const CvMat* distortion_coeffs, CvMat* image_points,
+ CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL),
+ CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL),
+ CvMat* dpddist CV_DEFAULT(NULL),
+ double aspect_ratio CV_DEFAULT(0));
+
+/* Finds extrinsic camera parameters from
+ a few known corresponding point pairs and intrinsic parameters */
+CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points,
+ const CvMat* image_points,
+ const CvMat* camera_matrix,
+ const CvMat* distortion_coeffs,
+ CvMat* rotation_vector,
+ CvMat* translation_vector,
+ int use_extrinsic_guess CV_DEFAULT(0) );
+
+/* Computes initial estimate of the intrinsic camera parameters
+ in case of planar calibration target (e.g. chessboard) */
+CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points,
+ const CvMat* image_points,
+ const CvMat* npoints, CvSize image_size,
+ CvMat* camera_matrix,
+ double aspect_ratio CV_DEFAULT(1.) );
+
+#define CV_CALIB_CB_ADAPTIVE_THRESH 1
+#define CV_CALIB_CB_NORMALIZE_IMAGE 2
+#define CV_CALIB_CB_FILTER_QUADS 4
+#define CV_CALIB_CB_FAST_CHECK 8
+
+// Performs a fast check if a chessboard is in the input image. This is a workaround to
+// a problem of cvFindChessboardCorners being slow on images with no chessboard
+// - src: input image
+// - size: chessboard size
+// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
+// 0 if there is no chessboard, -1 in case of error
+CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);
+
+ /* Detects corners on a chessboard calibration pattern */
+CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,
+ CvPoint2D32f* corners,
+ int* corner_count CV_DEFAULT(NULL),
+ int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );
+
+/* Draws individual chessboard corners or the whole chessboard detected */
+CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
+ CvPoint2D32f* corners,
+ int count, int pattern_was_found );
+
+#define CV_CALIB_USE_INTRINSIC_GUESS 1
+#define CV_CALIB_FIX_ASPECT_RATIO 2
+#define CV_CALIB_FIX_PRINCIPAL_POINT 4
+#define CV_CALIB_ZERO_TANGENT_DIST 8
+#define CV_CALIB_FIX_FOCAL_LENGTH 16
+#define CV_CALIB_FIX_K1 32
+#define CV_CALIB_FIX_K2 64
+#define CV_CALIB_FIX_K3 128
+#define CV_CALIB_FIX_K4 2048
+#define CV_CALIB_FIX_K5 4096
+#define CV_CALIB_FIX_K6 8192
+#define CV_CALIB_RATIONAL_MODEL 16384
+#define CV_CALIB_THIN_PRISM_MODEL 32768
+#define CV_CALIB_FIX_S1_S2_S3_S4 65536
+#define CV_CALIB_TILTED_MODEL 262144
+#define CV_CALIB_FIX_TAUX_TAUY 524288
+
+
+/* Finds intrinsic and extrinsic camera parameters
+ from a few views of known calibration pattern */
+CVAPI(double) cvCalibrateCamera2( const CvMat* object_points,
+ const CvMat* image_points,
+ const CvMat* point_counts,
+ CvSize image_size,
+ CvMat* camera_matrix,
+ CvMat* distortion_coeffs,
+ CvMat* rotation_vectors CV_DEFAULT(NULL),
+ CvMat* translation_vectors CV_DEFAULT(NULL),
+ int flags CV_DEFAULT(0),
+ CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
+ CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );
+
+/* Computes various useful characteristics of the camera from the data computed by
+ cvCalibrateCamera2 */
+CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix,
+ CvSize image_size,
+ double aperture_width CV_DEFAULT(0),
+ double aperture_height CV_DEFAULT(0),
+ double *fovx CV_DEFAULT(NULL),
+ double *fovy CV_DEFAULT(NULL),
+ double *focal_length CV_DEFAULT(NULL),
+ CvPoint2D64f *principal_point CV_DEFAULT(NULL),
+ double *pixel_aspect_ratio CV_DEFAULT(NULL));
+
+#define CV_CALIB_FIX_INTRINSIC 256
+#define CV_CALIB_SAME_FOCAL_LENGTH 512
+
+/* Computes the transformation from one camera coordinate system to another one
+ from a few correspondent views of the same calibration target. Optionally, calibrates
+ both cameras */
+CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1,
+ const CvMat* image_points2, const CvMat* npoints,
+ CvMat* camera_matrix1, CvMat* dist_coeffs1,
+ CvMat* camera_matrix2, CvMat* dist_coeffs2,
+ CvSize image_size, CvMat* R, CvMat* T,
+ CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0),
+ int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC),
+ CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
+ CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)) );
+
+#define CV_CALIB_ZERO_DISPARITY 1024
+
+/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both
+ views parallel (=> to make all the epipolar lines horizontal or vertical) */
+CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2,
+ const CvMat* dist_coeffs1, const CvMat* dist_coeffs2,
+ CvSize image_size, const CvMat* R, const CvMat* T,
+ CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2,
+ CvMat* Q CV_DEFAULT(0),
+ int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY),
+ double alpha CV_DEFAULT(-1),
+ CvSize new_image_size CV_DEFAULT(cvSize(0,0)),
+ CvRect* valid_pix_ROI1 CV_DEFAULT(0),
+ CvRect* valid_pix_ROI2 CV_DEFAULT(0));
+
+/* Computes rectification transformations for uncalibrated pair of images using a set
+ of point correspondences */
+CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2,
+ const CvMat* F, CvSize img_size,
+ CvMat* H1, CvMat* H2,
+ double threshold CV_DEFAULT(5));
+
+
+
+/* stereo correspondence parameters and functions */
+
+#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
+#define CV_STEREO_BM_XSOBEL 1
+
+/* Block matching algorithm structure */
+typedef struct CvStereoBMState
+{
+ // pre-filtering (normalization of input images)
+ int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now
+ int preFilterSize; // averaging window size: ~5x5..21x21
+ int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap]
+
+ // correspondence using Sum of Absolute Difference (SAD)
+ int SADWindowSize; // ~5x5..21x21
+ int minDisparity; // minimum disparity (can be negative)
+ int numberOfDisparities; // maximum disparity - minimum disparity (> 0)
+
+ // post-filtering
+ int textureThreshold; // the disparity is only computed for pixels
+ // with textured enough neighborhood
+ int uniquenessRatio; // accept the computed disparity d* only if
+ // SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.)
+ // for any d != d*+/-1 within the search range.
+ int speckleWindowSize; // disparity variation window
+ int speckleRange; // acceptable range of variation in window
+
+ int trySmallerWindows; // if 1, the results may be more accurate,
+ // at the expense of slower processing
+ CvRect roi1, roi2;
+ int disp12MaxDiff;
+
+ // temporary buffers
+ CvMat* preFilteredImg0;
+ CvMat* preFilteredImg1;
+ CvMat* slidingSumBuf;
+ CvMat* cost;
+ CvMat* disp;
+} CvStereoBMState;
+
+#define CV_STEREO_BM_BASIC 0
+#define CV_STEREO_BM_FISH_EYE 1
+#define CV_STEREO_BM_NARROW 2
+
+CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC),
+ int numberOfDisparities CV_DEFAULT(0));
+
+CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state );
+
+CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,
+ CvArr* disparity, CvStereoBMState* state );
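+
+/* Illustrative C++ sketch (not part of the upstream header): block-matching disparity
+ for a rectified pair; "left" and "right" are placeholder 8-bit single-channel CvMat images.
+
+ CvMat* disp = cvCreateMat( left->rows, left->cols, CV_16SC1 );
+ CvStereoBMState* bm = cvCreateStereoBMState( CV_STEREO_BM_BASIC, 64 );
+ bm->SADWindowSize = 15;
+ cvFindStereoCorrespondenceBM( left, right, disp, bm );
+ cvReleaseStereoBMState( &bm );
+*/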
+
+CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
+ int numberOfDisparities, int SADWindowSize );
+
+CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost,
+ int minDisparity, int numberOfDisparities,
+ int disp12MaxDiff CV_DEFAULT(1) );
+
+/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */
+CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage,
+ CvArr* _3dImage, const CvMat* Q,
+ int handleMissingValues CV_DEFAULT(0) );
+
+/** @} calib3d_c */
+
+#ifdef __cplusplus
+} // extern "C"
+
+//////////////////////////////////////////////////////////////////////////////////////////
+class CV_EXPORTS CvLevMarq
+{
+public:
+ CvLevMarq();
+ CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
+ cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
+ bool completeSymmFlag=false );
+ ~CvLevMarq();
+ void init( int nparams, int nerrs, CvTermCriteria criteria=
+ cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
+ bool completeSymmFlag=false );
+ bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
+ bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
+
+ void clear();
+ void step();
+ enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
+
+ cv::Ptr<CvMat> mask;
+ cv::Ptr<CvMat> prevParam;
+ cv::Ptr<CvMat> param;
+ cv::Ptr<CvMat> J;
+ cv::Ptr<CvMat> err;
+ cv::Ptr<CvMat> JtJ;
+ cv::Ptr<CvMat> JtJN;
+ cv::Ptr<CvMat> JtErr;
+ cv::Ptr<CvMat> JtJV;
+ cv::Ptr<CvMat> JtJW;
+ double prevErrNorm, errNorm;
+ int lambdaLg10;
+ CvTermCriteria criteria;
+ int state;
+ int iters;
+ bool completeSymmFlag;
+ int solveMethod;
+};
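+
+// Illustrative usage sketch (not part of the upstream header). The caller supplies
+// nparams, nerrs and the code that evaluates the Jacobian and the residual vector:
+//
+// CvLevMarq solver( nparams, nerrs );
+// // copy the initial parameter guess into solver.param (an nparams x 1 CvMat)
+// for(;;)
+// {
+// const CvMat* _param = 0;
+// CvMat *J = 0, *err = 0;
+// bool proceed = solver.update( _param, J, err );
+// if( !proceed || !err )
+// break; // optimization finished
+// if( J ) { /* fill J with the Jacobian evaluated at _param */ }
+// /* fill err with the residual vector evaluated at _param */
+// }
+// // the refined parameters are left in solver.param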
+
+#endif
+
+#endif /* __OPENCV_CALIB3D_C_H__ */
diff --git a/lib/opencv/include/opencv2/core.hpp b/lib/opencv/include/opencv2/core.hpp
new file mode 100644
index 000000000..559226079
--- /dev/null
+++ b/lib/opencv/include/opencv2/core.hpp
@@ -0,0 +1,3168 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2015, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2015, OpenCV Foundation, all rights reserved.
+// Copyright (C) 2015, Itseez Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_HPP__
+#define __OPENCV_CORE_HPP__
+
+#ifndef __cplusplus
+# error core.hpp header must be compiled as C++
+#endif
+
+#include "opencv2/core/cvdef.h"
+#include "opencv2/core/version.hpp"
+#include "opencv2/core/base.hpp"
+#include "opencv2/core/cvstd.hpp"
+#include "opencv2/core/traits.hpp"
+#include "opencv2/core/matx.hpp"
+#include "opencv2/core/types.hpp"
+#include "opencv2/core/mat.hpp"
+#include "opencv2/core/persistence.hpp"
+
+/**
+@defgroup core Core functionality
+@{
+ @defgroup core_basic Basic structures
+ @defgroup core_c C structures and operations
+ @{
+ @defgroup core_c_glue Connections with C++
+ @}
+ @defgroup core_array Operations on arrays
+ @defgroup core_xml XML/YAML Persistence
+ @defgroup core_cluster Clustering
+ @defgroup core_utils Utility and system functions and macros
+ @{
+ @defgroup core_utils_sse SSE utilities
+ @defgroup core_utils_neon NEON utilities
+ @}
+ @defgroup core_opengl OpenGL interoperability
+ @defgroup core_ipp Intel IPP Asynchronous C/C++ Converters
+ @defgroup core_optim Optimization Algorithms
+ @defgroup core_directx DirectX interoperability
+ @defgroup core_eigen Eigen support
+ @defgroup core_opencl OpenCL support
+ @defgroup core_va_intel Intel VA-API/OpenCL (CL-VA) interoperability
+ @defgroup core_hal Hardware Acceleration Layer
+ @{
+ @defgroup core_hal_functions Functions
+ @defgroup core_hal_interface Interface
+ @defgroup core_hal_intrin Universal intrinsics
+ @{
+ @defgroup core_hal_intrin_impl Private implementation helpers
+ @}
+ @}
+@}
+ */
+
+namespace cv {
+
+//! @addtogroup core_utils
+//! @{
+
+/*! @brief Class passed to an error.
+
+This class encapsulates all or almost all necessary
+information about the error that occurred in the program. The exception is
+usually constructed and thrown implicitly via CV_Error and CV_Error_ macros.
+@see error
+ */
+class CV_EXPORTS Exception : public std::exception
+{
+public:
+ /*!
+ Default constructor
+ */
+ Exception();
+ /*!
+ Full constructor. Normally the constructor is not called explicitly.
+ Instead, the macros CV_Error(), CV_Error_() and CV_Assert() are used.
+ */
+ Exception(int _code, const String& _err, const String& _func, const String& _file, int _line);
+ virtual ~Exception() throw();
+
+ /*!
+ \return the error description and the context as a text string.
+ */
+ virtual const char *what() const throw();
+ void formatMessage();
+
+ String msg; ///< the formatted error message
+
+ int code; ///< error code @see CVStatus
+ String err; ///< error description
+ String func; ///< function name. Available only when the compiler supports getting it
+ String file; ///< source file name where the error has occurred
+ int line; ///< line number in the source file where the error has occurred
+};
+
+/*! @brief Signals an error and raises the exception.
+
+By default the function prints information about the error to stderr,
+then it either stops if cv::setBreakOnError() had been called before or raises the exception.
+It is possible to alter error processing by using cv::redirectError().
+@param exc the exception raised.
+@deprecated drop this version
+ */
+CV_EXPORTS void error( const Exception& exc );
+
+enum SortFlags { SORT_EVERY_ROW = 0, //!< each matrix row is sorted independently
+ SORT_EVERY_COLUMN = 1, //!< each matrix column is sorted
+ //!< independently; this flag and the previous one are
+ //!< mutually exclusive.
+ SORT_ASCENDING = 0, //!< each matrix row is sorted in the ascending
+ //!< order.
+ SORT_DESCENDING = 16 //!< each matrix row is sorted in the
+ //!< descending order; this flag and the previous one are also
+ //!< mutually exclusive.
+ };
+
+//! @} core_utils
+
+//! @addtogroup core
+//! @{
+
+//! Covariation flags
+enum CovarFlags {
+ /** The output covariance matrix is calculated as:
+ \f[\texttt{scale} \cdot [ \texttt{vects} [0]- \texttt{mean} , \texttt{vects} [1]- \texttt{mean} ,...]^T \cdot [ \texttt{vects} [0]- \texttt{mean} , \texttt{vects} [1]- \texttt{mean} ,...],\f]
+ The covariance matrix will be nsamples x nsamples. Such an unusual covariance matrix is used
+ for fast PCA of a set of very large vectors (see, for example, the EigenFaces technique for
+ face recognition). Eigenvalues of this "scrambled" matrix match the eigenvalues of the true
+ covariance matrix. The "true" eigenvectors can be easily calculated from the eigenvectors of
+ the "scrambled" covariance matrix. */
+ COVAR_SCRAMBLED = 0,
+ /**The output covariance matrix is calculated as:
+ \f[\texttt{scale} \cdot [ \texttt{vects} [0]- \texttt{mean} , \texttt{vects} [1]- \texttt{mean} ,...] \cdot [ \texttt{vects} [0]- \texttt{mean} , \texttt{vects} [1]- \texttt{mean} ,...]^T,\f]
+ covar will be a square matrix of the same size as the total number of elements in each input
+ vector. One and only one of COVAR_SCRAMBLED and COVAR_NORMAL must be specified.*/
+ COVAR_NORMAL = 1,
+ /** If the flag is specified, the function does not calculate mean from
+ the input vectors but, instead, uses the passed mean vector. This is useful if mean has been
+ pre-calculated or known in advance, or if the covariance matrix is calculated by parts. In
+ this case, mean is not a mean vector of the input sub-set of vectors but rather the mean
+ vector of the whole set.*/
+ COVAR_USE_AVG = 2,
+ /** If the flag is specified, the covariance matrix is scaled. In the
+ "normal" mode, scale is 1./nsamples . In the "scrambled" mode, scale is the reciprocal of the
+ total number of elements in each input vector. By default (if the flag is not specified), the
+ covariance matrix is not scaled ( scale=1 ).*/
+ COVAR_SCALE = 4,
+ /** If the flag is
+ specified, all the input vectors are stored as rows of the samples matrix. mean should be a
+ single-row vector in this case.*/
+ COVAR_ROWS = 8,
+ /** If the flag is
+ specified, all the input vectors are stored as columns of the samples matrix. mean should be a
+ single-column vector in this case.*/
+ COVAR_COLS = 16
+};
+
+//! k-Means flags
+enum KmeansFlags {
+ /** Select random initial centers in each attempt.*/
+ KMEANS_RANDOM_CENTERS = 0,
+ /** Use kmeans++ center initialization by Arthur and Vassilvitskii [Arthur2007].*/
+ KMEANS_PP_CENTERS = 2,
+ /** During the first (and possibly the only) attempt, use the
+ user-supplied labels instead of computing them from the initial centers. For the second and
+ further attempts, use the random or semi-random centers. Use one of KMEANS_\*_CENTERS flag
+ to specify the exact method.*/
+ KMEANS_USE_INITIAL_LABELS = 1
+};
+
+//! type of line
+enum LineTypes {
+ FILLED = -1,
+ LINE_4 = 4, //!< 4-connected line
+ LINE_8 = 8, //!< 8-connected line
+ LINE_AA = 16 //!< antialiased line
+};
+
+//! Only a subset of Hershey fonts
+//! are supported
+enum HersheyFonts {
+ FONT_HERSHEY_SIMPLEX = 0, //!< normal size sans-serif font
+ FONT_HERSHEY_PLAIN = 1, //!< small size sans-serif font
+ FONT_HERSHEY_DUPLEX = 2, //!< normal size sans-serif font (more complex than FONT_HERSHEY_SIMPLEX)
+ FONT_HERSHEY_COMPLEX = 3, //!< normal size serif font
+ FONT_HERSHEY_TRIPLEX = 4, //!< normal size serif font (more complex than FONT_HERSHEY_COMPLEX)
+ FONT_HERSHEY_COMPLEX_SMALL = 5, //!< smaller version of FONT_HERSHEY_COMPLEX
+ FONT_HERSHEY_SCRIPT_SIMPLEX = 6, //!< hand-writing style font
+ FONT_HERSHEY_SCRIPT_COMPLEX = 7, //!< more complex variant of FONT_HERSHEY_SCRIPT_SIMPLEX
+ FONT_ITALIC = 16 //!< flag for italic font
+};
+
+enum ReduceTypes { REDUCE_SUM = 0, //!< the output is the sum of all rows/columns of the matrix.
+ REDUCE_AVG = 1, //!< the output is the mean vector of all rows/columns of the matrix.
+ REDUCE_MAX = 2, //!< the output is the maximum (column/row-wise) of all rows/columns of the matrix.
+ REDUCE_MIN = 3 //!< the output is the minimum (column/row-wise) of all rows/columns of the matrix.
+ };
+
+
+/** @brief Swaps two matrices
+*/
+CV_EXPORTS void swap(Mat& a, Mat& b);
+/** @overload */
+CV_EXPORTS void swap( UMat& a, UMat& b );
+
+//! @} core
+
+//! @addtogroup core_array
+//! @{
+
+/** @brief Computes the source location of an extrapolated pixel.
+
+The function computes and returns the coordinate of a donor pixel corresponding to the specified
+extrapolated pixel when using the specified extrapolation border mode. For example, if you use
+cv::BORDER_WRAP mode in the horizontal direction, cv::BORDER_REFLECT_101 in the vertical direction and
+want to compute value of the "virtual" pixel Point(-5, 100) in a floating-point image img , it
+looks like:
+@code{.cpp}
+ float val = img.at<float>(borderInterpolate(100, img.rows, cv::BORDER_REFLECT_101),
+ borderInterpolate(-5, img.cols, cv::BORDER_WRAP));
+@endcode
+Normally, the function is not called directly. It is used inside filtering functions and also in
+copyMakeBorder.
+@param p 0-based coordinate of the extrapolated pixel along one of the axes, likely \<0 or \>= len
+@param len Length of the array along the corresponding axis.
+@param borderType Border type, one of the cv::BorderTypes, except for cv::BORDER_TRANSPARENT and
+cv::BORDER_ISOLATED . When borderType==cv::BORDER_CONSTANT , the function always returns -1, regardless
+of p and len.
+
+@sa copyMakeBorder
+*/
+CV_EXPORTS_W int borderInterpolate(int p, int len, int borderType);
+
+/** @brief Forms a border around an image.
+
+The function copies the source image into the middle of the destination image. The areas to the
+left, to the right, above and below the copied source image will be filled with extrapolated
+pixels. This is not what filtering functions based on it do (they extrapolate pixels on the fly), but
+what other more complex functions, including your own, may do to simplify image boundary handling.
+
+The function supports the mode when src is already in the middle of dst . In this case, the
+function does not copy src itself but simply constructs the border, for example:
+
+@code{.cpp}
+ // let border be the same in all directions
+ int border=2;
+ // constructs a larger image to fit both the image and the border
+ Mat gray_buf(rgb.rows + border*2, rgb.cols + border*2, rgb.depth());
+ // select the middle part of it w/o copying data
+ Mat gray(gray_buf, Rect(border, border, rgb.cols, rgb.rows));
+ // convert image from RGB to grayscale
+ cvtColor(rgb, gray, COLOR_RGB2GRAY);
+ // form a border in-place
+ copyMakeBorder(gray, gray_buf, border, border,
+ border, border, BORDER_REPLICATE);
+ // now do some custom filtering ...
+ ...
+@endcode
+@note When the source image is a part (ROI) of a bigger image, the function will try to use the
+pixels outside of the ROI to form a border. To disable this feature and always do extrapolation, as
+if src was not a ROI, use borderType | BORDER_ISOLATED.
+
+@param src Source image.
+@param dst Destination image of the same type as src and the size Size(src.cols+left+right,
+src.rows+top+bottom) .
+@param top
+@param bottom
+@param left
+@param right Parameter specifying how many pixels in each direction from the source image rectangle
+to extrapolate. For example, top=1, bottom=1, left=1, right=1 mean that 1 pixel-wide border needs
+to be built.
+@param borderType Border type. See borderInterpolate for details.
+@param value Border value if borderType==BORDER_CONSTANT .
+
+@sa borderInterpolate
+*/
+CV_EXPORTS_W void copyMakeBorder(InputArray src, OutputArray dst,
+ int top, int bottom, int left, int right,
+ int borderType, const Scalar& value = Scalar() );
+
+/** @brief Calculates the per-element sum of two arrays or an array and a scalar.
+
+The function add calculates:
+- Sum of two arrays when both input arrays have the same size and the same number of channels:
+\f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) + \texttt{src2}(I)) \quad \texttt{if mask}(I) \ne0\f]
+- Sum of an array and a scalar when src2 is constructed from Scalar or has the same number of
+elements as `src1.channels()`:
+\f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) + \texttt{src2} ) \quad \texttt{if mask}(I) \ne0\f]
+- Sum of a scalar and an array when src1 is constructed from Scalar or has the same number of
+elements as `src2.channels()`:
+\f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1} + \texttt{src2}(I) ) \quad \texttt{if mask}(I) \ne0\f]
+where `I` is a multi-dimensional index of array elements. In case of multi-channel arrays, each
+channel is processed independently.
+
+The first function in the list above can be replaced with matrix expressions:
+@code{.cpp}
+ dst = src1 + src2;
+ dst += src1; // equivalent to add(dst, src1, dst);
+@endcode
+The input arrays and the output array can all have the same or different depths. For example, you
+can add a 16-bit unsigned array to an 8-bit signed array and store the sum as a 32-bit
+floating-point array. Depth of the output array is determined by the dtype parameter. In the second
+and third cases above, as well as in the first case, when src1.depth() == src2.depth(), dtype can
+be set to the default -1. In this case, the output array will have the same depth as the input
+array, be it src1, src2 or both.
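+For instance, a minimal sketch (the matrix names are illustrative) of adding an 8-bit
+and a 16-bit array into a 32-bit floating-point result:
+@code{.cpp}
+ Mat a(3, 3, CV_8UC1, Scalar(200)), b(3, 3, CV_16UC1, Scalar(60000)), c;
+ add(a, b, c, noArray(), CV_32F); // c has depth CV_32F
+@endcode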
+@note Saturation is not applied when the output array has the depth CV_32S. You may even get
+result of an incorrect sign in the case of overflow.
+@param src1 first input array or a scalar.
+@param src2 second input array or a scalar.
+@param dst output array that has the same size and number of channels as the input array(s); the
+depth is defined by dtype or src1/src2.
+@param mask optional operation mask - 8-bit single channel array, that specifies elements of the
+output array to be changed.
+@param dtype optional depth of the output array (see the discussion below).
+@sa subtract, addWeighted, scaleAdd, Mat::convertTo
+*/
+CV_EXPORTS_W void add(InputArray src1, InputArray src2, OutputArray dst,
+ InputArray mask = noArray(), int dtype = -1);
+
+/** @brief Calculates the per-element difference between two arrays or array and a scalar.
+
+The function subtract calculates:
+- Difference between two arrays, when both input arrays have the same size and the same number of
+channels:
+ \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) - \texttt{src2}(I)) \quad \texttt{if mask}(I) \ne0\f]
+- Difference between an array and a scalar, when src2 is constructed from Scalar or has the same
+number of elements as `src1.channels()`:
+ \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) - \texttt{src2} ) \quad \texttt{if mask}(I) \ne0\f]
+- Difference between a scalar and an array, when src1 is constructed from Scalar or has the same
+number of elements as `src2.channels()`:
+ \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1} - \texttt{src2}(I) ) \quad \texttt{if mask}(I) \ne0\f]
+- The reverse difference between a scalar and an array in the case of `SubRS`:
+ \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src2} - \texttt{src1}(I) ) \quad \texttt{if mask}(I) \ne0\f]
+where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each
+channel is processed independently.
+
+The first function in the list above can be replaced with matrix expressions:
+@code{.cpp}
+ dst = src1 - src2;
+ dst -= src1; // equivalent to subtract(dst, src1, dst);
+@endcode
+The input arrays and the output array can all have the same or different depths. For example, you
+can subtract two 8-bit unsigned arrays and store the difference in a 16-bit signed array. Depth of
+the output array is determined by dtype parameter. In the second and third cases above, as well as
+in the first case, when src1.depth() == src2.depth(), dtype can be set to the default -1. In this
+case the output array will have the same depth as the input array, be it src1, src2 or both.
+@note Saturation is not applied when the output array has the depth CV_32S. You may even get
+result of an incorrect sign in the case of overflow.
+@param src1 first input array or a scalar.
+@param src2 second input array or a scalar.
+@param dst output array of the same size and the same number of channels as the input array.
+@param mask optional operation mask; this is an 8-bit single channel array that specifies elements
+of the output array to be changed.
+@param dtype optional depth of the output array
+@sa add, addWeighted, scaleAdd, Mat::convertTo
+ */
+CV_EXPORTS_W void subtract(InputArray src1, InputArray src2, OutputArray dst,
+ InputArray mask = noArray(), int dtype = -1);
+
+
+/** @brief Calculates the per-element scaled product of two arrays.
+
+The function multiply calculates the per-element product of two arrays:
+
+\f[\texttt{dst} (I)= \texttt{saturate} ( \texttt{scale} \cdot \texttt{src1} (I) \cdot \texttt{src2} (I))\f]
+
+There is also a @ref MatrixExpressions -friendly variant of the first function. See Mat::mul .
+
+For a not-per-element matrix product, see gemm .
+
+@note Saturation is not applied when the output array has the depth
+CV_32S. You may even get result of an incorrect sign in the case of
+overflow.
+@param src1 first input array.
+@param src2 second input array of the same size and the same type as src1.
+@param dst output array of the same size and type as src1.
+@param scale optional scale factor.
+@param dtype optional depth of the output array
+@sa add, subtract, divide, scaleAdd, addWeighted, accumulate, accumulateProduct, accumulateSquare,
+Mat::convertTo
+*/
+CV_EXPORTS_W void multiply(InputArray src1, InputArray src2,
+ OutputArray dst, double scale = 1, int dtype = -1);
+
+/** @brief Performs per-element division of two arrays or a scalar by an array.
+
+The functions divide divide one array by another:
+\f[\texttt{dst(I) = saturate(src1(I)*scale/src2(I))}\f]
+or a scalar by an array when there is no src1 :
+\f[\texttt{dst(I) = saturate(scale/src2(I))}\f]
+
+When src2(I) is zero, dst(I) will also be zero. Different channels of
+multi-channel arrays are processed independently.
+
+@note Saturation is not applied when the output array has the depth CV_32S. You may even get
+result of an incorrect sign in the case of overflow.
+@param src1 first input array.
+@param src2 second input array of the same size and type as src1.
+@param scale scalar factor.
+@param dst output array of the same size and type as src2.
+@param dtype optional depth of the output array; if -1, dst will have depth src2.depth(), but in
+case of an array-by-array division, you can only pass -1 when src1.depth()==src2.depth().
+@sa multiply, add, subtract
+*/
+CV_EXPORTS_W void divide(InputArray src1, InputArray src2, OutputArray dst,
+ double scale = 1, int dtype = -1);
+
+/** @overload */
+CV_EXPORTS_W void divide(double scale, InputArray src2,
+ OutputArray dst, int dtype = -1);
+
+/** @brief Calculates the sum of a scaled array and another array.
+
+The function scaleAdd is one of the classical primitive linear algebra operations, known as DAXPY
+or SAXPY in [BLAS](http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms). It calculates
+the sum of a scaled array and another array:
+\f[\texttt{dst} (I)= \texttt{scale} \cdot \texttt{src1} (I) + \texttt{src2} (I)\f]
+The function can also be emulated with a matrix expression, for example:
+@code{.cpp}
+ Mat A(3, 3, CV_64F);
+ ...
+ A.row(0) = A.row(1)*2 + A.row(2);
+@endcode
+@param src1 first input array.
+@param alpha scale factor for the first array.
+@param src2 second input array of the same size and type as src1.
+@param dst output array of the same size and type as src1.
+@sa add, addWeighted, subtract, Mat::dot, Mat::convertTo
+*/
+CV_EXPORTS_W void scaleAdd(InputArray src1, double alpha, InputArray src2, OutputArray dst);
+
+/** @brief Calculates the weighted sum of two arrays.
+
+The function addWeighted calculates the weighted sum of two arrays as follows:
+\f[\texttt{dst} (I)= \texttt{saturate} ( \texttt{src1} (I)* \texttt{alpha} + \texttt{src2} (I)* \texttt{beta} + \texttt{gamma} )\f]
+where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each
+channel is processed independently.
+The function can be replaced with a matrix expression:
+@code{.cpp}
+ dst = src1*alpha + src2*beta + gamma;
+@endcode
+@note Saturation is not applied when the output array has the depth CV_32S. You may even get
+result of an incorrect sign in the case of overflow.
+@param src1 first input array.
+@param alpha weight of the first array elements.
+@param src2 second input array of the same size and channel number as src1.
+@param beta weight of the second array elements.
+@param gamma scalar added to each sum.
+@param dst output array that has the same size and number of channels as the input arrays.
+@param dtype optional depth of the output array; when both input arrays have the same depth, dtype
+can be set to -1, which will be equivalent to src1.depth().
+@sa add, subtract, scaleAdd, Mat::convertTo
+*/
+CV_EXPORTS_W void addWeighted(InputArray src1, double alpha, InputArray src2,
+ double beta, double gamma, OutputArray dst, int dtype = -1);
+
+/** @brief Scales, calculates absolute values, and converts the result to 8-bit.
+
+On each element of the input array, the function convertScaleAbs
+performs three operations sequentially: scaling, taking an absolute
+value, conversion to an unsigned 8-bit type:
+\f[\texttt{dst} (I)= \texttt{saturate\_cast} (| \texttt{src} (I)* \texttt{alpha} + \texttt{beta} |)\f]
+In case of multi-channel arrays, the function processes each channel
+independently. When the output is not 8-bit, the operation can be
+emulated by calling the Mat::convertTo method (or by using matrix
+expressions) and then by calculating an absolute value of the result.
+For example:
+@code{.cpp}
+ Mat_<float> A(30,30);
+ randu(A, Scalar(-100), Scalar(100));
+ Mat_<float> B = A*5 + 3;
+ B = abs(B);
+ // Mat_<float> B = abs(A*5+3) will also do the job,
+ // but it will allocate a temporary matrix
+@endcode
+@param src input array.
+@param dst output array.
+@param alpha optional scale factor.
+@param beta optional delta added to the scaled values.
+@sa Mat::convertTo, cv::abs(const Mat&)
+*/
+CV_EXPORTS_W void convertScaleAbs(InputArray src, OutputArray dst,
+ double alpha = 1, double beta = 0);
+
+/** @brief Performs a look-up table transform of an array.
+
+The function LUT fills the output array with values from the look-up table. Indices of the entries
+are taken from the input array. That is, the function processes each element of src as follows:
+\f[\texttt{dst} (I) \leftarrow \texttt{lut(src(I) + d)}\f]
+where
+\f[d = \fork{0}{if \(\texttt{src}\) has depth \(\texttt{CV_8U}\)}{128}{if \(\texttt{src}\) has depth \(\texttt{CV_8S}\)}\f]
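+A minimal gamma-correction sketch (the input name src8u and the gamma value are illustrative):
+@code{.cpp}
+ Mat lut(1, 256, CV_8UC1);
+ for( int i = 0; i < 256; i++ )
+ lut.at<uchar>(i) = saturate_cast<uchar>(std::pow(i / 255.0, 0.5) * 255.0);
+ Mat dst8u;
+ LUT(src8u, lut, dst8u);
+@endcode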
+@param src input array of 8-bit elements.
+@param lut look-up table of 256 elements; in case of multi-channel input array, the table should
+either have a single channel (in this case the same table is used for all channels) or the same
+number of channels as in the input array.
+@param dst output array of the same size and number of channels as src, and the same depth as lut.
+@sa convertScaleAbs, Mat::convertTo
+*/
+CV_EXPORTS_W void LUT(InputArray src, InputArray lut, OutputArray dst);
+
+/** @brief Calculates the sum of array elements.
+
+The functions sum calculate and return the sum of array elements,
+independently for each channel.
+@param src input array that must have from 1 to 4 channels.
+@sa countNonZero, mean, meanStdDev, norm, minMaxLoc, reduce
+*/
+CV_EXPORTS_AS(sumElems) Scalar sum(InputArray src);
+
+/** @brief Counts non-zero array elements.
+
+The function returns the number of non-zero elements in src :
+\f[\sum _{I: \; \texttt{src} (I) \ne0 } 1\f]
+@param src single-channel array.
+@sa mean, meanStdDev, norm, minMaxLoc, calcCovarMatrix
+*/
+CV_EXPORTS_W int countNonZero( InputArray src );
+
+/** @brief Returns the list of locations of non-zero pixels
+
+Given a binary matrix (likely returned from an operation such
+as threshold(), compare(), >, ==, etc), return all of
+the non-zero indices as a cv::Mat or std::vector<cv::Point> (x,y).
+For example:
+@code{.cpp}
+ cv::Mat binaryImage; // input, binary image
+ cv::Mat locations; // output, locations of non-zero pixels
+ cv::findNonZero(binaryImage, locations);
+
+ // access pixel coordinates
+ Point pnt = locations.at<Point>(i);
+@endcode
+or
+@code{.cpp}
+ cv::Mat binaryImage; // input, binary image
+ vector<Point> locations; // output, locations of non-zero pixels
+ cv::findNonZero(binaryImage, locations);
+
+ // access pixel coordinates
+ Point pnt = locations[i];
+@endcode
+@param src single-channel array (type CV_8UC1)
+@param idx the output array, type of cv::Mat or std::vector<Point>, corresponding to non-zero indices in the input
+*/
+CV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx );
+
+/** @brief Calculates an average (mean) of array elements.
+
+The function mean calculates the mean value M of array elements,
+independently for each channel, and returns it:
+\f[\begin{array}{l} N = \sum _{I: \; \texttt{mask} (I) \ne 0} 1 \\ M_c = \left ( \sum _{I: \; \texttt{mask} (I) \ne 0}{ \texttt{mtx} (I)_c} \right )/N \end{array}\f]
+When all the mask elements are 0's, the function returns Scalar::all(0).
+@param src input array that should have from 1 to 4 channels so that the result can be stored in
+Scalar_ .
+@param mask optional operation mask.
+@sa countNonZero, meanStdDev, norm, minMaxLoc
+*/
+CV_EXPORTS_W Scalar mean(InputArray src, InputArray mask = noArray());
+
+/** Calculates a mean and standard deviation of array elements.
+
+The function meanStdDev calculates the mean and the standard deviation M
+of array elements independently for each channel and returns it via the
+output parameters:
+\f[\begin{array}{l} N = \sum _{I, \texttt{mask} (I) \ne 0} 1 \\ \texttt{mean} _c = \frac{\sum_{ I: \; \texttt{mask}(I) \ne 0} \texttt{src} (I)_c}{N} \\ \texttt{stddev} _c = \sqrt{\frac{\sum_{ I: \; \texttt{mask}(I) \ne 0} \left ( \texttt{src} (I)_c - \texttt{mean} _c \right )^2}{N}} \end{array}\f]
+When all the mask elements are 0's, the functions return
+mean=stddev=Scalar::all(0).
+@note The calculated standard deviation is only the diagonal of the
+complete normalized covariance matrix. If the full matrix is needed, you
+can reshape the multi-channel array M x N to the single-channel array
+M\*N x mtx.channels() (only possible when the matrix is continuous) and
+then pass the matrix to calcCovarMatrix .
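+An illustrative sketch of that reshape-based approach for a continuous CV_8UC3 image
+(the names img, samples and covar are placeholders):
+@code{.cpp}
+ Mat samples;
+ img.reshape(1, img.rows*img.cols).convertTo(samples, CV_32F); // (M*N) x 3, single channel
+ Mat covar, mu;
+ calcCovarMatrix(samples, covar, mu, COVAR_NORMAL | COVAR_ROWS | COVAR_SCALE, CV_32F);
+@endcode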
+@param src input array that should have from 1 to 4 channels so that the results can be stored in
+Scalar_ 's.
+@param mean output parameter: calculated mean value.
+@param stddev output parameter: calculated standard deviation.
+@param mask optional operation mask.
+@sa countNonZero, mean, norm, minMaxLoc, calcCovarMatrix
+*/
+CV_EXPORTS_W void meanStdDev(InputArray src, OutputArray mean, OutputArray stddev,
+ InputArray mask=noArray());
+
+/** @brief Calculates an absolute array norm, an absolute difference norm, or a
+relative difference norm.
+
+The functions norm calculate an absolute norm of src1 (when there is no
+src2 ):
+
+\f[norm = \forkthree{\|\texttt{src1}\|_{L_{\infty}} = \max _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) }
+{ \| \texttt{src1} \| _{L_1} = \sum _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\) }
+{ \| \texttt{src1} \| _{L_2} = \sqrt{\sum_I \texttt{src1}(I)^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) }\f]
+
+or an absolute or relative difference norm if src2 is there:
+
+\f[norm = \forkthree{\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} = \max _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) }
+{ \| \texttt{src1} - \texttt{src2} \| _{L_1} = \sum _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\) }
+{ \| \texttt{src1} - \texttt{src2} \| _{L_2} = \sqrt{\sum_I (\texttt{src1}(I) - \texttt{src2}(I))^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) }\f]
+
+or
+
+\f[norm = \forkthree{\frac{\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} }{\|\texttt{src2}\|_{L_{\infty}} }}{if \(\texttt{normType} = \texttt{NORM_RELATIVE_INF}\) }
+{ \frac{\|\texttt{src1}-\texttt{src2}\|_{L_1} }{\|\texttt{src2}\|_{L_1}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE_L1}\) }
+{ \frac{\|\texttt{src1}-\texttt{src2}\|_{L_2} }{\|\texttt{src2}\|_{L_2}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE_L2}\) }\f]
+
+The functions norm return the calculated norm.
+
+When the mask parameter is specified and it is not empty, the norm is
+calculated only over the region specified by the mask.
+
+Multi-channel input arrays are treated as single-channel arrays; that is,
+the results for all channels are combined.
+
+@param src1 first input array.
+@param normType type of the norm (see cv::NormTypes).
+@param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.
+*/
+CV_EXPORTS_W double norm(InputArray src1, int normType = NORM_L2, InputArray mask = noArray());
+
+/** @overload
+@param src1 first input array.
+@param src2 second input array of the same size and the same type as src1.
+@param normType type of the norm (cv::NormTypes).
+@param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.
+*/
+CV_EXPORTS_W double norm(InputArray src1, InputArray src2,
+ int normType = NORM_L2, InputArray mask = noArray());
+/** @overload
+@param src first input array.
+@param normType type of the norm (see cv::NormTypes).
+*/
+CV_EXPORTS double norm( const SparseMat& src, int normType );
+
+/** @brief computes PSNR image/video quality metric
+
+see http://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio for details
+@todo document
+ */
+CV_EXPORTS_W double PSNR(InputArray src1, InputArray src2);
+
+/** @brief naive nearest neighbor finder
+
+see http://en.wikipedia.org/wiki/Nearest_neighbor_search
+@todo document
+ */
+CV_EXPORTS_W void batchDistance(InputArray src1, InputArray src2,
+ OutputArray dist, int dtype, OutputArray nidx,
+ int normType = NORM_L2, int K = 0,
+ InputArray mask = noArray(), int update = 0,
+ bool crosscheck = false);
+
+/** @brief Normalizes the norm or value range of an array.
+
+The functions normalize scale and shift the input array elements so that
+\f[\| \texttt{dst} \| _{L_p}= \texttt{alpha}\f]
+(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, or NORM_L2, respectively; or so that
+\f[\min _I \texttt{dst} (I)= \texttt{alpha} , \, \, \max _I \texttt{dst} (I)= \texttt{beta}\f]
+
+when normType=NORM_MINMAX (for dense arrays only). The optional mask specifies a sub-array to be
+normalized. This means that the norm or min-n-max are calculated over the sub-array, and then this
+sub-array is modified to be normalized. If you want to only use the mask to calculate the norm or
+min-max but modify the whole array, you can use norm and Mat::convertTo.
+
+In case of sparse matrices, only the non-zero values are analyzed and transformed. Because of this,
+the range transformation for sparse matrices is not allowed since it can shift the zero level.
+
+Possible usage with some positive example data:
+@code{.cpp}
+ vector<double> positiveData = { 2.0, 8.0, 10.0 };
+ vector<double> normalizedData_l1, normalizedData_l2, normalizedData_inf, normalizedData_minmax;
+
+ // Norm to probability (total count)
+ // sum(numbers) = 20.0
+ // 2.0 0.1 (2.0/20.0)
+ // 8.0 0.4 (8.0/20.0)
+ // 10.0 0.5 (10.0/20.0)
+ normalize(positiveData, normalizedData_l1, 1.0, 0.0, NORM_L1);
+
+ // Norm to unit vector: ||positiveData|| = 1.0
+ // 2.0 0.15
+ // 8.0 0.62
+ // 10.0 0.77
+ normalize(positiveData, normalizedData_l2, 1.0, 0.0, NORM_L2);
+
+ // Norm to max element
+ // 2.0 0.2 (2.0/10.0)
+ // 8.0 0.8 (8.0/10.0)
+ // 10.0 1.0 (10.0/10.0)
+ normalize(positiveData, normalizedData_inf, 1.0, 0.0, NORM_INF);
+
+ // Norm to range [0.0;1.0]
+ // 2.0 0.0 (shift to left border)
+ // 8.0 0.75 (6.0/8.0)
+ // 10.0 1.0 (shift to right border)
+ normalize(positiveData, normalizedData_minmax, 1.0, 0.0, NORM_MINMAX);
+@endcode
+
+@param src input array.
+@param dst output array of the same size as src .
+@param alpha norm value to normalize to or the lower range boundary in case of the range
+normalization.
+@param beta upper range boundary in case of the range normalization; it is not used for the norm
+normalization.
+@param norm_type normalization type (see cv::NormTypes).
+@param dtype when negative, the output array has the same type as src; otherwise, it has the same
+number of channels as src and the depth =CV_MAT_DEPTH(dtype).
+@param mask optional operation mask.
+@sa norm, Mat::convertTo, SparseMat::convertTo
+*/
+CV_EXPORTS_W void normalize( InputArray src, InputOutputArray dst, double alpha = 1, double beta = 0,
+ int norm_type = NORM_L2, int dtype = -1, InputArray mask = noArray());
+
+/** @overload
+@param src input array.
+@param dst output array of the same size as src .
+@param alpha norm value to normalize to or the lower range boundary in case of the range
+normalization.
+@param normType normalization type (see cv::NormTypes).
+*/
+CV_EXPORTS void normalize( const SparseMat& src, SparseMat& dst, double alpha, int normType );
+
+/** @brief Finds the global minimum and maximum in an array.
+
+The functions minMaxLoc find the minimum and maximum element values and their positions. The
+extremums are searched across the whole array or, if mask is not an empty array, in the specified
+array region.
+
+The functions do not work with multi-channel arrays. If you need to find minimum or maximum
+elements across all the channels, use Mat::reshape first to reinterpret the array as
+single-channel. Or you may extract the particular channel using either extractImageCOI , or
+mixChannels , or split .
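+A minimal sketch (the image name gray is illustrative):
+@code{.cpp}
+ double minVal, maxVal;
+ Point minLoc, maxLoc;
+ minMaxLoc(gray, &minVal, &maxVal, &minLoc, &maxLoc);
+@endcode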
+@param src input single-channel array.
+@param minVal pointer to the returned minimum value; NULL is used if not required.
+@param maxVal pointer to the returned maximum value; NULL is used if not required.
+@param minLoc pointer to the returned minimum location (in 2D case); NULL is used if not required.
+@param maxLoc pointer to the returned maximum location (in 2D case); NULL is used if not required.
+@param mask optional mask used to select a sub-array.
+@sa max, min, compare, inRange, extractImageCOI, mixChannels, split, Mat::reshape
+*/
+CV_EXPORTS_W void minMaxLoc(InputArray src, CV_OUT double* minVal,
+ CV_OUT double* maxVal = 0, CV_OUT Point* minLoc = 0,
+ CV_OUT Point* maxLoc = 0, InputArray mask = noArray());
+
+
+/** @brief Finds the global minimum and maximum in an array
+
+The function minMaxIdx finds the minimum and maximum element values and their positions. The
+extremums are searched across the whole array or, if mask is not an empty array, in the specified
+array region. The function does not work with multi-channel arrays. If you need to find minimum or
+maximum elements across all the channels, use Mat::reshape first to reinterpret the array as
+single-channel. Or you may extract the particular channel using either extractImageCOI , or
+mixChannels , or split . In case of a sparse matrix, the minimum is found among non-zero elements
+only.
+@note When minIdx is not NULL, it must have at least 2 elements (as well as maxIdx), even if src is
+a single-row or single-column matrix. In OpenCV (following MATLAB) each array has at least 2
+dimensions, i.e. single-column matrix is Mx1 matrix (and therefore minIdx/maxIdx will be
+(i1,0)/(i2,0)) and single-row matrix is 1xN matrix (and therefore minIdx/maxIdx will be
+(0,j1)/(0,j2)).
+@param src input single-channel array.
+@param minVal pointer to the returned minimum value; NULL is used if not required.
+@param maxVal pointer to the returned maximum value; NULL is used if not required.
+@param minIdx pointer to the returned minimum location (in nD case); NULL is used if not required;
+Otherwise, it must point to an array of src.dims elements, the coordinates of the minimum element
+in each dimension are stored there sequentially.
+@param maxIdx pointer to the returned maximum location (in nD case). NULL is used if not required.
+@param mask specified array region
+*/
+CV_EXPORTS void minMaxIdx(InputArray src, double* minVal, double* maxVal = 0,
+ int* minIdx = 0, int* maxIdx = 0, InputArray mask = noArray());
+
+/** @overload
+@param a input single-channel array.
+@param minVal pointer to the returned minimum value; NULL is used if not required.
+@param maxVal pointer to the returned maximum value; NULL is used if not required.
+@param minIdx pointer to the returned minimum location (in nD case); NULL is used if not required;
+Otherwise, it must point to an array of src.dims elements, the coordinates of the minimum element
+in each dimension are stored there sequentially.
+@param maxIdx pointer to the returned maximum location (in nD case). NULL is used if not required.
+*/
+CV_EXPORTS void minMaxLoc(const SparseMat& a, double* minVal,
+ double* maxVal, int* minIdx = 0, int* maxIdx = 0);
+
+/** @brief Reduces a matrix to a vector.
+
+The function reduce reduces the matrix to a vector by treating the matrix rows/columns as a set of
+1D vectors and performing the specified operation on the vectors until a single row/column is
+obtained. For example, the function can be used to compute horizontal and vertical projections of a
+raster image. In case of REDUCE_SUM and REDUCE_AVG , the output may have a larger element
+bit-depth to preserve accuracy. And multi-channel arrays are also supported in these two reduction
+modes.
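+For instance, a sketch of a vertical projection (per-column sums) of an 8-bit image,
+accumulated in 32-bit integers (the name img8u is illustrative):
+@code{.cpp}
+ Mat colSums;
+ reduce(img8u, colSums, 0, REDUCE_SUM, CV_32S); // 1 x img8u.cols
+@endcode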
+@param src input 2D matrix.
+@param dst output vector. Its size and type is defined by dim and dtype parameters.
+@param dim dimension index along which the matrix is reduced. 0 means that the matrix is reduced to
+a single row. 1 means that the matrix is reduced to a single column.
+@param rtype reduction operation that could be one of cv::ReduceTypes
+@param dtype when negative, the output vector will have the same type as the input matrix,
+otherwise, its type will be CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src.channels()).
+@sa repeat
+*/
+CV_EXPORTS_W void reduce(InputArray src, OutputArray dst, int dim, int rtype, int dtype = -1);
+
+/** @brief Creates one multi-channel array out of several single-channel ones.
+
+The function merge merges several arrays to make a single multi-channel array. That is, each
+element of the output array will be a concatenation of the elements of the input arrays, where
+elements of i-th input array are treated as mv[i].channels()-element vectors.
+
+The function cv::split does the reverse operation. If you need to shuffle channels in some other
+advanced way, use cv::mixChannels.
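+A round-trip sketch (the names bgr and recombined are illustrative):
+@code{.cpp}
+ std::vector<Mat> planes;
+ split(bgr, planes); // three CV_8UC1 planes from a CV_8UC3 image
+ Mat recombined;
+ merge(planes, recombined); // recombined is CV_8UC3 again
+@endcode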
+@param mv input array of matrices to be merged; all the matrices in mv must have the same
+size and the same depth.
+@param count number of input matrices when mv is a plain C array; it must be greater than zero.
+@param dst output array of the same size and the same depth as mv[0]; The number of channels will
+be equal to the parameter count.
+@sa mixChannels, split, Mat::reshape
+*/
+CV_EXPORTS void merge(const Mat* mv, size_t count, OutputArray dst);
+
+/** @overload
+@param mv input vector of matrices to be merged; all the matrices in mv must have the same
+size and the same depth.
+@param dst output array of the same size and the same depth as mv[0]; The number of channels will
+be the total number of channels in the matrix array.
+ */
+CV_EXPORTS_W void merge(InputArrayOfArrays mv, OutputArray dst);
+
+/** @brief Divides a multi-channel array into several single-channel arrays.
+
+The functions split split a multi-channel array into separate single-channel arrays:
+\f[\texttt{mv} [c](I) = \texttt{src} (I)_c\f]
+If you need to extract a single channel or do some other sophisticated channel permutation, use
+mixChannels .
+@param src input multi-channel array.
+@param mvbegin output array; the number of arrays must match src.channels(); the arrays themselves are
+reallocated, if needed.
+@sa merge, mixChannels, cvtColor
+*/
+CV_EXPORTS void split(const Mat& src, Mat* mvbegin);
+
+/** @overload
+@param m input multi-channel array.
+@param mv output vector of arrays; the arrays themselves are reallocated, if needed.
+*/
+CV_EXPORTS_W void split(InputArray m, OutputArrayOfArrays mv);
+
+/** @brief Copies specified channels from input arrays to the specified channels of
+output arrays.
+
+The function cv::mixChannels provides an advanced mechanism for shuffling image channels.
+
+cv::split and cv::merge and some forms of cv::cvtColor are partial cases of cv::mixChannels .
+
+In the example below, the code splits a 4-channel BGRA image into a 3-channel BGR (with B and R
+channels swapped) and a separate alpha-channel image:
+@code{.cpp}
+ Mat bgra( 100, 100, CV_8UC4, Scalar(255,0,0,255) );
+ Mat bgr( bgra.rows, bgra.cols, CV_8UC3 );
+ Mat alpha( bgra.rows, bgra.cols, CV_8UC1 );
+
+ // forming an array of matrices is a quite efficient operation,
+ // because the matrix data is not copied, only the headers
+ Mat out[] = { bgr, alpha };
+ // bgra[0] -> bgr[2], bgra[1] -> bgr[1],
+ // bgra[2] -> bgr[0], bgra[3] -> alpha[0]
+ int from_to[] = { 0,2, 1,1, 2,0, 3,3 };
+ mixChannels( &bgra, 1, out, 2, from_to, 4 );
+@endcode
+@note Unlike many other new-style C++ functions in OpenCV (see the introduction section and
+Mat::create ), cv::mixChannels requires the output arrays to be pre-allocated before calling the
+function.
+@param src input array or vector of matrices; all of the matrices must have the same size and the
+same depth.
+@param nsrcs number of matrices in `src`.
+@param dst output array or vector of matrices; all the matrices **must be allocated**; their size and
+depth must be the same as in `src[0]`.
+@param ndsts number of matrices in `dst`.
+@param fromTo array of index pairs specifying which channels are copied and where; fromTo[k\*2] is
+a 0-based index of the input channel in src, fromTo[k\*2+1] is an index of the output channel in
+dst; the continuous channel numbering is used: the first input image channels are indexed from 0 to
+src[0].channels()-1, the second input image channels are indexed from src[0].channels() to
+src[0].channels() + src[1].channels()-1, and so on, the same scheme is used for the output image
+channels; as a special case, when fromTo[k\*2] is negative, the corresponding output channel is
+filled with zero .
+@param npairs number of index pairs in `fromTo`.
+@sa cv::split, cv::merge, cv::cvtColor
+*/
+CV_EXPORTS void mixChannels(const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts,
+ const int* fromTo, size_t npairs);
+
+/** @overload
+@param src input array or vector of matrices; all of the matrices must have the same size and the
+same depth.
+@param dst output array or vector of matrices; all the matrices **must be allocated**; their size and
+depth must be the same as in src[0].
+@param fromTo array of index pairs specifying which channels are copied and where; fromTo[k\*2] is
+a 0-based index of the input channel in src, fromTo[k\*2+1] is an index of the output channel in
+dst; the continuous channel numbering is used: the first input image channels are indexed from 0 to
+src[0].channels()-1, the second input image channels are indexed from src[0].channels() to
+src[0].channels() + src[1].channels()-1, and so on, the same scheme is used for the output image
+channels; as a special case, when fromTo[k\*2] is negative, the corresponding output channel is
+filled with zero .
+@param npairs number of index pairs in fromTo.
+*/
+CV_EXPORTS void mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,
+ const int* fromTo, size_t npairs);
+
+/** @overload
+@param src input array or vector of matrices; all of the matrices must have the same size and the
+same depth.
+@param dst output array or vector of matrices; all the matrices **must be allocated**; their size and
+depth must be the same as in src[0].
+@param fromTo array of index pairs specifying which channels are copied and where; fromTo[k\*2] is
+a 0-based index of the input channel in src, fromTo[k\*2+1] is an index of the output channel in
+dst; the continuous channel numbering is used: the first input image channels are indexed from 0 to
+src[0].channels()-1, the second input image channels are indexed from src[0].channels() to
+src[0].channels() + src[1].channels()-1, and so on, the same scheme is used for the output image
+channels; as a special case, when fromTo[k\*2] is negative, the corresponding output channel is
+filled with zero .
+*/
+CV_EXPORTS_W void mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,
+ const std::vector<int>& fromTo);
+
+/** @brief extracts a single channel from src (coi is 0-based index)
+@todo document
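+A one-line sketch (the names bgr and green are illustrative):
+@code{.cpp}
+ Mat green;
+ extractChannel(bgr, green, 1); // grab the G plane of a BGR image
+@endcode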
+*/
+CV_EXPORTS_W void extractChannel(InputArray src, OutputArray dst, int coi);
+
+/** @brief inserts a single channel to dst (coi is 0-based index)
+@todo document
+*/
+CV_EXPORTS_W void insertChannel(InputArray src, InputOutputArray dst, int coi);
+
+/** @brief Flips a 2D array around vertical, horizontal, or both axes.
+
+The function flip flips the array in one of three different ways (row
+and column indices are 0-based):
+\f[\texttt{dst} _{ij} =
+\left\{
+\begin{array}{l l}
+\texttt{src} _{\texttt{src.rows}-i-1,j} & if\; \texttt{flipCode} = 0 \\
+\texttt{src} _{i, \texttt{src.cols} -j-1} & if\; \texttt{flipCode} > 0 \\
+\texttt{src} _{ \texttt{src.rows} -i-1, \texttt{src.cols} -j-1} & if\; \texttt{flipCode} < 0 \\
+\end{array}
+\right.\f]
+The example scenarios of using the function are the following:
+* Vertical flipping of the image (flipCode == 0) to switch between
+ top-left and bottom-left image origin. This is a typical operation
+ in video processing on Microsoft Windows\* OS.
+* Horizontal flipping of the image with the subsequent horizontal
+ shift and absolute difference calculation to check for a
+ vertical-axis symmetry (flipCode \> 0).
+* Simultaneous horizontal and vertical flipping of the image with
+ the subsequent shift and absolute difference calculation to check
+ for a central symmetry (flipCode \< 0).
+* Reversing the order of point arrays (flipCode \> 0 or
+ flipCode == 0).
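+A minimal sketch (the names img and mirrored are illustrative):
+@code{.cpp}
+ Mat mirrored;
+ flip(img, mirrored, 1); // flip around the y-axis (horizontal mirror)
+@endcode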
+@param src input array.
+@param dst output array of the same size and type as src.
+@param flipCode a flag to specify how to flip the array; 0 means
+flipping around the x-axis and positive value (for example, 1) means
+flipping around y-axis. Negative value (for example, -1) means flipping
+around both axes.
+@sa transpose , repeat , completeSymm
+*/
+CV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode);
+
+/** @brief Fills the output array with repeated copies of the input array.
+
+The functions repeat duplicate the input array one or more times along each of the two axes:
+\f[\texttt{dst} _{ij}= \texttt{src} _{i\mod src.rows, \; j\mod src.cols }\f]
+The second variant of the function is more convenient to use with @ref MatrixExpressions.
+@param src input array to replicate.
+@param dst output array of the same type as src.
+@param ny Flag to specify how many times the src is repeated along the
+vertical axis.
+@param nx Flag to specify how many times the src is repeated along the
+horizontal axis.
+@sa reduce
+*/
+CV_EXPORTS_W void repeat(InputArray src, int ny, int nx, OutputArray dst);
+
+/** @overload
+@param src input array to replicate.
+@param ny Flag to specify how many times the src is repeated along the
+vertical axis.
+@param nx Flag to specify how many times the src is repeated along the
+horizontal axis.
+ */
+CV_EXPORTS Mat repeat(const Mat& src, int ny, int nx);
+
+/** @brief Applies horizontal concatenation to given matrices.
+
+The function horizontally concatenates two or more cv::Mat matrices (with the same number of rows).
+@code{.cpp}
+ cv::Mat matArray[] = { cv::Mat(4, 1, CV_8UC1, cv::Scalar(1)),
+ cv::Mat(4, 1, CV_8UC1, cv::Scalar(2)),
+ cv::Mat(4, 1, CV_8UC1, cv::Scalar(3)),};
+
+ cv::Mat out;
+ cv::hconcat( matArray, 3, out );
+ //out:
+ //[1, 2, 3;
+ // 1, 2, 3;
+ // 1, 2, 3;
+ // 1, 2, 3]
+@endcode
+@param src input array or vector of matrices. All of the matrices must have the same number of rows and the same depth.
+@param nsrc number of matrices in src.
+@param dst output array. It has the same number of rows and depth as the src, and the sum of cols of the src.
+@sa cv::vconcat(const Mat*, size_t, OutputArray), @sa cv::vconcat(InputArrayOfArrays, OutputArray) and @sa cv::vconcat(InputArray, InputArray, OutputArray)
+*/
+CV_EXPORTS void hconcat(const Mat* src, size_t nsrc, OutputArray dst);
+/** @overload
+ @code{.cpp}
+ cv::Mat_<float> A = (cv::Mat_<float>(3, 2) << 1, 4,
+ 2, 5,
+ 3, 6);
+ cv::Mat_<float> B = (cv::Mat_<float>(3, 2) << 7, 10,
+ 8, 11,
+ 9, 12);
+
+ cv::Mat C;
+ cv::hconcat(A, B, C);
+ //C:
+ //[1, 4, 7, 10;
+ // 2, 5, 8, 11;
+ // 3, 6, 9, 12]
+ @endcode
+ @param src1 first input array to be considered for horizontal concatenation.
+ @param src2 second input array to be considered for horizontal concatenation.
+ @param dst output array. It has the same number of rows and depth as the src1 and src2, and the sum of cols of the src1 and src2.
+ */
+CV_EXPORTS void hconcat(InputArray src1, InputArray src2, OutputArray dst);
+/** @overload
+ @code{.cpp}
+ std::vector<cv::Mat> matrices = { cv::Mat(4, 1, CV_8UC1, cv::Scalar(1)),
+ cv::Mat(4, 1, CV_8UC1, cv::Scalar(2)),
+ cv::Mat(4, 1, CV_8UC1, cv::Scalar(3)),};
+
+ cv::Mat out;
+ cv::hconcat( matrices, out );
+ //out:
+ //[1, 2, 3;
+ // 1, 2, 3;
+ // 1, 2, 3;
+ // 1, 2, 3]
+ @endcode
+ @param src input array or vector of matrices. All of the matrices must have the same number of rows and the same depth.
+ @param dst output array. It has the same number of rows and depth as the src, and the sum of cols of the src.
+ */
+CV_EXPORTS_W void hconcat(InputArrayOfArrays src, OutputArray dst);
+
+/** @brief Applies vertical concatenation to given matrices.
+
+The function vertically concatenates two or more cv::Mat matrices (with the same number of cols).
+@code{.cpp}
+ cv::Mat matArray[] = { cv::Mat(1, 4, CV_8UC1, cv::Scalar(1)),
+ cv::Mat(1, 4, CV_8UC1, cv::Scalar(2)),
+ cv::Mat(1, 4, CV_8UC1, cv::Scalar(3)),};
+
+ cv::Mat out;
+ cv::vconcat( matArray, 3, out );
+ //out:
+ //[1, 1, 1, 1;
+ // 2, 2, 2, 2;
+ // 3, 3, 3, 3]
+@endcode
+@param src input array or vector of matrices. All of the matrices must have the same number of cols and the same depth.
+@param nsrc number of matrices in src.
+@param dst output array. It has the same number of cols and depth as the src, and the sum of rows of the src.
+@sa cv::hconcat(const Mat*, size_t, OutputArray), @sa cv::hconcat(InputArrayOfArrays, OutputArray) and @sa cv::hconcat(InputArray, InputArray, OutputArray)
+*/
+CV_EXPORTS void vconcat(const Mat* src, size_t nsrc, OutputArray dst);
+/** @overload
+ @code{.cpp}
+ cv::Mat_<float> A = (cv::Mat_<float>(3, 2) << 1, 7,
+ 2, 8,
+ 3, 9);
+ cv::Mat_<float> B = (cv::Mat_<float>(3, 2) << 4, 10,
+ 5, 11,
+ 6, 12);
+
+ cv::Mat C;
+ cv::vconcat(A, B, C);
+ //C:
+ //[1, 7;
+ // 2, 8;
+ // 3, 9;
+ // 4, 10;
+ // 5, 11;
+ // 6, 12]
+ @endcode
+ @param src1 first input array to be considered for vertical concatenation.
+ @param src2 second input array to be considered for vertical concatenation.
+ @param dst output array. It has the same number of cols and depth as the src1 and src2, and the sum of rows of the src1 and src2.
+ */
+CV_EXPORTS void vconcat(InputArray src1, InputArray src2, OutputArray dst);
+/** @overload
+ @code{.cpp}
+ std::vector<cv::Mat> matrices = { cv::Mat(1, 4, CV_8UC1, cv::Scalar(1)),
+ cv::Mat(1, 4, CV_8UC1, cv::Scalar(2)),
+ cv::Mat(1, 4, CV_8UC1, cv::Scalar(3)),};
+
+ cv::Mat out;
+ cv::vconcat( matrices, out );
+ //out:
+ //[1, 1, 1, 1;
+ // 2, 2, 2, 2;
+ // 3, 3, 3, 3]
+ @endcode
+ @param src input array or vector of matrices. All of the matrices must have the same number of cols and the same depth.
+ @param dst output array. It has the same number of cols and depth as the src, and the sum of rows of the src.
+ */
+CV_EXPORTS_W void vconcat(InputArrayOfArrays src, OutputArray dst);
+
+/** @brief computes bitwise conjunction of the two arrays (dst = src1 & src2)
+Calculates the per-element bit-wise conjunction of two arrays or an
+array and a scalar.
+
+The function calculates the per-element bit-wise logical conjunction for:
+* Two arrays when src1 and src2 have the same size:
+ \f[\texttt{dst} (I) = \texttt{src1} (I) \wedge \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
+* An array and a scalar when src2 is constructed from Scalar or has
+ the same number of elements as `src1.channels()`:
+ \f[\texttt{dst} (I) = \texttt{src1} (I) \wedge \texttt{src2} \quad \texttt{if mask} (I) \ne0\f]
+* A scalar and an array when src1 is constructed from Scalar or has
+ the same number of elements as `src2.channels()`:
+ \f[\texttt{dst} (I) = \texttt{src1} \wedge \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
+In case of floating-point arrays, their machine-specific bit
+representations (usually IEEE754-compliant) are used for the operation.
+In case of multi-channel arrays, each channel is processed
+independently. In the second and third cases above, the scalar is first
+converted to the array type.
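+
+For example, a minimal sketch with arbitrary 8-bit values; the second element of
+dst keeps its initial value because the mask is zero there:
+@code{.cpp}
+    Mat a = (Mat_<uchar>(1, 3) << 0x0F, 0xF0, 0xFF);
+    Mat b = (Mat_<uchar>(1, 3) << 0xFF, 0xFF, 0x0F);
+    Mat mask = (Mat_<uchar>(1, 3) << 1, 0, 1);
+    Mat dst = Mat::zeros(1, 3, CV_8UC1);   // pre-fill so unmasked elements are defined
+    bitwise_and(a, b, dst, mask);
+    // dst: [15, 0, 15]
+@endcode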
+@param src1 first input array or a scalar.
+@param src2 second input array or a scalar.
+@param dst output array that has the same size and type as the input
+arrays.
+@param mask optional operation mask, 8-bit single channel array, that
+specifies elements of the output array to be changed.
+*/
+CV_EXPORTS_W void bitwise_and(InputArray src1, InputArray src2,
+ OutputArray dst, InputArray mask = noArray());
+
+/** @brief Calculates the per-element bit-wise disjunction of two arrays or an
+array and a scalar.
+
+The function calculates the per-element bit-wise logical disjunction for:
+* Two arrays when src1 and src2 have the same size:
+ \f[\texttt{dst} (I) = \texttt{src1} (I) \vee \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
+* An array and a scalar when src2 is constructed from Scalar or has
+ the same number of elements as `src1.channels()`:
+ \f[\texttt{dst} (I) = \texttt{src1} (I) \vee \texttt{src2} \quad \texttt{if mask} (I) \ne0\f]
+* A scalar and an array when src1 is constructed from Scalar or has
+ the same number of elements as `src2.channels()`:
+ \f[\texttt{dst} (I) = \texttt{src1} \vee \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
+In case of floating-point arrays, their machine-specific bit
+representations (usually IEEE754-compliant) are used for the operation.
+In case of multi-channel arrays, each channel is processed
+independently. In the second and third cases above, the scalar is first
+converted to the array type.
+@param src1 first input array or a scalar.
+@param src2 second input array or a scalar.
+@param dst output array that has the same size and type as the input
+arrays.
+@param mask optional operation mask, 8-bit single channel array, that
+specifies elements of the output array to be changed.
+*/
+CV_EXPORTS_W void bitwise_or(InputArray src1, InputArray src2,
+ OutputArray dst, InputArray mask = noArray());
+
+/** @brief Calculates the per-element bit-wise "exclusive or" operation on two
+arrays or an array and a scalar.
+
+The function calculates the per-element bit-wise logical "exclusive-or"
+operation for:
+* Two arrays when src1 and src2 have the same size:
+ \f[\texttt{dst} (I) = \texttt{src1} (I) \oplus \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
+* An array and a scalar when src2 is constructed from Scalar or has
+ the same number of elements as `src1.channels()`:
+ \f[\texttt{dst} (I) = \texttt{src1} (I) \oplus \texttt{src2} \quad \texttt{if mask} (I) \ne0\f]
+* A scalar and an array when src1 is constructed from Scalar or has
+ the same number of elements as `src2.channels()`:
+ \f[\texttt{dst} (I) = \texttt{src1} \oplus \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
+In case of floating-point arrays, their machine-specific bit
+representations (usually IEEE754-compliant) are used for the operation.
+In case of multi-channel arrays, each channel is processed
+independently. In the 2nd and 3rd cases above, the scalar is first
+converted to the array type.
+@param src1 first input array or a scalar.
+@param src2 second input array or a scalar.
+@param dst output array that has the same size and type as the input
+arrays.
+@param mask optional operation mask, 8-bit single channel array, that
+specifies elements of the output array to be changed.
+*/
+CV_EXPORTS_W void bitwise_xor(InputArray src1, InputArray src2,
+ OutputArray dst, InputArray mask = noArray());
+
+/** @brief Inverts every bit of an array.
+
+The function calculates per-element bit-wise inversion of the input
+array:
+\f[\texttt{dst} (I) = \neg \texttt{src} (I)\f]
+In case of a floating-point input array, its machine-specific bit
+representation (usually IEEE754-compliant) is used for the operation. In
+case of multi-channel arrays, each channel is processed independently.
+@param src input array.
+@param dst output array that has the same size and type as the input
+array.
+@param mask optional operation mask, 8-bit single channel array, that
+specifies elements of the output array to be changed.
+*/
+CV_EXPORTS_W void bitwise_not(InputArray src, OutputArray dst,
+ InputArray mask = noArray());
+
+/** @brief Calculates the per-element absolute difference between two arrays or between an array and a scalar.
+
+The function absdiff calculates:
+* Absolute difference between two arrays when they have the same
+ size and type:
+ \f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1}(I) - \texttt{src2}(I)|)\f]
+* Absolute difference between an array and a scalar when the second
+ array is constructed from Scalar or has as many elements as the
+ number of channels in `src1`:
+ \f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1}(I) - \texttt{src2} |)\f]
+* Absolute difference between a scalar and an array when the first
+ array is constructed from Scalar or has as many elements as the
+ number of channels in `src2`:
+ \f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1} - \texttt{src2}(I) |)\f]
+ where I is a multi-dimensional index of array elements. In case of
+ multi-channel arrays, each channel is processed independently.
+@note Saturation is not applied when the arrays have the depth CV_32S.
+You may even get a negative value in the case of overflow.
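+
+For example, a minimal sketch with arbitrary 8-bit inputs:
+@code{.cpp}
+    Mat a = (Mat_<uchar>(1, 3) << 10, 200, 50);
+    Mat b = (Mat_<uchar>(1, 3) << 60, 20, 50);
+    Mat d;
+    absdiff(a, b, d);
+    // d: [50, 180, 0]
+@endcode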
+@param src1 first input array or a scalar.
+@param src2 second input array or a scalar.
+@param dst output array that has the same size and type as input arrays.
+@sa cv::abs(const Mat&)
+*/
+CV_EXPORTS_W void absdiff(InputArray src1, InputArray src2, OutputArray dst);
+
+/** @brief Checks if array elements lie between the elements of two other arrays.
+
+The function checks the range as follows:
+- For every element of a single-channel input array:
+ \f[\texttt{dst} (I)= \texttt{lowerb} (I)_0 \leq \texttt{src} (I)_0 \leq \texttt{upperb} (I)_0\f]
+- For two-channel arrays:
+ \f[\texttt{dst} (I)= \texttt{lowerb} (I)_0 \leq \texttt{src} (I)_0 \leq \texttt{upperb} (I)_0 \land \texttt{lowerb} (I)_1 \leq \texttt{src} (I)_1 \leq \texttt{upperb} (I)_1\f]
+- and so forth.
+
+That is, dst (I) is set to 255 (all 1-bits) if src (I) is within the
+specified 1D, 2D, 3D, ... box and 0 otherwise.
+
+When the lower and/or upper boundary parameters are scalars, the indexes
+(I) at lowerb and upperb in the above formulas should be omitted.
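+
+For example, a minimal sketch keeping only values inside the inclusive range [50, 100]:
+@code{.cpp}
+    Mat src = (Mat_<uchar>(1, 4) << 10, 60, 100, 200);
+    Mat dst;
+    inRange(src, Scalar(50), Scalar(100), dst);
+    // dst: [0, 255, 255, 0]
+@endcode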
+@param src first input array.
+@param lowerb inclusive lower boundary array or a scalar.
+@param upperb inclusive upper boundary array or a scalar.
+@param dst output array of the same size as src and CV_8U type.
+*/
+CV_EXPORTS_W void inRange(InputArray src, InputArray lowerb,
+ InputArray upperb, OutputArray dst);
+
+/** @brief Performs the per-element comparison of two arrays or an array and scalar value.
+
+The function compares:
+* Elements of two arrays when src1 and src2 have the same size:
+ \f[\texttt{dst} (I) = \texttt{src1} (I) \,\texttt{cmpop}\, \texttt{src2} (I)\f]
+* Elements of src1 with a scalar src2 when src2 is constructed from
+ Scalar or has a single element:
+ \f[\texttt{dst} (I) = \texttt{src1}(I) \,\texttt{cmpop}\, \texttt{src2}\f]
+* src1 with elements of src2 when src1 is constructed from Scalar or
+ has a single element:
+ \f[\texttt{dst} (I) = \texttt{src1} \,\texttt{cmpop}\, \texttt{src2} (I)\f]
+When the comparison result is true, the corresponding element of output
+array is set to 255. The comparison operations can be replaced with the
+equivalent matrix expressions:
+@code{.cpp}
+ Mat dst1 = src1 >= src2;
+ Mat dst2 = src1 < 8;
+ ...
+@endcode
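+The same comparisons written as explicit calls (a sketch with arbitrary 8-bit inputs):
+@code{.cpp}
+    Mat a = (Mat_<uchar>(1, 3) << 1, 5, 9);
+    Mat b = (Mat_<uchar>(1, 3) << 5, 5, 5);
+    Mat ge, lt;
+    compare(a, b, ge, CMP_GE);   // ge: [0, 255, 255]
+    compare(a, 8, lt, CMP_LT);   // lt: [255, 255, 0]
+@endcode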
+@param src1 first input array or a scalar; when it is an array, it must have a single channel.
+@param src2 second input array or a scalar; when it is an array, it must have a single channel.
+@param dst output array of type CV_8U that has the same size and the same number of channels as
+ the input arrays.
+@param cmpop a flag, that specifies correspondence between the arrays (cv::CmpTypes)
+@sa checkRange, min, max, threshold
+*/
+CV_EXPORTS_W void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop);
+
+/** @brief Calculates per-element minimum of two arrays or an array and a scalar.
+
+The functions min calculate the per-element minimum of two arrays:
+\f[\texttt{dst} (I)= \min ( \texttt{src1} (I), \texttt{src2} (I))\f]
+or array and a scalar:
+\f[\texttt{dst} (I)= \min ( \texttt{src1} (I), \texttt{value} )\f]
+@param src1 first input array.
+@param src2 second input array of the same size and type as src1.
+@param dst output array of the same size and type as src1.
+@sa max, compare, inRange, minMaxLoc
+*/
+CV_EXPORTS_W void min(InputArray src1, InputArray src2, OutputArray dst);
+/** @overload
+needed to avoid conflicts with const _Tp& std::min(const _Tp&, const _Tp&, _Compare)
+*/
+CV_EXPORTS void min(const Mat& src1, const Mat& src2, Mat& dst);
+/** @overload
+needed to avoid conflicts with const _Tp& std::min(const _Tp&, const _Tp&, _Compare)
+*/
+CV_EXPORTS void min(const UMat& src1, const UMat& src2, UMat& dst);
+
+/** @brief Calculates per-element maximum of two arrays or an array and a scalar.
+
+The functions max calculate the per-element maximum of two arrays:
+\f[\texttt{dst} (I)= \max ( \texttt{src1} (I), \texttt{src2} (I))\f]
+or array and a scalar:
+\f[\texttt{dst} (I)= \max ( \texttt{src1} (I), \texttt{value} )\f]
+@param src1 first input array.
+@param src2 second input array of the same size and type as src1 .
+@param dst output array of the same size and type as src1.
+@sa min, compare, inRange, minMaxLoc, @ref MatrixExpressions
+*/
+CV_EXPORTS_W void max(InputArray src1, InputArray src2, OutputArray dst);
+/** @overload
+needed to avoid conflicts with const _Tp& std::max(const _Tp&, const _Tp&, _Compare)
+*/
+CV_EXPORTS void max(const Mat& src1, const Mat& src2, Mat& dst);
+/** @overload
+needed to avoid conflicts with const _Tp& std::max(const _Tp&, const _Tp&, _Compare)
+*/
+CV_EXPORTS void max(const UMat& src1, const UMat& src2, UMat& dst);
+
+/** @brief Calculates a square root of array elements.
+
+The functions sqrt calculate a square root of each input array element.
+In case of multi-channel arrays, each channel is processed
+independently. The accuracy is approximately the same as of the built-in
+std::sqrt .
+@param src input floating-point array.
+@param dst output array of the same size and type as src.
+*/
+CV_EXPORTS_W void sqrt(InputArray src, OutputArray dst);
+
+/** @brief Raises every array element to a power.
+
+The function pow raises every element of the input array to power :
+\f[\texttt{dst} (I) = \fork{\texttt{src}(I)^{power}}{if \(\texttt{power}\) is integer}{|\texttt{src}(I)|^{power}}{otherwise}\f]
+
+So, for a non-integer power exponent, the absolute values of input array
+elements are used. However, it is possible to get true values for
+negative values using some extra operations. In the example below,
+computing the 5th root of array src shows:
+@code{.cpp}
+ Mat mask = src < 0;
+ pow(src, 1./5, dst);
+ subtract(Scalar::all(0), dst, dst, mask);
+@endcode
+For some values of power, such as integer values, 0.5 and -0.5,
+specialized faster algorithms are used.
+
+Special values (NaN, Inf) are not handled.
+@param src input array.
+@param power exponent of power.
+@param dst output array of the same size and type as src.
+@sa sqrt, exp, log, cartToPolar, polarToCart
+*/
+CV_EXPORTS_W void pow(InputArray src, double power, OutputArray dst);
+
+/** @brief Calculates the exponent of every array element.
+
+The function exp calculates the exponent of every element of the input
+array:
+\f[\texttt{dst} [I] = e^{ src(I) }\f]
+
+The maximum relative error is about 7e-6 for single-precision input and
+less than 1e-10 for double-precision input. Currently, the function
+converts denormalized values to zeros on output. Special values (NaN,
+Inf) are not handled.
+@param src input array.
+@param dst output array of the same size and type as src.
+@sa log , cartToPolar , polarToCart , phase , pow , sqrt , magnitude
+*/
+CV_EXPORTS_W void exp(InputArray src, OutputArray dst);
+
+/** @brief Calculates the natural logarithm of every array element.
+
+The function log calculates the natural logarithm of the absolute value
+of every element of the input array:
+\f[\texttt{dst} (I) = \fork{\log |\texttt{src}(I)|}{if \(\texttt{src}(I) \ne 0\) }{\texttt{C}}{otherwise}\f]
+
+where C is a large negative number (about -700 in the current
+implementation). The maximum relative error is about 7e-6 for
+single-precision input and less than 1e-10 for double-precision input.
+Special values (NaN, Inf) are not handled.
+@param src input array.
+@param dst output array of the same size and type as src .
+@sa exp, cartToPolar, polarToCart, phase, pow, sqrt, magnitude
+*/
+CV_EXPORTS_W void log(InputArray src, OutputArray dst);
+
+/** @brief Calculates x and y coordinates of 2D vectors from their magnitude and angle.
+
+The function polarToCart calculates the Cartesian coordinates of each 2D
+vector represented by the corresponding elements of magnitude and angle:
+\f[\begin{array}{l} \texttt{x} (I) = \texttt{magnitude} (I) \cos ( \texttt{angle} (I)) \\ \texttt{y} (I) = \texttt{magnitude} (I) \sin ( \texttt{angle} (I)) \\ \end{array}\f]
+
+The relative accuracy of the estimated coordinates is about 1e-6.
+@param magnitude input floating-point array of magnitudes of 2D vectors;
+it can be an empty matrix (=Mat()), in this case, the function assumes
+that all the magnitudes are =1; if it is not empty, it must have the
+same size and type as angle.
+@param angle input floating-point array of angles of 2D vectors.
+@param x output array of x-coordinates of 2D vectors; it has the same
+size and type as angle.
+@param y output array of y-coordinates of 2D vectors; it has the same
+size and type as angle.
+@param angleInDegrees when true, the input angles are measured in
+degrees, otherwise, they are measured in radians.
+@sa cartToPolar, magnitude, phase, exp, log, pow, sqrt
+*/
+CV_EXPORTS_W void polarToCart(InputArray magnitude, InputArray angle,
+ OutputArray x, OutputArray y, bool angleInDegrees = false);
+
+/** @brief Calculates the magnitude and angle of 2D vectors.
+
+The function cartToPolar calculates either the magnitude, angle, or both
+for every 2D vector (x(I),y(I)):
+\f[\begin{array}{l} \texttt{magnitude} (I)= \sqrt{\texttt{x}(I)^2+\texttt{y}(I)^2} , \\ \texttt{angle} (I)= \texttt{atan2} ( \texttt{y} (I), \texttt{x} (I))[ \cdot180 / \pi ] \end{array}\f]
+
+The angles are calculated with accuracy about 0.3 degrees. For the point
+(0,0), the angle is set to 0.
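+
+For example, a minimal sketch with two arbitrary vectors (3,4) and (0,1):
+@code{.cpp}
+    Mat x = (Mat_<float>(1, 2) << 3.f, 0.f);
+    Mat y = (Mat_<float>(1, 2) << 4.f, 1.f);
+    Mat mag, ang;
+    cartToPolar(x, y, mag, ang, true);   // angles reported in degrees
+    // mag: [5, 1], ang: approximately [53.13, 90]
+@endcode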
+@param x array of x-coordinates; this must be a single-precision or
+double-precision floating-point array.
+@param y array of y-coordinates, that must have the same size and same type as x.
+@param magnitude output array of magnitudes of the same size and type as x.
+@param angle output array of angles that has the same size and type as
+x; the angles are measured in radians (from 0 to 2\*Pi) or in degrees (0 to 360 degrees).
+@param angleInDegrees a flag, indicating whether the angles are measured
+in radians (which is by default), or in degrees.
+@sa Sobel, Scharr
+*/
+CV_EXPORTS_W void cartToPolar(InputArray x, InputArray y,
+ OutputArray magnitude, OutputArray angle,
+ bool angleInDegrees = false);
+
+/** @brief Calculates the rotation angle of 2D vectors.
+
+The function phase calculates the rotation angle of each 2D vector that
+is formed from the corresponding elements of x and y :
+\f[\texttt{angle} (I) = \texttt{atan2} ( \texttt{y} (I), \texttt{x} (I))\f]
+
+The angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0 ,
+the corresponding angle(I) is set to 0.
+@param x input floating-point array of x-coordinates of 2D vectors.
+@param y input array of y-coordinates of 2D vectors; it must have the
+same size and the same type as x.
+@param angle output array of vector angles; it has the same size and
+same type as x .
+@param angleInDegrees when true, the function calculates the angle in
+degrees, otherwise, they are measured in radians.
+*/
+CV_EXPORTS_W void phase(InputArray x, InputArray y, OutputArray angle,
+ bool angleInDegrees = false);
+
+/** @brief Calculates the magnitude of 2D vectors.
+
+The function magnitude calculates the magnitude of 2D vectors formed
+from the corresponding elements of x and y arrays:
+\f[\texttt{dst} (I) = \sqrt{\texttt{x}(I)^2 + \texttt{y}(I)^2}\f]
+@param x floating-point array of x-coordinates of the vectors.
+@param y floating-point array of y-coordinates of the vectors; it must
+have the same size as x.
+@param magnitude output array of the same size and type as x.
+@sa cartToPolar, polarToCart, phase, sqrt
+*/
+CV_EXPORTS_W void magnitude(InputArray x, InputArray y, OutputArray magnitude);
+
+/** @brief Checks every element of an input array for invalid values.
+
+The functions checkRange check that every array element is neither NaN nor infinite. When minVal \>
+-DBL_MAX and maxVal \< DBL_MAX, the functions also check that each value is between minVal and
+maxVal. In case of multi-channel arrays, each channel is processed independently. If some values
+are out of range, position of the first outlier is stored in pos (when pos != NULL). Then, the
+functions either return false (when quiet=true) or throw an exception.
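+
+For example, a minimal sketch that locates a NaN in a small array:
+@code{.cpp}
+    Mat m = (Mat_<float>(1, 3) << 1.f, std::numeric_limits<float>::quiet_NaN(), 3.f);
+    Point pos;
+    if( !checkRange(m, true, &pos) )
+        printf("invalid value at (%d, %d)\n", pos.x, pos.y);   // prints (1, 0)
+@endcode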
+@param a input array.
+@param quiet a flag, indicating whether the functions quietly return false when the array elements
+are out of range or they throw an exception.
+@param pos optional output parameter, when not NULL, must be a pointer to array of src.dims
+elements.
+@param minVal inclusive lower boundary of valid values range.
+@param maxVal exclusive upper boundary of valid values range.
+*/
+CV_EXPORTS_W bool checkRange(InputArray a, bool quiet = true, CV_OUT Point* pos = 0,
+ double minVal = -DBL_MAX, double maxVal = DBL_MAX);
+
+/** @brief converts NaN's to the given number
+*/
+CV_EXPORTS_W void patchNaNs(InputOutputArray a, double val = 0);
+
+/** @brief Performs generalized matrix multiplication.
+
+The function performs generalized matrix multiplication similar to the
+gemm functions in BLAS level 3. For example,
+`gemm(src1, src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T)`
+corresponds to
+\f[\texttt{dst} = \texttt{alpha} \cdot \texttt{src1} ^T \cdot \texttt{src2} + \texttt{beta} \cdot \texttt{src3} ^T\f]
+
+In case of complex (two-channel) data, a complex matrix
+multiplication is performed.
+
+The function can be replaced with a matrix expression. For example, the
+above call can be replaced with:
+@code{.cpp}
+ dst = alpha*src1.t()*src2 + beta*src3.t();
+@endcode
+@param src1 first multiplied input matrix that could be real(CV_32FC1,
+CV_64FC1) or complex(CV_32FC2, CV_64FC2).
+@param src2 second multiplied input matrix of the same type as src1.
+@param alpha weight of the matrix product.
+@param src3 third optional delta matrix added to the matrix product; it
+should have the same type as src1 and src2.
+@param beta weight of src3.
+@param dst output matrix; it has the proper size and the same type as
+input matrices.
+@param flags operation flags (cv::GemmFlags)
+@sa mulTransposed , transform
+*/
+CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha,
+ InputArray src3, double beta, OutputArray dst, int flags = 0);
+
+/** @brief Calculates the product of a matrix and its transposition.
+
+The function mulTransposed calculates the product of src and its
+transposition:
+\f[\texttt{dst} = \texttt{scale} ( \texttt{src} - \texttt{delta} )^T ( \texttt{src} - \texttt{delta} )\f]
+if aTa=true , and
+\f[\texttt{dst} = \texttt{scale} ( \texttt{src} - \texttt{delta} ) ( \texttt{src} - \texttt{delta} )^T\f]
+otherwise. The function is used to calculate the covariance matrix. With
+zero delta, it can be used as a faster substitute for general matrix
+product A\*B when B=A'.
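+
+For example, a minimal sketch computing src^T * src of a small matrix:
+@code{.cpp}
+    Mat src = (Mat_<float>(3, 2) << 1, 2,
+                                    3, 4,
+                                    5, 6);
+    Mat dst;
+    mulTransposed(src, dst, true);   // aTa = true, so dst = src^T * src (2x2)
+    // dst: [35, 44;
+    //       44, 56]
+@endcode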
+@param src input single-channel matrix. Note that unlike gemm, the
+function can multiply not only floating-point matrices.
+@param dst output square matrix.
+@param aTa Flag specifying the multiplication ordering. See the
+description below.
+@param delta Optional delta matrix subtracted from src before the
+multiplication. When the matrix is empty ( delta=noArray() ), it is
+assumed to be zero, that is, nothing is subtracted. If it has the same
+size as src , it is simply subtracted. Otherwise, it is "repeated" (see
+repeat ) to cover the full src and then subtracted. Type of the delta
+matrix, when it is not empty, must be the same as the type of created
+output matrix. See the dtype parameter description below.
+@param scale Optional scale factor for the matrix product.
+@param dtype Optional type of the output matrix. When it is negative,
+the output matrix will have the same type as src . Otherwise, it will be
+type=CV_MAT_DEPTH(dtype) that should be either CV_32F or CV_64F .
+@sa calcCovarMatrix, gemm, repeat, reduce
+*/
+CV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa,
+ InputArray delta = noArray(),
+ double scale = 1, int dtype = -1 );
+
+/** @brief Transposes a matrix.
+
+The function transpose transposes the matrix src :
+\f[\texttt{dst} (i,j) = \texttt{src} (j,i)\f]
+@note No complex conjugation is done in case of a complex matrix. It
+should be done separately if needed.
+@param src input array.
+@param dst output array of the same type as src.
+*/
+CV_EXPORTS_W void transpose(InputArray src, OutputArray dst);
+
+/** @brief Performs the matrix transformation of every array element.
+
+The function transform performs the matrix transformation of every
+element of the array src and stores the results in dst :
+\f[\texttt{dst} (I) = \texttt{m} \cdot \texttt{src} (I)\f]
+(when m.cols=src.channels() ), or
+\f[\texttt{dst} (I) = \texttt{m} \cdot [ \texttt{src} (I); 1]\f]
+(when m.cols=src.channels()+1 )
+
+Every element of the N -channel array src is interpreted as N -element
+vector that is transformed using the M x N or M x (N+1) matrix m to
+M-element vector - the corresponding element of the output array dst .
+
+The function may be used for geometrical transformation of
+N -dimensional points, arbitrary linear color space transformation (such
+as various kinds of RGB to YUV transforms), shuffling the image
+channels, and so forth.
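+
+For example, a minimal sketch that swaps the two channels of a 2-channel array with a 2x2 matrix:
+@code{.cpp}
+    Mat src(1, 3, CV_32FC2, Scalar(1, 2));   // every element is the vector (1, 2)
+    Mat m = (Mat_<float>(2, 2) << 0, 1,
+                                  1, 0);
+    Mat dst;
+    transform(src, dst, m);
+    // every element of dst is the vector (2, 1)
+@endcode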
+@param src input array that must have as many channels (1 to 4) as
+m.cols or m.cols-1.
+@param dst output array of the same size and depth as src; it has as
+many channels as m.rows.
+@param m transformation 2x2 or 2x3 floating-point matrix.
+@sa perspectiveTransform, getAffineTransform, estimateRigidTransform, warpAffine, warpPerspective
+*/
+CV_EXPORTS_W void transform(InputArray src, OutputArray dst, InputArray m );
+
+/** @brief Performs the perspective matrix transformation of vectors.
+
+The function perspectiveTransform transforms every element of src by
+treating it as a 2D or 3D vector, in the following way:
+\f[(x, y, z) \rightarrow (x'/w, y'/w, z'/w)\f]
+where
+\f[(x', y', z', w') = \texttt{mat} \cdot \begin{bmatrix} x & y & z & 1 \end{bmatrix}\f]
+and
+\f[w = \fork{w'}{if \(w' \ne 0\)}{\infty}{otherwise}\f]
+
+Here a 3D vector transformation is shown. In case of a 2D vector
+transformation, the z component is omitted.
+
+@note The function transforms a sparse set of 2D or 3D vectors. If you
+want to transform an image using perspective transformation, use
+warpPerspective . If you have an inverse problem, that is, you want to
+compute the most probable perspective transformation out of several
+pairs of corresponding points, you can use getPerspectiveTransform or
+findHomography .
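+
+For example, a minimal sketch applying a 3x3 matrix to a sparse set of 2D points
+(an identity matrix here, so the points are unchanged):
+@code{.cpp}
+    std::vector<Point2f> src = { Point2f(0, 0), Point2f(1, 1) }, dst;
+    Mat m = Mat::eye(3, 3, CV_32F);
+    perspectiveTransform(src, dst, m);
+@endcode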
+@param src input two-channel or three-channel floating-point array; each
+element is a 2D/3D vector to be transformed.
+@param dst output array of the same size and type as src.
+@param m 3x3 or 4x4 floating-point transformation matrix.
+@sa transform, warpPerspective, getPerspectiveTransform, findHomography
+*/
+CV_EXPORTS_W void perspectiveTransform(InputArray src, OutputArray dst, InputArray m );
+
+/** @brief Copies the lower or the upper half of a square matrix to another half.
+
+The function completeSymm copies the lower half of a square matrix to
+its another half. The matrix diagonal remains unchanged:
+* \f$\texttt{mtx}_{ij}=\texttt{mtx}_{ji}\f$ for \f$i > j\f$ if
+ lowerToUpper=false
+* \f$\texttt{mtx}_{ij}=\texttt{mtx}_{ji}\f$ for \f$i < j\f$ if
+ lowerToUpper=true
+@param mtx input-output floating-point square matrix.
+@param lowerToUpper operation flag; if true, the lower half is copied to
+the upper half. Otherwise, the upper half is copied to the lower half.
+@sa flip, transpose
+*/
+CV_EXPORTS_W void completeSymm(InputOutputArray mtx, bool lowerToUpper = false);
+
+/** @brief Initializes a scaled identity matrix.
+
+The function setIdentity initializes a scaled identity matrix:
+\f[\texttt{mtx} (i,j)= \fork{\texttt{value}}{ if \(i=j\)}{0}{otherwise}\f]
+
+The function can also be emulated using the matrix initializers and the
+matrix expressions:
+@code
+ Mat A = Mat::eye(4, 3, CV_32F)*5;
+ // A will be set to [[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0]]
+@endcode
+@param mtx matrix to initialize (not necessarily square).
+@param s value to assign to diagonal elements.
+@sa Mat::zeros, Mat::ones, Mat::setTo, Mat::operator=
+*/
+CV_EXPORTS_W void setIdentity(InputOutputArray mtx, const Scalar& s = Scalar(1));
+
+/** @brief Returns the determinant of a square floating-point matrix.
+
+The function determinant calculates and returns the determinant of the
+specified matrix. For small matrices ( mtx.cols=mtx.rows\<=3 ), the
+direct method is used. For larger matrices, the function uses LU
+factorization with partial pivoting.
+
+For symmetric positive-definite matrices, it is also possible to use
+eigen decomposition to calculate the determinant.
+@param mtx input matrix that must have CV_32FC1 or CV_64FC1 type and
+square size.
+@sa trace, invert, solve, eigen, @ref MatrixExpressions
+*/
+CV_EXPORTS_W double determinant(InputArray mtx);
+
+/** @brief Returns the trace of a matrix.
+
+The function trace returns the sum of the diagonal elements of the
+matrix mtx .
+\f[\mathrm{tr} ( \texttt{mtx} ) = \sum _i \texttt{mtx} (i,i)\f]
+@param mtx input matrix.
+*/
+CV_EXPORTS_W Scalar trace(InputArray mtx);
+
+/** @brief Finds the inverse or pseudo-inverse of a matrix.
+
+The function invert inverts the matrix src and stores the result in dst
+. When the matrix src is singular or non-square, the function calculates
+the pseudo-inverse matrix (the dst matrix) so that norm(src\*dst - I) is
+minimal, where I is an identity matrix.
+
+In case of the DECOMP_LU method, the function returns non-zero value if
+the inverse has been successfully calculated and 0 if src is singular.
+
+In case of the DECOMP_SVD method, the function returns the inverse
+condition number of src (the ratio of the smallest singular value to the
+largest singular value) and 0 if src is singular. The SVD method
+calculates a pseudo-inverse matrix if src is singular.
+
+Similarly to DECOMP_LU, the method DECOMP_CHOLESKY works only with
+non-singular square matrices that should also be symmetrical and
+positively defined. In this case, the function stores the inverted
+matrix in dst and returns non-zero. Otherwise, it returns 0.
+
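+For example, a minimal sketch inverting a non-singular 2x2 matrix:
+@code{.cpp}
+    Mat A = (Mat_<double>(2, 2) << 4, 7,
+                                   2, 6);
+    Mat Ainv;
+    double ok = invert(A, Ainv, DECOMP_LU);   // non-zero, since A is non-singular
+    // Ainv: [ 0.6, -0.7;
+    //        -0.2,  0.4]
+@endcode
+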
+@param src input floating-point M x N matrix.
+@param dst output matrix of N x M size and the same type as src.
+@param flags inversion method (cv::DecompTypes)
+@sa solve, SVD
+*/
+CV_EXPORTS_W double invert(InputArray src, OutputArray dst, int flags = DECOMP_LU);
+
+/** @brief Solves one or more linear systems or least-squares problems.
+
+The function solve solves a linear system or least-squares problem (the
+latter is possible with SVD or QR methods, or by specifying the flag
+DECOMP_NORMAL ):
+\f[\texttt{dst} = \arg \min _X \| \texttt{src1} \cdot \texttt{X} - \texttt{src2} \|\f]
+
+If DECOMP_LU or DECOMP_CHOLESKY method is used, the function returns 1
+if src1 (or \f$\texttt{src1}^T\texttt{src1}\f$ ) is non-singular. Otherwise,
+it returns 0. In the latter case, dst is not valid. Other methods find a
+pseudo-solution in case of a singular left-hand side part.
+
+@note If you want to find a unity-norm solution of an under-defined
+singular system \f$\texttt{src1}\cdot\texttt{dst}=0\f$ , the function solve
+will not do the work. Use SVD::solveZ instead.
+
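+For example, a minimal sketch solving the 2x2 system [2 1; 1 3] * x = [3; 5]:
+@code{.cpp}
+    Mat A = (Mat_<float>(2, 2) << 2, 1,
+                                  1, 3);
+    Mat b = (Mat_<float>(2, 1) << 3, 5);
+    Mat x;
+    solve(A, b, x, DECOMP_LU);
+    // x: [0.8; 1.4]
+@endcode
+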
+@param src1 input matrix on the left-hand side of the system.
+@param src2 input matrix on the right-hand side of the system.
+@param dst output solution.
+@param flags solution (matrix inversion) method (cv::DecompTypes)
+@sa invert, SVD, eigen
+*/
+CV_EXPORTS_W bool solve(InputArray src1, InputArray src2,
+ OutputArray dst, int flags = DECOMP_LU);
+
+/** @brief Sorts each row or each column of a matrix.
+
+The function sort sorts each matrix row or each matrix column in
+ascending or descending order. So you should pass two operation flags to
+get desired behaviour. If you want to sort matrix rows or columns
+lexicographically, you can use STL std::sort generic function with the
+proper comparison predicate.
+
+@param src input single-channel array.
+@param dst output array of the same size and type as src.
+@param flags operation flags, a combination of cv::SortFlags
+@sa sortIdx, randShuffle
+*/
+CV_EXPORTS_W void sort(InputArray src, OutputArray dst, int flags);
+
+/** @brief Sorts each row or each column of a matrix.
+
+The function sortIdx sorts each matrix row or each matrix column in the
+ascending or descending order. So you should pass two operation flags to
+get desired behaviour. Instead of reordering the elements themselves, it
+stores the indices of sorted elements in the output array. For example:
+@code
+ Mat A = Mat::eye(3,3,CV_32F), B;
+ sortIdx(A, B, SORT_EVERY_ROW + SORT_ASCENDING);
+ // B will probably contain
+ // (because of equal elements in A some permutations are possible):
+ // [[1, 2, 0], [0, 2, 1], [0, 1, 2]]
+@endcode
+@param src input single-channel array.
+@param dst output integer array of the same size as src.
+@param flags operation flags that could be a combination of cv::SortFlags
+@sa sort, randShuffle
+*/
+CV_EXPORTS_W void sortIdx(InputArray src, OutputArray dst, int flags);
+
+/** @brief Finds the real roots of a cubic equation.
+
+The function solveCubic finds the real roots of a cubic equation:
+- if coeffs is a 4-element vector:
+\f[\texttt{coeffs} [0] x^3 + \texttt{coeffs} [1] x^2 + \texttt{coeffs} [2] x + \texttt{coeffs} [3] = 0\f]
+- if coeffs is a 3-element vector:
+\f[x^3 + \texttt{coeffs} [0] x^2 + \texttt{coeffs} [1] x + \texttt{coeffs} [2] = 0\f]
+
+The roots are stored in the roots array.
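+
+For example, a minimal sketch for a cubic with the known roots 1, 2 and 3:
+@code{.cpp}
+    Mat coeffs = (Mat_<double>(1, 4) << 1, -6, 11, -6);   // x^3 - 6x^2 + 11x - 6
+    Mat roots;
+    int n = solveCubic(coeffs, roots);   // n == 3; roots contain 1, 2 and 3
+@endcode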
+@param coeffs equation coefficients, an array of 3 or 4 elements.
+@param roots output array of real roots that has 1 or 3 elements.
+*/
+CV_EXPORTS_W int solveCubic(InputArray coeffs, OutputArray roots);
+
+/** @brief Finds the real or complex roots of a polynomial equation.
+
+The function solvePoly finds real and complex roots of a polynomial equation:
+\f[\texttt{coeffs} [n] x^{n} + \texttt{coeffs} [n-1] x^{n-1} + ... + \texttt{coeffs} [1] x + \texttt{coeffs} [0] = 0\f]
+@param coeffs array of polynomial coefficients.
+@param roots output (complex) array of roots.
+@param maxIters maximum number of iterations the algorithm does.
+*/
+CV_EXPORTS_W double solvePoly(InputArray coeffs, OutputArray roots, int maxIters = 300);
+
+/** @brief Calculates eigenvalues and eigenvectors of a symmetric matrix.
+
+The functions eigen calculate just eigenvalues, or eigenvalues and eigenvectors of the symmetric
+matrix src:
+@code
+    src*eigenvectors.row(i).t() = eigenvalues.at<srcType>(i)*eigenvectors.row(i).t()
+@endcode
+@note in the new and the old interfaces different ordering of eigenvalues and eigenvectors
+parameters is used.
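+
+For example, a minimal sketch for a small symmetric matrix:
+@code{.cpp}
+    Mat A = (Mat_<double>(2, 2) << 2, 1,
+                                   1, 2);
+    Mat eigenvalues, eigenvectors;
+    eigen(A, eigenvalues, eigenvectors);
+    // eigenvalues: [3; 1]
+    // eigenvectors (up to sign): [0.707, 0.707; 0.707, -0.707]
+@endcode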
+@param src input matrix that must have CV_32FC1 or CV_64FC1 type, square size and be symmetrical
+(src ^T^ == src).
+@param eigenvalues output vector of eigenvalues of the same type as src; the eigenvalues are stored
+in the descending order.
+@param eigenvectors output matrix of eigenvectors; it has the same size and type as src; the
+eigenvectors are stored as subsequent matrix rows, in the same order as the corresponding
+eigenvalues.
+@sa completeSymm , PCA
+*/
+CV_EXPORTS_W bool eigen(InputArray src, OutputArray eigenvalues,
+ OutputArray eigenvectors = noArray());
+
+/** @brief Calculates the covariance matrix of a set of vectors.
+
+The functions calcCovarMatrix calculate the covariance matrix and, optionally, the mean vector of
+the set of input vectors.
+@param samples samples stored as separate matrices
+@param nsamples number of samples
+@param covar output covariance matrix of the type ctype and square size.
+@param mean input or output (depending on the flags) array as the average value of the input vectors.
+@param flags operation flags as a combination of cv::CovarFlags
+@param ctype type of the matrix; it equals 'CV_64F' by default.
+@sa PCA, mulTransposed, Mahalanobis
+@todo InputArrayOfArrays
+*/
+CV_EXPORTS void calcCovarMatrix( const Mat* samples, int nsamples, Mat& covar, Mat& mean,
+ int flags, int ctype = CV_64F);
+
+/** @overload
+@note use cv::COVAR_ROWS or cv::COVAR_COLS flag
+@param samples samples stored as rows/columns of a single matrix.
+@param covar output covariance matrix of the type ctype and square size.
+@param mean input or output (depending on the flags) array as the average value of the input vectors.
+@param flags operation flags as a combination of cv::CovarFlags
+@param ctype type of the matrix; it equals 'CV_64F' by default.
+*/
+CV_EXPORTS_W void calcCovarMatrix( InputArray samples, OutputArray covar,
+ InputOutputArray mean, int flags, int ctype = CV_64F);
+
+/** wrap PCA::operator() */
+CV_EXPORTS_W void PCACompute(InputArray data, InputOutputArray mean,
+ OutputArray eigenvectors, int maxComponents = 0);
+
+/** wrap PCA::operator() */
+CV_EXPORTS_W void PCACompute(InputArray data, InputOutputArray mean,
+ OutputArray eigenvectors, double retainedVariance);
+
+/** wrap PCA::project */
+CV_EXPORTS_W void PCAProject(InputArray data, InputArray mean,
+ InputArray eigenvectors, OutputArray result);
+
+/** wrap PCA::backProject */
+CV_EXPORTS_W void PCABackProject(InputArray data, InputArray mean,
+ InputArray eigenvectors, OutputArray result);
+
+/** wrap SVD::compute */
+CV_EXPORTS_W void SVDecomp( InputArray src, OutputArray w, OutputArray u, OutputArray vt, int flags = 0 );
+
+/** wrap SVD::backSubst */
+CV_EXPORTS_W void SVBackSubst( InputArray w, InputArray u, InputArray vt,
+ InputArray rhs, OutputArray dst );
+
+/** @brief Calculates the Mahalanobis distance between two vectors.
+
+The function Mahalanobis calculates and returns the weighted distance between two vectors:
+\f[d( \texttt{vec1} , \texttt{vec2} )= \sqrt{\sum_{i,j}{\texttt{icovar(i,j)}\cdot(\texttt{vec1}(i)-\texttt{vec2}(i))\cdot(\texttt{vec1}(j)-\texttt{vec2}(j))} }\f]
+The covariance matrix may be calculated using the cv::calcCovarMatrix function and then inverted using
+the invert function (preferably using the cv::DECOMP_SVD method, as the most accurate).
+@param v1 first 1D input vector.
+@param v2 second 1D input vector.
+@param icovar inverse covariance matrix.
+*/
+CV_EXPORTS_W double Mahalanobis(InputArray v1, InputArray v2, InputArray icovar);
+
+/** @brief Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-point array.
+
+The function performs one of the following:
+- Forward the Fourier transform of a 1D vector of N elements:
+ \f[Y = F^{(N)} \cdot X,\f]
+ where \f$F^{(N)}_{jk}=\exp(-2\pi i j k/N)\f$ and \f$i=\sqrt{-1}\f$
+- Inverse the Fourier transform of a 1D vector of N elements:
+ \f[\begin{array}{l} X'= \left (F^{(N)} \right )^{-1} \cdot Y = \left (F^{(N)} \right )^* \cdot Y \\ X = (1/N) \cdot X', \end{array}\f]
+ where \f$F^*=\left(\textrm{Re}(F^{(N)})-\textrm{Im}(F^{(N)})\right)^T\f$
+- Forward the 2D Fourier transform of a M x N matrix:
+ \f[Y = F^{(M)} \cdot X \cdot F^{(N)}\f]
+- Inverse the 2D Fourier transform of a M x N matrix:
+ \f[\begin{array}{l} X'= \left (F^{(M)} \right )^* \cdot Y \cdot \left (F^{(N)} \right )^* \\ X = \frac{1}{M \cdot N} \cdot X' \end{array}\f]
+
+In case of real (single-channel) data, the output spectrum of the forward Fourier transform or input
+spectrum of the inverse Fourier transform can be represented in a packed format called *CCS*
+(complex-conjugate-symmetrical). It was borrowed from IPL (Intel\* Image Processing Library). Here
+is how 2D *CCS* spectrum looks:
+\f[\begin{bmatrix} Re Y_{0,0} & Re Y_{0,1} & Im Y_{0,1} & Re Y_{0,2} & Im Y_{0,2} & \cdots & Re Y_{0,N/2-1} & Im Y_{0,N/2-1} & Re Y_{0,N/2} \\ Re Y_{1,0} & Re Y_{1,1} & Im Y_{1,1} & Re Y_{1,2} & Im Y_{1,2} & \cdots & Re Y_{1,N/2-1} & Im Y_{1,N/2-1} & Re Y_{1,N/2} \\ Im Y_{1,0} & Re Y_{2,1} & Im Y_{2,1} & Re Y_{2,2} & Im Y_{2,2} & \cdots & Re Y_{2,N/2-1} & Im Y_{2,N/2-1} & Im Y_{1,N/2} \\ \hdotsfor{9} \\ Re Y_{M/2-1,0} & Re Y_{M-3,1} & Im Y_{M-3,1} & \hdotsfor{3} & Re Y_{M-3,N/2-1} & Im Y_{M-3,N/2-1}& Re Y_{M/2-1,N/2} \\ Im Y_{M/2-1,0} & Re Y_{M-2,1} & Im Y_{M-2,1} & \hdotsfor{3} & Re Y_{M-2,N/2-1} & Im Y_{M-2,N/2-1}& Im Y_{M/2-1,N/2} \\ Re Y_{M/2,0} & Re Y_{M-1,1} & Im Y_{M-1,1} & \hdotsfor{3} & Re Y_{M-1,N/2-1} & Im Y_{M-1,N/2-1}& Re Y_{M/2,N/2} \end{bmatrix}\f]
+
+In case of 1D transform of a real vector, the output looks like the first row of the matrix above.
+
+So, the function chooses an operation mode depending on the flags and size of the input array:
+- If DFT_ROWS is set or the input array has a single row or single column, the function
+ performs a 1D forward or inverse transform of each row of a matrix when DFT_ROWS is set.
+ Otherwise, it performs a 2D transform.
+- If the input array is real and DFT_INVERSE is not set, the function performs a forward 1D or
+ 2D transform:
+ - When DFT_COMPLEX_OUTPUT is set, the output is a complex matrix of the same size as
+ input.
+ - When DFT_COMPLEX_OUTPUT is not set, the output is a real matrix of the same size as
+ input. In case of 2D transform, it uses the packed format as shown above. In case of a
+ single 1D transform, it looks like the first row of the matrix above. In case of
+ multiple 1D transforms (when using the DFT_ROWS flag), each row of the output matrix
+ looks like the first row of the matrix above.
+- If the input array is complex and either DFT_INVERSE or DFT_REAL_OUTPUT are not set, the
+ output is a complex array of the same size as input. The function performs a forward or
+ inverse 1D or 2D transform of the whole input array or each row of the input array
+ independently, depending on the flags DFT_INVERSE and DFT_ROWS.
+- When DFT_INVERSE is set and the input array is real, or it is complex but DFT_REAL_OUTPUT
+ is set, the output is a real array of the same size as input. The function performs a 1D or 2D
+ inverse transformation of the whole input array or each individual row, depending on the flags
+ DFT_INVERSE and DFT_ROWS.
+
+If DFT_SCALE is set, the scaling is done after the transformation.
+
+Unlike dct , the function supports arrays of arbitrary size. But only those arrays are processed
+efficiently, whose sizes can be factorized in a product of small prime numbers (2, 3, and 5 in the
+current implementation). Such an efficient DFT size can be calculated using the getOptimalDFTSize
+method.
+
+The sample below illustrates how to calculate a DFT-based convolution of two 2D real arrays:
+@code
+ void convolveDFT(InputArray A, InputArray B, OutputArray C)
+ {
+ // reallocate the output array if needed
+ C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type());
+ Size dftSize;
+ // calculate the size of DFT transform
+ dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);
+ dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);
+
+ // allocate temporary buffers and initialize them with 0's
+ Mat tempA(dftSize, A.type(), Scalar::all(0));
+ Mat tempB(dftSize, B.type(), Scalar::all(0));
+
+ // copy A and B to the top-left corners of tempA and tempB, respectively
+ Mat roiA(tempA, Rect(0,0,A.cols,A.rows));
+ A.copyTo(roiA);
+ Mat roiB(tempB, Rect(0,0,B.cols,B.rows));
+ B.copyTo(roiB);
+
+ // now transform the padded A & B in-place;
+ // use "nonzeroRows" hint for faster processing
+ dft(tempA, tempA, 0, A.rows);
+ dft(tempB, tempB, 0, B.rows);
+
+ // multiply the spectrums;
+ // the function handles packed spectrum representations well
+ mulSpectrums(tempA, tempB, tempA);
+
+ // transform the product back from the frequency domain.
+ // Even though all the result rows will be non-zero,
+ // you need only the first C.rows of them, and thus you
+ // pass nonzeroRows == C.rows
+ dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows);
+
+ // now copy the result back to C.
+ tempA(Rect(0, 0, C.cols, C.rows)).copyTo(C);
+
+ // all the temporary buffers will be deallocated automatically
+ }
+@endcode
+To optimize this sample, consider the following approaches:
+- Since nonzeroRows != 0 is passed to the forward transform calls and since A and B are copied to
+ the top-left corners of tempA and tempB, respectively, it is not necessary to clear the whole
+ tempA and tempB. It is only necessary to clear the tempA.cols - A.cols ( tempB.cols - B.cols)
+ rightmost columns of the matrices.
+- This DFT-based convolution does not have to be applied to the whole big arrays, especially if B
+ is significantly smaller than A or vice versa. Instead, you can calculate convolution by parts.
+ To do this, you need to split the output array C into multiple tiles. For each tile, estimate
+ which parts of A and B are required to calculate convolution in this tile. If the tiles in C are
+ too small, the speed will decrease a lot because of repeated work. In the ultimate case, when
+ each tile in C is a single pixel, the algorithm becomes equivalent to the naive convolution
+ algorithm. If the tiles are too big, the temporary arrays tempA and tempB become too big and
+ there is also a slowdown because of bad cache locality. So, there is an optimal tile size
+ somewhere in the middle.
+- If different tiles in C can be calculated in parallel and, thus, the convolution is done by
+ parts, the loop can be threaded.
+
+All of the above improvements have been implemented in matchTemplate and filter2D . Therefore, by
+using them, you can get the performance even better than with the above theoretically optimal
+implementation. Though, those two functions actually calculate cross-correlation, not convolution,
+so you need to "flip" the second convolution operand B vertically and horizontally using flip .
+@note
+- An example using the discrete fourier transform can be found at
+ opencv_source_code/samples/cpp/dft.cpp
+- (Python) An example using the dft functionality to perform Wiener deconvolution can be found
+ at opencv_source/samples/python/deconvolution.py
+- (Python) An example rearranging the quadrants of a Fourier image can be found at
+ opencv_source/samples/python/dft.py
+@param src input array that could be real or complex.
+@param dst output array whose size and type depends on the flags .
+@param flags transformation flags, representing a combination of the cv::DftFlags
+@param nonzeroRows when the parameter is not zero, the function assumes that only the first
+nonzeroRows rows of the input array (DFT_INVERSE is not set) or only the first nonzeroRows of the
+output array (DFT_INVERSE is set) contain non-zeros, thus, the function can handle the rest of the
+rows more efficiently and save some time; this technique is very useful for calculating array
+cross-correlation or convolution using DFT.
+@sa dct , getOptimalDFTSize , mulSpectrums, filter2D , matchTemplate , flip , cartToPolar ,
+magnitude , phase
+*/
+CV_EXPORTS_W void dft(InputArray src, OutputArray dst, int flags = 0, int nonzeroRows = 0);
+
+/** @brief Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.
+
+idft(src, dst, flags) is equivalent to dft(src, dst, flags | DFT_INVERSE) .
+@note None of dft and idft scales the result by default. So, you should pass DFT_SCALE to one of
+dft or idft explicitly to make these transforms mutually inverse.
+@sa dft, dct, idct, mulSpectrums, getOptimalDFTSize
+@param src input floating-point real or complex array.
+@param dst output array whose size and type depend on the flags.
+@param flags operation flags (see dft and cv::DftFlags).
+@param nonzeroRows number of dst rows to process; the rest of the rows have undefined content (see
+the convolution sample in dft description).
+*/
+CV_EXPORTS_W void idft(InputArray src, OutputArray dst, int flags = 0, int nonzeroRows = 0);
+
+/** @brief Performs a forward or inverse discrete Cosine transform of 1D or 2D array.
+
+The function dct performs a forward or inverse discrete Cosine transform (DCT) of a 1D or 2D
+floating-point array:
+- Forward Cosine transform of a 1D vector of N elements:
+ \f[Y = C^{(N)} \cdot X\f]
+ where
+ \f[C^{(N)}_{jk}= \sqrt{\alpha_j/N} \cos \left ( \frac{\pi(2k+1)j}{2N} \right )\f]
+ and
+ \f$\alpha_0=1\f$, \f$\alpha_j=2\f$ for *j \> 0*.
+- Inverse Cosine transform of a 1D vector of N elements:
+ \f[X = \left (C^{(N)} \right )^{-1} \cdot Y = \left (C^{(N)} \right )^T \cdot Y\f]
+ (since \f$C^{(N)}\f$ is an orthogonal matrix, \f$C^{(N)} \cdot \left(C^{(N)}\right)^T = I\f$ )
+- Forward 2D Cosine transform of M x N matrix:
+ \f[Y = C^{(N)} \cdot X \cdot \left (C^{(N)} \right )^T\f]
+- Inverse 2D Cosine transform of M x N matrix:
+ \f[X = \left (C^{(N)} \right )^T \cdot X \cdot C^{(N)}\f]
+
+The function chooses the mode of operation by looking at the flags and size of the input array:
+- If (flags & DCT_INVERSE) == 0 , the function does a forward 1D or 2D transform. Otherwise, it
+ is an inverse 1D or 2D transform.
+- If (flags & DCT_ROWS) != 0 , the function performs a 1D transform of each row.
+- If the array is a single column or a single row, the function performs a 1D transform.
+- If none of the above is true, the function performs a 2D transform.
+
+@note Currently dct supports even-size arrays (2, 4, 6 ...). For data analysis and approximation, you
+can pad the array when necessary.
+Also, the function performance depends very much, and not monotonically, on the array size (see
+getOptimalDFTSize ). In the current implementation DCT of a vector of size N is calculated via DFT
+of a vector of size N/2 . Thus, the optimal DCT size N1 \>= N can be calculated as:
+@code
+ size_t getOptimalDCTSize(size_t N) { return 2*getOptimalDFTSize((N+1)/2); }
+ N1 = getOptimalDCTSize(N);
+@endcode
+@param src input floating-point array.
+@param dst output array of the same size and type as src .
+@param flags transformation flags as a combination of cv::DftFlags (DCT_*)
+@sa dft , getOptimalDFTSize , idct
+*/
+CV_EXPORTS_W void dct(InputArray src, OutputArray dst, int flags = 0);
+
+/** @brief Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.
+
+idct(src, dst, flags) is equivalent to dct(src, dst, flags | DCT_INVERSE).
+@param src input floating-point single-channel array.
+@param dst output array of the same size and type as src.
+@param flags operation flags.
+@sa dct, dft, idft, getOptimalDFTSize
+*/
+CV_EXPORTS_W void idct(InputArray src, OutputArray dst, int flags = 0);
+
+/** @brief Performs the per-element multiplication of two Fourier spectrums.
+
+The function mulSpectrums performs the per-element multiplication of the two CCS-packed or complex
+matrices that are results of a real or complex Fourier transform.
+
+The function, together with dft and idft , may be used to calculate convolution (pass conjB=false )
+or correlation (pass conjB=true ) of two arrays rapidly. When the arrays are complex, they are
+simply multiplied (per element) with an optional conjugation of the second-array elements. When the
+arrays are real, they are assumed to be CCS-packed (see dft for details).
+@param a first input array.
+@param b second input array of the same size and type as src1 .
+@param c output array of the same size and type as src1 .
+@param flags operation flags; currently, the only supported flag is cv::DFT_ROWS, which indicates that
+each row of src1 and src2 is an independent 1D Fourier spectrum. If you do not want to use this flag, then simply add a `0` as value.
+@param conjB optional flag that conjugates the second input array before the multiplication (true)
+or not (false).
+*/
+CV_EXPORTS_W void mulSpectrums(InputArray a, InputArray b, OutputArray c,
+ int flags, bool conjB = false);
+
+/** @brief Returns the optimal DFT size for a given vector size.
+
+DFT performance is not a monotonic function of a vector size. Therefore, when you calculate
+convolution of two arrays or perform the spectral analysis of an array, it usually makes sense to
+pad the input data with zeros to get a bit larger array that can be transformed much faster than the
+original one. Arrays whose size is a power-of-two (2, 4, 8, 16, 32, ...) are the fastest to process.
+Though, the arrays whose size is a product of 2's, 3's, and 5's (for example, 300 = 5\*5\*3\*2\*2)
+are also processed quite efficiently.
+
+The function getOptimalDFTSize returns the minimum number N that is greater than or equal to vecsize
+so that the DFT of a vector of size N can be processed efficiently. In the current implementation N
+= 2 ^p^ \* 3 ^q^ \* 5 ^r^ for some integer p, q, r.
+
+The function returns a negative number if vecsize is too large (very close to INT_MAX ).
+
+While the function cannot be used directly to estimate the optimal vector size for DCT transform
+(since the current DCT implementation supports only even-size vectors), it can be easily processed
+as getOptimalDFTSize((vecsize+1)/2)\*2.
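+
+For example, a minimal sketch padding an awkward (prime) length up to a DFT-friendly one:
+@code{.cpp}
+    int n = 997;                          // prime, slow to transform directly
+    int optimal = getOptimalDFTSize(n);   // 1000 = 2^3 * 5^3
+@endcode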
+@param vecsize vector size.
+@sa dft , dct , idft , idct , mulSpectrums
+*/
+CV_EXPORTS_W int getOptimalDFTSize(int vecsize);
+
+/** @brief Returns the default random number generator.
+
+The function theRNG returns the default random number generator. For each thread, there is a
+separate random number generator, so you can use the function safely in multi-thread environments.
+If you just need to get a single random number using this generator or initialize an array, you can
+use randu or randn instead. But if you are going to generate many random numbers inside a loop, it
+is much faster to use this function to retrieve the generator and then use RNG::operator _Tp() .
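+
+For example, a minimal sketch reusing the generator inside a loop:
+@code{.cpp}
+    RNG& rng = theRNG();
+    std::vector<double> samples;
+    for( int i = 0; i < 10; i++ )
+        samples.push_back(rng.uniform(0.0, 1.0));
+@endcode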
+@sa RNG, randu, randn
+*/
+CV_EXPORTS RNG& theRNG();
+
+/** @brief Generates a single uniformly-distributed random number or an array of random numbers.
+
+Non-template variant of the function fills the matrix dst with uniformly-distributed
+random numbers from the specified range:
+\f[\texttt{low} _c \leq \texttt{dst} (I)_c < \texttt{high} _c\f]
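+
+For example, a minimal sketch filling a pre-allocated 8-bit matrix with values in [0, 255]:
+@code{.cpp}
+    Mat m(2, 3, CV_8UC1);
+    randu(m, Scalar(0), Scalar(256));   // the upper boundary is exclusive
+@endcode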
+@param dst output array of random numbers; the array must be pre-allocated.
+@param low inclusive lower boundary of the generated random numbers.
+@param high exclusive upper boundary of the generated random numbers.
+@sa RNG, randn, theRNG
+*/
+CV_EXPORTS_W void randu(InputOutputArray dst, InputArray low, InputArray high);
+
+/** @brief Fills the array with normally distributed random numbers.
+
+The function randn fills the matrix dst with normally distributed random numbers with the specified
+mean vector and the standard deviation matrix. The generated random numbers are clipped to fit the
+value range of the output array data type.
+@param dst output array of random numbers; the array must be pre-allocated and have 1 to 4 channels.
+@param mean mean value (expectation) of the generated random numbers.
+@param stddev standard deviation of the generated random numbers; it can be either a vector (in
+which case a diagonal standard deviation matrix is assumed) or a square matrix.
+@sa RNG, randu
+*/
+CV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev);
+
+/** @brief Shuffles the array elements randomly.
+
+The function randShuffle shuffles the specified 1D array by randomly choosing pairs of elements and
+swapping them. The number of such swap operations will be dst.rows\*dst.cols\*iterFactor .
+@param dst input/output numerical 1D array.
+@param iterFactor scale factor that determines the number of random swap operations (see the details
+below).
+@param rng optional random number generator used for shuffling; if it is zero, theRNG () is used
+instead.
+@sa RNG, sort
+*/
+CV_EXPORTS_W void randShuffle(InputOutputArray dst, double iterFactor = 1., RNG* rng = 0);
+
+/** @brief Principal Component Analysis
+
+The class is used to calculate a special basis for a set of vectors. The
+basis will consist of eigenvectors of the covariance matrix calculated
+from the input set of vectors. The class %PCA can also transform
+vectors to/from the new coordinate space defined by the basis. Usually,
+in this new coordinate system, each vector from the original set (and
+any linear combination of such vectors) can be quite accurately
+approximated by taking its first few components, corresponding to the
+eigenvectors of the largest eigenvalues of the covariance matrix.
+Geometrically it means that you calculate a projection of the vector to
+a subspace formed by a few eigenvectors corresponding to the dominant
+eigenvalues of the covariance matrix. And usually such a projection is
+very close to the original vector. So, you can represent the original
+vector from a high-dimensional space with a much shorter vector
+consisting of the projected vector's coordinates in the subspace. Such a
+transformation is also known as Karhunen-Loeve Transform, or KLT.
+See http://en.wikipedia.org/wiki/Principal_component_analysis
+
+The sample below is a function that takes two matrices. The first
+matrix stores a set of vectors (a row per vector) that is used to
+calculate PCA. The second matrix stores another "test" set of vectors
+(a row per vector). First, these vectors are compressed with PCA, then
+reconstructed back, and then the reconstruction error norm is computed
+and printed for each vector:
+
+@code{.cpp}
+using namespace cv;
+
+PCA compressPCA(const Mat& pcaset, int maxComponents,
+ const Mat& testset, Mat& compressed)
+{
+ PCA pca(pcaset, // pass the data
+ Mat(), // we do not have a pre-computed mean vector,
+                          // so let the PCA engine compute it
+ PCA::DATA_AS_ROW, // indicate that the vectors
+ // are stored as matrix rows
+ // (use PCA::DATA_AS_COL if the vectors are
+ // the matrix columns)
+ maxComponents // specify, how many principal components to retain
+ );
+ // if there is no test data, just return the computed basis, ready-to-use
+ if( !testset.data )
+ return pca;
+ CV_Assert( testset.cols == pcaset.cols );
+
+ compressed.create(testset.rows, maxComponents, testset.type());
+
+ Mat reconstructed;
+ for( int i = 0; i < testset.rows; i++ )
+ {
+        Mat vec = testset.row(i), coeffs = compressed.row(i);
+ // compress the vector, the result will be stored
+ // in the i-th row of the output matrix
+ pca.project(vec, coeffs);
+ // and then reconstruct it
+ pca.backProject(coeffs, reconstructed);
+ // and measure the error
+ printf("%d. diff = %g\n", i, norm(vec, reconstructed, NORM_L2));
+ }
+ return pca;
+}
+@endcode
+@sa calcCovarMatrix, mulTransposed, SVD, dft, dct
+*/
+class CV_EXPORTS PCA
+{
+public:
+ enum Flags { DATA_AS_ROW = 0, //!< indicates that the input samples are stored as matrix rows
+ DATA_AS_COL = 1, //!< indicates that the input samples are stored as matrix columns
+                 USE_AVG = 2 //!< use a pre-computed average vector supplied by the user
+ };
+
+ /** @brief default constructor
+
+ The default constructor initializes an empty %PCA structure. The other
+ constructors initialize the structure and call PCA::operator()().
+ */
+ PCA();
+
+ /** @overload
+ @param data input samples stored as matrix rows or matrix columns.
+ @param mean optional mean value; if the matrix is empty (@c noArray()),
+ the mean is computed from the data.
+ @param flags operation flags; currently the parameter is only used to
+ specify the data layout (PCA::Flags)
+ @param maxComponents maximum number of components that %PCA should
+ retain; by default, all the components are retained.
+ */
+ PCA(InputArray data, InputArray mean, int flags, int maxComponents = 0);
+
+ /** @overload
+ @param data input samples stored as matrix rows or matrix columns.
+ @param mean optional mean value; if the matrix is empty (noArray()),
+ the mean is computed from the data.
+ @param flags operation flags; currently the parameter is only used to
+ specify the data layout (PCA::Flags)
+ @param retainedVariance Percentage of variance that PCA should retain.
+    Using this parameter will let the PCA decide how many components to
+    retain, but it will always keep at least 2.
+ */
+ PCA(InputArray data, InputArray mean, int flags, double retainedVariance);
+
+ /** @brief performs %PCA
+
+ The operator performs %PCA of the supplied dataset. It is safe to reuse
+ the same PCA structure for multiple datasets. That is, if the structure
+ has been previously used with another dataset, the existing internal
+ data is reclaimed and the new eigenvalues, @ref eigenvectors , and @ref
+ mean are allocated and computed.
+
+ The computed eigenvalues are sorted from the largest to the smallest and
+ the corresponding eigenvectors are stored as eigenvectors rows.
+
+ @param data input samples stored as the matrix rows or as the matrix
+ columns.
+ @param mean optional mean value; if the matrix is empty (noArray()),
+ the mean is computed from the data.
+ @param flags operation flags; currently the parameter is only used to
+ specify the data layout. (Flags)
+ @param maxComponents maximum number of components that PCA should
+ retain; by default, all the components are retained.
+ */
+ PCA& operator()(InputArray data, InputArray mean, int flags, int maxComponents = 0);
+
+ /** @overload
+ @param data input samples stored as the matrix rows or as the matrix
+ columns.
+ @param mean optional mean value; if the matrix is empty (noArray()),
+ the mean is computed from the data.
+ @param flags operation flags; currently the parameter is only used to
+ specify the data layout. (PCA::Flags)
+ @param retainedVariance Percentage of variance that %PCA should retain.
+    Using this parameter will let the %PCA decide how many components to
+    retain, but it will always keep at least 2.
+ */
+ PCA& operator()(InputArray data, InputArray mean, int flags, double retainedVariance);
+
+ /** @brief Projects vector(s) to the principal component subspace.
+
+ The methods project one or more vectors to the principal component
+ subspace, where each vector projection is represented by coefficients in
+ the principal component basis. The first form of the method returns the
+ matrix that the second form writes to the result. So the first form can
+ be used as a part of expression while the second form can be more
+ efficient in a processing loop.
+ @param vec input vector(s); must have the same dimensionality and the
+    same layout as the input data used at the %PCA phase, that is, if
+    DATA_AS_ROW is specified, then `vec.cols==data.cols`
+ (vector dimensionality) and `vec.rows` is the number of vectors to
+ project, and the same is true for the PCA::DATA_AS_COL case.
+ */
+ Mat project(InputArray vec) const;
+
+ /** @overload
+ @param vec input vector(s); must have the same dimensionality and the
+    same layout as the input data used at the PCA phase, that is, if
+    DATA_AS_ROW is specified, then `vec.cols==data.cols`
+ (vector dimensionality) and `vec.rows` is the number of vectors to
+ project, and the same is true for the PCA::DATA_AS_COL case.
+ @param result output vectors; in case of PCA::DATA_AS_COL, the
+    output matrix has as many columns as the number of input vectors; this
+    means that `result.cols==vec.cols` and the number of rows matches the
+    number of principal components (for example, the `maxComponents` parameter
+ passed to the constructor).
+ */
+ void project(InputArray vec, OutputArray result) const;
+
+ /** @brief Reconstructs vectors from their PC projections.
+
+ The methods are inverse operations to PCA::project. They take PC
+ coordinates of projected vectors and reconstruct the original vectors.
+ Unless all the principal components have been retained, the
+ reconstructed vectors are different from the originals. But typically,
+ the difference is small if the number of components is large enough (but
+    still much smaller than the original vector dimensionality), which is
+    why PCA is used in the first place.
+ @param vec coordinates of the vectors in the principal component
+ subspace, the layout and size are the same as of PCA::project output
+ vectors.
+ */
+ Mat backProject(InputArray vec) const;
+
+ /** @overload
+ @param vec coordinates of the vectors in the principal component
+ subspace, the layout and size are the same as of PCA::project output
+ vectors.
+ @param result reconstructed vectors; the layout and size are the same as
+ of PCA::project input vectors.
+ */
+ void backProject(InputArray vec, OutputArray result) const;
+
+    /** @brief write the %PCA state to / read it from the given file storage
+     */
+ void write(FileStorage& fs ) const;
+ void read(const FileNode& fs);
+
+    Mat eigenvectors; //!< eigenvectors of the covariance matrix
+    Mat eigenvalues; //!< eigenvalues of the covariance matrix
+ Mat mean; //!< mean value subtracted before the projection and added after the back projection
+};
+
+/** @example pca.cpp
+ An example using %PCA for dimensionality reduction while maintaining an amount of variance
+ */
+
+/**
+ @brief Linear Discriminant Analysis
+ @todo document this class
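+
+    A small usage sketch (the data and labels below are placeholders, assumed to be
+    filled elsewhere; each row of data is one sample, labels holds its 0-based class index):
+    @code{.cpp}
+    Mat data;     // e.g. a CV_64F feature matrix, one sample per row
+    Mat labels;   // e.g. a CV_32S column vector of class labels
+    LDA lda(data, labels);                            // compute the Fisher discriminants
+    Mat projected     = lda.project(data);            // project samples into the LDA subspace
+    Mat reconstructed = lda.reconstruct(projected);   // map the projections back
+    @endcode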
+ */
+class CV_EXPORTS LDA
+{
+public:
+ /** @brief constructor
+    Initializes an LDA with num_components (default 0).
+ */
+ explicit LDA(int num_components = 0);
+
+ /** Initializes and performs a Discriminant Analysis with Fisher's
+ Optimization Criterion on given data in src and corresponding labels
+    in labels. If 0 (or fewer) components are requested, the number of
+    components is determined automatically from the given data.
+ */
+ LDA(InputArrayOfArrays src, InputArray labels, int num_components = 0);
+
+ /** Serializes this object to a given filename.
+ */
+ void save(const String& filename) const;
+
+ /** Deserializes this object from a given filename.
+ */
+ void load(const String& filename);
+
+ /** Serializes this object to a given cv::FileStorage.
+ */
+ void save(FileStorage& fs) const;
+
+ /** Deserializes this object from a given cv::FileStorage.
+ */
+ void load(const FileStorage& node);
+
+ /** destructor
+ */
+ ~LDA();
+
+ /** Compute the discriminants for data in src (row aligned) and labels.
+ */
+ void compute(InputArrayOfArrays src, InputArray labels);
+
+ /** Projects samples into the LDA subspace.
+ src may be one or more row aligned samples.
+ */
+ Mat project(InputArray src);
+
+ /** Reconstructs projections from the LDA subspace.
+ src may be one or more row aligned projections.
+ */
+ Mat reconstruct(InputArray src);
+
+ /** Returns the eigenvectors of this LDA.
+ */
+ Mat eigenvectors() const { return _eigenvectors; }
+
+ /** Returns the eigenvalues of this LDA.
+ */
+ Mat eigenvalues() const { return _eigenvalues; }
+
+ static Mat subspaceProject(InputArray W, InputArray mean, InputArray src);
+ static Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src);
+
+protected:
+ bool _dataAsRow; // unused, but needed for 3.0 ABI compatibility.
+ int _num_components;
+ Mat _eigenvectors;
+ Mat _eigenvalues;
+ void lda(InputArrayOfArrays src, InputArray labels);
+};
+
+/** @brief Singular Value Decomposition
+
+Class for computing Singular Value Decomposition of a floating-point
+matrix. The Singular Value Decomposition is used to solve least-square
+problems, under-determined linear systems, invert matrices, compute
+condition numbers, and so on.
+
+If you want to compute a condition number of a matrix or an absolute value of
+its determinant, you do not need `u` and `vt`. You can pass
+flags=SVD::NO_UV|... . Another flag SVD::FULL_UV indicates that full-size u
+and vt must be computed, which is not necessary most of the time.
+
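+For example, a rough sketch of estimating the condition number of a matrix,
+for which only the singular values are needed:
+@code{.cpp}
+Mat A = (Mat_<double>(2, 2) << 4, 0, 0, 0.5);
+Mat w;
+SVD::compute(A, w);   // only the singular values, sorted in descending order
+double cond = w.at<double>(0) / w.at<double>(w.rows - 1);   // 8 for this matrix
+@endcode
+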
+@sa invert, solve, eigen, determinant
+*/
+class CV_EXPORTS SVD
+{
+public:
+ enum Flags {
+ /** allow the algorithm to modify the decomposed matrix; it can save space and speed up
+ processing. currently ignored. */
+ MODIFY_A = 1,
+ /** indicates that only a vector of singular values `w` is to be processed, while u and vt
+ will be set to empty matrices */
+ NO_UV = 2,
+ /** when the matrix is not square, by default the algorithm produces u and vt matrices of
+ sufficiently large size for the further A reconstruction; if, however, FULL_UV flag is
+ specified, u and vt will be full-size square orthogonal matrices.*/
+ FULL_UV = 4
+ };
+
+ /** @brief the default constructor
+
+ initializes an empty SVD structure
+ */
+ SVD();
+
+ /** @overload
+ initializes an empty SVD structure and then calls SVD::operator()
+ @param src decomposed matrix.
+ @param flags operation flags (SVD::Flags)
+ */
+ SVD( InputArray src, int flags = 0 );
+
+ /** @brief the operator that performs SVD. The previously allocated u, w and vt are released.
+
+ The operator performs the singular value decomposition of the supplied
+    matrix. The u, `vt`, and the vector of singular values w are stored in
+    the structure. The same SVD structure can be reused many times with
+    different matrices. Each time, if needed, the previous u, `vt`, and w
+ are reclaimed and the new matrices are created, which is all handled by
+ Mat::create.
+ @param src decomposed matrix.
+ @param flags operation flags (SVD::Flags)
+ */
+ SVD& operator ()( InputArray src, int flags = 0 );
+
+ /** @brief decomposes matrix and stores the results to user-provided matrices
+
+ The methods/functions perform SVD of matrix. Unlike SVD::SVD constructor
+ and SVD::operator(), they store the results to the user-provided
+ matrices:
+
+ @code{.cpp}
+ Mat A, w, u, vt;
+ SVD::compute(A, w, u, vt);
+ @endcode
+
+ @param src decomposed matrix
+ @param w calculated singular values
+ @param u calculated left singular vectors
+    @param vt transposed matrix of right singular vectors
+ @param flags operation flags - see SVD::SVD.
+ */
+ static void compute( InputArray src, OutputArray w,
+ OutputArray u, OutputArray vt, int flags = 0 );
+
+ /** @overload
+ computes singular values of a matrix
+ @param src decomposed matrix
+ @param w calculated singular values
+ @param flags operation flags - see SVD::Flags.
+ */
+ static void compute( InputArray src, OutputArray w, int flags = 0 );
+
+ /** @brief performs back substitution
+ */
+ static void backSubst( InputArray w, InputArray u,
+ InputArray vt, InputArray rhs,
+ OutputArray dst );
+
+ /** @brief solves an under-determined singular linear system
+
+ The method finds a unit-length solution x of a singular linear system
+ A\*x = 0. Depending on the rank of A, there can be no solutions, a
+ single solution or an infinite number of solutions. In general, the
+ algorithm solves the following problem:
+ \f[dst = \arg \min _{x: \| x \| =1} \| src \cdot x \|\f]
+ @param src left-hand-side matrix.
+ @param dst found solution.
+ */
+ static void solveZ( InputArray src, OutputArray dst );
+
+ /** @brief performs a singular value back substitution.
+
+ The method calculates a back substitution for the specified right-hand
+ side:
+
+ \f[\texttt{x} = \texttt{vt} ^T \cdot diag( \texttt{w} )^{-1} \cdot \texttt{u} ^T \cdot \texttt{rhs} \sim \texttt{A} ^{-1} \cdot \texttt{rhs}\f]
+
+    Using this technique you can either get a very accurate solution of a
+    consistent linear system, or the best (in the least-squares terms)
+ pseudo-solution of an overdetermined linear system.
+
+ @param rhs right-hand side of a linear system (u\*w\*v')\*dst = rhs to
+ be solved, where A has been previously decomposed.
+
+ @param dst found solution of the system.
+
+ @note Explicit SVD with the further back substitution only makes sense
+ if you need to solve many linear systems with the same left-hand side
+ (for example, src ). If all you need is to solve a single system
+ (possibly with multiple rhs immediately available), simply call solve
+    and pass DECOMP_SVD there. It does absolutely the same thing.
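+
+    A small sketch of the reuse pattern this note describes (decompose once,
+    back-substitute for several right-hand sides; A and the right-hand sides
+    are assumed to be filled elsewhere):
+    @code{.cpp}
+    Mat A, rhs1, rhs2, x1, x2;
+    SVD svd(A);                  // the expensive decomposition is done once
+    svd.backSubst(rhs1, x1);     // each back substitution is cheap
+    svd.backSubst(rhs2, x2);
+    // for a single system the one-liner below does the same thing:
+    // solve(A, rhs1, x1, DECOMP_SVD);
+    @endcode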
+ */
+ void backSubst( InputArray rhs, OutputArray dst ) const;
+
+ /** @todo document */
+    template<typename _Tp, int m, int n, int nm> static
+ void compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt );
+
+ /** @todo document */
+    template<typename _Tp, int m, int n, int nm> static
+ void compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w );
+
+ /** @todo document */
+    template<typename _Tp, int m, int n, int nm, int nb> static
+ void backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u, const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, Matx<_Tp, n, nb>& dst );
+
+ Mat u, w, vt;
+};
+
+/** @brief Random Number Generator
+
+Random number generator. It encapsulates the state (currently, a 64-bit
+integer) and has methods to return scalar random values and to fill
+arrays with random values. Currently it supports uniform and Gaussian
+(normal) distributions. The generator uses the Multiply-With-Carry
+algorithm, introduced by G. Marsaglia.
+Gaussian-distribution random numbers are generated using the Ziggurat
+algorithm, introduced by G. Marsaglia and W. W. Tsang.
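+
+A short usage sketch:
+@code{.cpp}
+RNG rng(12345);                    // seed the generator
+int    i = rng.uniform(0, 10);     // integer in [0, 10)
+double d = rng.uniform(0., 1.);    // double in [0, 1)
+double g = rng.gaussian(2.0);      // zero-mean Gaussian sample with sigma = 2
+Mat m(4, 4, CV_8U);
+rng.fill(m, RNG::UNIFORM, Scalar::all(0), Scalar::all(256));   // fill a matrix with uniform bytes
+@endcode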
+*/
+class CV_EXPORTS RNG
+{
+public:
+ enum { UNIFORM = 0,
+ NORMAL = 1
+ };
+
+ /** @brief constructor
+
+ These are the RNG constructors. The first form sets the state to some
+ pre-defined value, equal to 2\*\*32-1 in the current implementation. The
+ second form sets the state to the specified value. If you passed state=0
+ , the constructor uses the above default value instead to avoid the
+ singular random number sequence, consisting of all zeros.
+ */
+ RNG();
+ /** @overload
+ @param state 64-bit value used to initialize the RNG.
+ */
+ RNG(uint64 state);
+ /**The method updates the state using the MWC algorithm and returns the
+ next 32-bit random number.*/
+ unsigned next();
+
+ /**Each of the methods updates the state using the MWC algorithm and
+ returns the next random number of the specified type. In case of integer
+ types, the returned number is from the available value range for the
+ specified type. In case of floating-point types, the returned value is
+ from [0,1) range.
+ */
+ operator uchar();
+ /** @overload */
+ operator schar();
+ /** @overload */
+ operator ushort();
+ /** @overload */
+ operator short();
+ /** @overload */
+ operator unsigned();
+ /** @overload */
+ operator int();
+ /** @overload */
+ operator float();
+ /** @overload */
+ operator double();
+
+ /** @brief returns a random integer sampled uniformly from [0, N).
+
+ The methods transform the state using the MWC algorithm and return the
+ next random number. The first form is equivalent to RNG::next . The
+ second form returns the random number modulo N , which means that the
+ result is in the range [0, N) .
+ */
+ unsigned operator ()();
+ /** @overload
+ @param N upper non-inclusive boundary of the returned random number.
+ */
+ unsigned operator ()(unsigned N);
+
+ /** @brief returns uniformly distributed integer random number from [a,b) range
+
+ The methods transform the state using the MWC algorithm and return the
+ next uniformly-distributed random number of the specified type, deduced
+ from the input parameter type, from the range [a, b) . There is a nuance
+ illustrated by the following sample:
+
+ @code{.cpp}
+ RNG rng;
+
+ // always produces 0
+ double a = rng.uniform(0, 1);
+
+ // produces double from [0, 1)
+ double a1 = rng.uniform((double)0, (double)1);
+
+ // produces float from [0, 1)
+ double b = rng.uniform(0.f, 1.f);
+
+ // produces double from [0, 1)
+ double c = rng.uniform(0., 1.);
+
+ // may cause compiler error because of ambiguity:
+ // RNG::uniform(0, (int)0.999999)? or RNG::uniform((double)0, 0.99999)?
+ double d = rng.uniform(0, 0.999999);
+ @endcode
+
+ The compiler does not take into account the type of the variable to
+ which you assign the result of RNG::uniform . The only thing that
+ matters to the compiler is the type of a and b parameters. So, if you
+ want a floating-point random number, but the range boundaries are
+ integer numbers, either put dots in the end, if they are constants, or
+ use explicit type cast operators, as in the a1 initialization above.
+ @param a lower inclusive boundary of the returned random numbers.
+ @param b upper non-inclusive boundary of the returned random numbers.
+ */
+ int uniform(int a, int b);
+ /** @overload */
+ float uniform(float a, float b);
+ /** @overload */
+ double uniform(double a, double b);
+
+ /** @brief Fills arrays with random numbers.
+
+ @param mat 2D or N-dimensional matrix; currently matrices with more than
+ 4 channels are not supported by the methods, use Mat::reshape as a
+ possible workaround.
+ @param distType distribution type, RNG::UNIFORM or RNG::NORMAL.
+ @param a first distribution parameter; in case of the uniform
+ distribution, this is an inclusive lower boundary, in case of the normal
+ distribution, this is a mean value.
+ @param b second distribution parameter; in case of the uniform
+ distribution, this is a non-inclusive upper boundary, in case of the
+ normal distribution, this is a standard deviation (diagonal of the
+ standard deviation matrix or the full standard deviation matrix).
+ @param saturateRange pre-saturation flag; for uniform distribution only;
+ if true, the method will first convert a and b to the acceptable value
+ range (according to the mat datatype) and then will generate uniformly
+ distributed random numbers within the range [saturate(a), saturate(b)),
+ if saturateRange=false, the method will generate uniformly distributed
+ random numbers in the original range [a, b) and then will saturate them,
+ it means, for example, that
+ theRNG().fill(mat_8u, RNG::UNIFORM, -DBL_MAX, DBL_MAX) will likely
+    produce an array mostly filled with 0's and 255's, since the range (0, 255)
+ is significantly smaller than [-DBL_MAX, DBL_MAX).
+
+ Each of the methods fills the matrix with the random values from the
+ specified distribution. As the new numbers are generated, the RNG state
+ is updated accordingly. In case of multiple-channel images, every
+ channel is filled independently, which means that RNG cannot generate
+ samples from the multi-dimensional Gaussian distribution with
+ non-diagonal covariance matrix directly. To do that, the method
+ generates samples from multi-dimensional standard Gaussian distribution
+    with zero mean and identity covariance matrix, and then transforms them
+ using transform to get samples from the specified Gaussian distribution.
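+
+    For example (a small sketch):
+    @code{.cpp}
+    RNG& rng = theRNG();
+    Mat noise(240, 320, CV_8UC3);
+    rng.fill(noise, RNG::UNIFORM, Scalar::all(0), Scalar::all(256));   // uniform color noise
+    Mat gauss(240, 320, CV_32F);
+    rng.fill(gauss, RNG::NORMAL, Scalar(0), Scalar(1));                // zero-mean, unit-sigma noise
+    @endcode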
+ */
+ void fill( InputOutputArray mat, int distType, InputArray a, InputArray b, bool saturateRange = false );
+
+ /** @brief Returns the next random number sampled from the Gaussian distribution
+ @param sigma standard deviation of the distribution.
+
+ The method transforms the state using the MWC algorithm and returns the
+ next random number from the Gaussian distribution N(0,sigma) . That is,
+ the mean value of the returned random numbers is zero and the standard
+ deviation is the specified sigma .
+ */
+ double gaussian(double sigma);
+
+ uint64 state;
+};
+
+/** @brief Mersenne Twister random number generator
+
+Inspired by http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.c
+@todo document
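+
+A short usage sketch:
+@code{.cpp}
+RNG_MT19937 rng(1234);              // seed the generator
+unsigned u = rng(100);              // integer in [0, 100)
+int      i = rng.uniform(-5, 5);    // integer in [-5, 5)
+float    f = rng.uniform(0.f, 1.f); // float in [0, 1)
+@endcode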
+ */
+class CV_EXPORTS RNG_MT19937
+{
+public:
+ RNG_MT19937();
+ RNG_MT19937(unsigned s);
+ void seed(unsigned s);
+
+ unsigned next();
+
+ operator int();
+ operator unsigned();
+ operator float();
+ operator double();
+
+ unsigned operator ()(unsigned N);
+ unsigned operator ()();
+
+    /** @brief returns uniformly distributed integer random number from [a,b) range */
+    int uniform(int a, int b);
+    /** @brief returns uniformly distributed floating-point random number from [a,b) range */
+    float uniform(float a, float b);
+    /** @brief returns uniformly distributed double-precision floating-point random number from [a,b) range */
+    double uniform(double a, double b);
+
+private:
+ enum PeriodParameters {N = 624, M = 397};
+ unsigned state[N];
+ int mti;
+};
+
+//! @} core_array
+
+//! @addtogroup core_cluster
+//! @{
+
+/** @example kmeans.cpp
+ An example on K-means clustering
+*/
+
+/** @brief Finds centers of clusters and groups input samples around the clusters.
+
+The function kmeans implements a k-means algorithm that finds the centers of cluster_count clusters
+and groups the input samples around the clusters. As an output, \f$\texttt{labels}_i\f$ contains a
+0-based cluster index for the sample stored in the \f$i^{th}\f$ row of the samples matrix.
+
+@note
+- (Python) An example on K-means clustering can be found at
+ opencv_source_code/samples/python/kmeans.py
+@param data Data for clustering. An array of N-Dimensional points with float coordinates is needed.
+Examples of this array can be:
+- Mat points(count, 2, CV_32F);
+- Mat points(count, 1, CV_32FC2);
+- Mat points(1, count, CV_32FC2);
+- std::vector\<cv::Point2f\> points(sampleCount);
+@param K Number of clusters to split the set by.
+@param bestLabels Input/output integer array that stores the cluster indices for every sample.
+@param criteria The algorithm termination criteria, that is, the maximum number of iterations and/or
+the desired accuracy. The accuracy is specified as criteria.epsilon. As soon as each of the cluster
+centers moves by less than criteria.epsilon on some iteration, the algorithm stops.
+@param attempts Flag to specify the number of times the algorithm is executed using different
+initial labellings. The algorithm returns the labels that yield the best compactness (see the last
+function parameter).
+@param flags Flag that can take values of cv::KmeansFlags
+@param centers Output matrix of the cluster centers, one row per each cluster center.
+@return The function returns the compactness measure that is computed as
+\f[\sum _i \| \texttt{samples} _i - \texttt{centers} _{ \texttt{labels} _i} \| ^2\f]
+after every attempt. The best (minimum) value is chosen and the corresponding labels and the
+compactness value are returned by the function. Basically, you can use only the core of the
+function, set the number of attempts to 1, initialize labels each time using a custom algorithm,
+pass them with the ( flags = KMEANS_USE_INITIAL_LABELS ) flag, and then choose the best
+(most-compact) clustering.
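+
+A minimal usage sketch (the points below are assumed to be filled with the samples to cluster):
+@code{.cpp}
+Mat points(100, 2, CV_32F);    // 100 two-dimensional float samples, filled elsewhere
+Mat labels, centers;
+double compactness = kmeans(points, 3, labels,
+                            TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 1.0),
+                            3, KMEANS_PP_CENTERS, centers);
+@endcode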
+*/
+CV_EXPORTS_W double kmeans( InputArray data, int K, InputOutputArray bestLabels,
+ TermCriteria criteria, int attempts,
+ int flags, OutputArray centers = noArray() );
+
+//! @} core_cluster
+
+//! @addtogroup core_basic
+//! @{
+
+/////////////////////////////// Formatted output of cv::Mat ///////////////////////////
+
+/** @todo document */
+class CV_EXPORTS Formatted
+{
+public:
+ virtual const char* next() = 0;
+ virtual void reset() = 0;
+ virtual ~Formatted();
+};
+
+/** @todo document */
+class CV_EXPORTS Formatter
+{
+public:
+ enum { FMT_DEFAULT = 0,
+ FMT_MATLAB = 1,
+ FMT_CSV = 2,
+ FMT_PYTHON = 3,
+ FMT_NUMPY = 4,
+ FMT_C = 5
+ };
+
+ virtual ~Formatter();
+
+    virtual Ptr<Formatted> format(const Mat& mtx) const = 0;
+
+ virtual void set32fPrecision(int p = 8) = 0;
+ virtual void set64fPrecision(int p = 16) = 0;
+ virtual void setMultiline(bool ml = true) = 0;
+
+    static Ptr<Formatter> get(int fmt = FMT_DEFAULT);
+
+};
+
+static inline
+String& operator << (String& out, Ptr<Formatted> fmtd)
+{
+ fmtd->reset();
+ for(const char* str = fmtd->next(); str; str = fmtd->next())
+ out += cv::String(str);
+ return out;
+}
+
+static inline
+String& operator << (String& out, const Mat& mtx)
+{
+ return out << Formatter::get()->format(mtx);
+}
+
+//////////////////////////////////////// Algorithm ////////////////////////////////////
+
+class CV_EXPORTS Algorithm;
+
+template<typename _Tp> struct ParamType {};
+
+
+/** @brief This is a base class for all more or less complex algorithms in OpenCV
+
+especially for classes of algorithms, for which there can be multiple implementations. The examples
+are stereo correspondence (for which there are algorithms like block matching, semi-global block
+matching, graph-cut etc.), background subtraction (which can be done using mixture-of-gaussians
+models, codebook-based algorithm etc.), optical flow (block matching, Lucas-Kanade, Horn-Schunck
+etc.).
+
+Here is an example of using SIFT in your application via the Algorithm interface:
+@code
+ #include "opencv2/opencv.hpp"
+ #include "opencv2/xfeatures2d.hpp"
+ using namespace cv::xfeatures2d;
+
+    Ptr<SIFT> sift = SIFT::create();
+ FileStorage fs("sift_params.xml", FileStorage::READ);
+ if( fs.isOpened() ) // if we have file with parameters, read them
+ {
+ sift->read(fs["sift_params"]);
+ fs.release();
+ }
+ else // else modify the parameters and store them; user can later edit the file to use different parameters
+ {
+ sift->setContrastThreshold(0.01f); // lower the contrast threshold, compared to the default value
+ {
+ WriteStructContext ws(fs, "sift_params", CV_NODE_MAP);
+ sift->write(fs);
+ }
+ }
+ Mat image = imread("myimage.png", 0), descriptors;
+    vector<KeyPoint> keypoints;
+ sift->detectAndCompute(image, noArray(), keypoints, descriptors);
+@endcode
+ */
+class CV_EXPORTS_W Algorithm
+{
+public:
+ Algorithm();
+ virtual ~Algorithm();
+
+ /** @brief Clears the algorithm state
+ */
+ CV_WRAP virtual void clear() {}
+
+ /** @brief Stores algorithm parameters in a file storage
+ */
+ virtual void write(FileStorage& fs) const { (void)fs; }
+
+ /** @brief Reads algorithm parameters from a file storage
+ */
+ virtual void read(const FileNode& fn) { (void)fn; }
+
+    /** @brief Returns true if the Algorithm is empty (e.g. in the very beginning or after unsuccessful read)
+ */
+ virtual bool empty() const { return false; }
+
+ /** @brief Reads algorithm from the file node
+
+    This is a static template method of Algorithm. Its usage is as follows (in the case of SVM):
+    @code
+    Ptr<SVM> svm = Algorithm::read<SVM>(fn);
+ @endcode
+ In order to make this method work, the derived class must overwrite Algorithm::read(const
+ FileNode& fn) and also have static create() method without parameters
+ (or with all the optional parameters)
+ */
+    template<typename _Tp> static Ptr<_Tp> read(const FileNode& fn)
+ {
+ Ptr<_Tp> obj = _Tp::create();
+ obj->read(fn);
+ return !obj->empty() ? obj : Ptr<_Tp>();
+ }
+
+ /** @brief Loads algorithm from the file
+
+ @param filename Name of the file to read.
+ @param objname The optional name of the node to read (if empty, the first top-level node will be used)
+
+    This is a static template method of Algorithm. Its usage is as follows (in the case of SVM):
+    @code
+    Ptr<SVM> svm = Algorithm::load<SVM>("my_svm_model.xml");
+ @endcode
+ In order to make this method work, the derived class must overwrite Algorithm::read(const
+ FileNode& fn).
+ */
+    template<typename _Tp> static Ptr<_Tp> load(const String& filename, const String& objname=String())
+ {
+ FileStorage fs(filename, FileStorage::READ);
+ FileNode fn = objname.empty() ? fs.getFirstTopLevelNode() : fs[objname];
+ Ptr<_Tp> obj = _Tp::create();
+ obj->read(fn);
+ return !obj->empty() ? obj : Ptr<_Tp>();
+ }
+
+ /** @brief Loads algorithm from a String
+
+ @param strModel The string variable containing the model you want to load.
+ @param objname The optional name of the node to read (if empty, the first top-level node will be used)
+
+    This is a static template method of Algorithm. Its usage is as follows (in the case of SVM):
+    @code
+    Ptr<SVM> svm = Algorithm::loadFromString<SVM>(myStringModel);
+ @endcode
+ */
+    template<typename _Tp> static Ptr<_Tp> loadFromString(const String& strModel, const String& objname=String())
+ {
+ FileStorage fs(strModel, FileStorage::READ + FileStorage::MEMORY);
+ FileNode fn = objname.empty() ? fs.getFirstTopLevelNode() : fs[objname];
+ Ptr<_Tp> obj = _Tp::create();
+ obj->read(fn);
+ return !obj->empty() ? obj : Ptr<_Tp>();
+ }
+
+ /** Saves the algorithm to a file.
+ In order to make this method work, the derived class must implement Algorithm::write(FileStorage& fs). */
+ CV_WRAP virtual void save(const String& filename) const;
+
+ /** Returns the algorithm string identifier.
+ This string is used as top level xml/yml node tag when the object is saved to a file or string. */
+ CV_WRAP virtual String getDefaultName() const;
+};
+
+struct Param {
+ enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7,
+ UNSIGNED_INT=8, UINT64=9, UCHAR=11 };
+};
+
+
+
+template<> struct ParamType<bool>
+{
+ typedef bool const_param_type;
+ typedef bool member_type;
+
+ enum { type = Param::BOOLEAN };
+};
+
+template<> struct ParamType<int>
+{
+ typedef int const_param_type;
+ typedef int member_type;
+
+ enum { type = Param::INT };
+};
+
+template<> struct ParamType<double>
+{
+ typedef double const_param_type;
+ typedef double member_type;
+
+ enum { type = Param::REAL };
+};
+
+template<> struct ParamType<String>
+{
+ typedef const String& const_param_type;
+ typedef String member_type;
+
+ enum { type = Param::STRING };
+};
+
+template<> struct ParamType<Mat>
+{
+ typedef const Mat& const_param_type;
+ typedef Mat member_type;
+
+ enum { type = Param::MAT };
+};
+
+template<> struct ParamType<std::vector<Mat> >
+{
+    typedef const std::vector<Mat>& const_param_type;
+    typedef std::vector<Mat> member_type;
+
+ enum { type = Param::MAT_VECTOR };
+};
+
+template<> struct ParamType<Algorithm>
+{
+    typedef const Ptr<Algorithm>& const_param_type;
+    typedef Ptr<Algorithm> member_type;
+
+ enum { type = Param::ALGORITHM };
+};
+
+template<> struct ParamType<float>
+{
+ typedef float const_param_type;
+ typedef float member_type;
+
+ enum { type = Param::FLOAT };
+};
+
+template<> struct ParamType<unsigned>
+{
+ typedef unsigned const_param_type;
+ typedef unsigned member_type;
+
+ enum { type = Param::UNSIGNED_INT };
+};
+
+template<> struct ParamType<uint64>
+{
+ typedef uint64 const_param_type;
+ typedef uint64 member_type;
+
+ enum { type = Param::UINT64 };
+};
+
+template<> struct ParamType<uchar>
+{
+ typedef uchar const_param_type;
+ typedef uchar member_type;
+
+ enum { type = Param::UCHAR };
+};
+
+//! @} core_basic
+
+} //namespace cv
+
+#include "opencv2/core/operations.hpp"
+#include "opencv2/core/cvstd.inl.hpp"
+#include "opencv2/core/utility.hpp"
+#include "opencv2/core/optim.hpp"
+
+#endif /*__OPENCV_CORE_HPP__*/
diff --git a/lib/opencv/include/opencv2/core/affine.hpp b/lib/opencv/include/opencv2/core/affine.hpp
new file mode 100644
index 000000000..2bce5b989
--- /dev/null
+++ b/lib/opencv/include/opencv2/core/affine.hpp
@@ -0,0 +1,522 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_AFFINE3_HPP__
+#define __OPENCV_CORE_AFFINE3_HPP__
+
+#ifdef __cplusplus
+
+#include <opencv2/core.hpp>
+
+namespace cv
+{
+
+//! @addtogroup core
+//! @{
+
+ /** @brief Affine transform
+ @todo document
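+
+    A small usage sketch:
+    @code{.cpp}
+    cv::Vec3d rvec(0, 0, CV_PI/2);                // Rodrigues vector: 90-degree rotation around Z
+    cv::Affine3d T(rvec, cv::Vec3d(1, 0, 0));     // rotation followed by a translation along X
+    cv::Vec3d p = T * cv::Vec3d(1, 0, 0);         // transform a point, here approximately (1, 1, 0)
+    cv::Affine3d Tinv = T.inv();                  // inverse transform
+    @endcode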
+ */
+    template<typename T>
+    class Affine3
+    {
+    public:
+        typedef T float_type;
+        typedef Matx<float_type, 3, 3> Mat3;
+        typedef Matx<float_type, 4, 4> Mat4;
+        typedef Vec<float_type, 3> Vec3;
+
+ Affine3();
+
+ //! Augmented affine matrix
+ Affine3(const Mat4& affine);
+
+ //! Rotation matrix
+ Affine3(const Mat3& R, const Vec3& t = Vec3::all(0));
+
+ //! Rodrigues vector
+ Affine3(const Vec3& rvec, const Vec3& t = Vec3::all(0));
+
+        //! Combines all constructors above. Supports 4x4, 4x3, 3x3, 1x3, 3x1 sizes of data matrix
+ explicit Affine3(const Mat& data, const Vec3& t = Vec3::all(0));
+
+        //! From a 16-element array
+ explicit Affine3(const float_type* vals);
+
+ //! Create identity transform
+ static Affine3 Identity();
+
+ //! Rotation matrix
+ void rotation(const Mat3& R);
+
+ //! Rodrigues vector
+ void rotation(const Vec3& rvec);
+
+        //! Combines rotation methods above. Supports 3x3, 1x3, 3x1 sizes of data matrix;
+ void rotation(const Mat& data);
+
+ void linear(const Mat3& L);
+ void translation(const Vec3& t);
+
+ Mat3 rotation() const;
+ Mat3 linear() const;
+ Vec3 translation() const;
+
+ //! Rodrigues vector
+ Vec3 rvec() const;
+
+ Affine3 inv(int method = cv::DECOMP_SVD) const;
+
+ //! a.rotate(R) is equivalent to Affine(R, 0) * a;
+ Affine3 rotate(const Mat3& R) const;
+
+ //! a.rotate(rvec) is equivalent to Affine(rvec, 0) * a;
+ Affine3 rotate(const Vec3& rvec) const;
+
+ //! a.translate(t) is equivalent to Affine(E, t) * a;
+ Affine3 translate(const Vec3& t) const;
+
+ //! a.concatenate(affine) is equivalent to affine * a;
+ Affine3 concatenate(const Affine3& affine) const;
+
+        template <typename Y> operator Affine3<Y>() const;
+
+        template <typename Y> Affine3<Y> cast() const;
+
+ Mat4 matrix;
+
+#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H
+        Affine3(const Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>& affine);
+        Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine);
+        operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>() const;
+        operator Eigen::Transform<T, 3, Eigen::Affine>() const;
+#endif
+ };
+
+    template<typename T> static
+    Affine3<T> operator*(const Affine3<T>& affine1, const Affine3<T>& affine2);
+
+    template<typename T, typename V> static
+    V operator*(const Affine3<T>& affine, const V& vector);
+
+    typedef Affine3<float> Affine3f;
+    typedef Affine3<double> Affine3d;
+
+ static Vec3f operator*(const Affine3f& affine, const Vec3f& vector);
+ static Vec3d operator*(const Affine3d& affine, const Vec3d& vector);
+
+    template<typename _Tp> class DataType< Affine3<_Tp> >
+ {
+ public:
+ typedef Affine3<_Tp> value_type;
+        typedef Affine3<typename DataType<_Tp>::work_type> work_type;
+ typedef _Tp channel_type;
+
+ enum { generic_type = 0,
+               depth = DataType<channel_type>::depth,
+ channels = 16,
+               fmt = DataType<channel_type>::fmt + ((channels - 1) << 8),
+ type = CV_MAKETYPE(depth, channels)
+ };
+
+        typedef Vec<channel_type, channels> vec_type;
+ };
+
+//! @} core
+
+}
+
+//! @cond IGNORED
+
+///////////////////////////////////////////////////////////////////////////////////
+// Implementation
+
+template<typename T> inline
+cv::Affine3<T>::Affine3()
+ : matrix(Mat4::eye())
+{}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Mat4& affine)
+ : matrix(affine)
+{}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Mat3& R, const Vec3& t)
+{
+ rotation(R);
+ translation(t);
+ matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
+ matrix.val[15] = 1;
+}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Vec3& _rvec, const Vec3& t)
+{
+ rotation(_rvec);
+ translation(t);
+ matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
+ matrix.val[15] = 1;
+}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const cv::Mat& data, const Vec3& t)
+{
+    CV_Assert(data.type() == cv::DataType<T>::type);
+
+ if (data.cols == 4 && data.rows == 4)
+ {
+ data.copyTo(matrix);
+ return;
+ }
+ else if (data.cols == 4 && data.rows == 3)
+ {
+ rotation(data(Rect(0, 0, 3, 3)));
+ translation(data(Rect(3, 0, 1, 3)));
+ return;
+ }
+
+ rotation(data);
+ translation(t);
+ matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
+ matrix.val[15] = 1;
+}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const float_type* vals) : matrix(vals)
+{}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::Identity()
+{
+    return Affine3<T>(cv::Affine3<T>::Mat4::eye());
+}
+
+template<typename T> inline
+void cv::Affine3<T>::rotation(const Mat3& R)
+{
+ linear(R);
+}
+
+template<typename T> inline
+void cv::Affine3<T>::rotation(const Vec3& _rvec)
+{
+ double rx = _rvec[0], ry = _rvec[1], rz = _rvec[2];
+ double theta = std::sqrt(rx*rx + ry*ry + rz*rz);
+
+ if (theta < DBL_EPSILON)
+ rotation(Mat3::eye());
+ else
+ {
+ const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
+
+ double c = std::cos(theta);
+ double s = std::sin(theta);
+ double c1 = 1. - c;
+ double itheta = (theta != 0) ? 1./theta : 0.;
+
+ rx *= itheta; ry *= itheta; rz *= itheta;
+
+ double rrt[] = { rx*rx, rx*ry, rx*rz, rx*ry, ry*ry, ry*rz, rx*rz, ry*rz, rz*rz };
+ double _r_x_[] = { 0, -rz, ry, rz, 0, -rx, -ry, rx, 0 };
+ Mat3 R;
+
+ // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x]
+ // where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0]
+ for(int k = 0; k < 9; ++k)
+            R.val[k] = static_cast<float_type>(c*I[k] + c1*rrt[k] + s*_r_x_[k]);
+
+ rotation(R);
+ }
+}
+
+//Combines rotation methods above. Supports 3x3, 1x3, 3x1 sizes of data matrix;
+template<typename T> inline
+void cv::Affine3<T>::rotation(const cv::Mat& data)
+{
+    CV_Assert(data.type() == cv::DataType<T>::type);
+
+ if (data.cols == 3 && data.rows == 3)
+ {
+ Mat3 R;
+ data.copyTo(R);
+ rotation(R);
+ }
+ else if ((data.cols == 3 && data.rows == 1) || (data.cols == 1 && data.rows == 3))
+ {
+ Vec3 _rvec;
+ data.reshape(1, 3).copyTo(_rvec);
+ rotation(_rvec);
+ }
+ else
+        CV_Assert(!"Input matrix can be 3x3, 1x3 or 3x1");
+}
+
+template<typename T> inline
+void cv::Affine3<T>::linear(const Mat3& L)
+{
+ matrix.val[0] = L.val[0]; matrix.val[1] = L.val[1]; matrix.val[ 2] = L.val[2];
+ matrix.val[4] = L.val[3]; matrix.val[5] = L.val[4]; matrix.val[ 6] = L.val[5];
+ matrix.val[8] = L.val[6]; matrix.val[9] = L.val[7]; matrix.val[10] = L.val[8];
+}
+
+template<typename T> inline
+void cv::Affine3<T>::translation(const Vec3& t)
+{
+ matrix.val[3] = t[0]; matrix.val[7] = t[1]; matrix.val[11] = t[2];
+}
+
+template<typename T> inline
+typename cv::Affine3<T>::Mat3 cv::Affine3<T>::rotation() const
+{
+ return linear();
+}
+
+template<typename T> inline
+typename cv::Affine3<T>::Mat3 cv::Affine3<T>::linear() const
+{
+    typename cv::Affine3<T>::Mat3 R;
+ R.val[0] = matrix.val[0]; R.val[1] = matrix.val[1]; R.val[2] = matrix.val[ 2];
+ R.val[3] = matrix.val[4]; R.val[4] = matrix.val[5]; R.val[5] = matrix.val[ 6];
+ R.val[6] = matrix.val[8]; R.val[7] = matrix.val[9]; R.val[8] = matrix.val[10];
+ return R;
+}
+
+template<typename T> inline
+typename cv::Affine3<T>::Vec3 cv::Affine3<T>::translation() const
+{
+ return Vec3(matrix.val[3], matrix.val[7], matrix.val[11]);
+}
+
+template<typename T> inline
+typename cv::Affine3<T>::Vec3 cv::Affine3<T>::rvec() const
+{
+ cv::Vec3d w;
+ cv::Matx33d u, vt, R = rotation();
+ cv::SVD::compute(R, w, u, vt, cv::SVD::FULL_UV + cv::SVD::MODIFY_A);
+ R = u * vt;
+
+ double rx = R.val[7] - R.val[5];
+ double ry = R.val[2] - R.val[6];
+ double rz = R.val[3] - R.val[1];
+
+ double s = std::sqrt((rx*rx + ry*ry + rz*rz)*0.25);
+ double c = (R.val[0] + R.val[4] + R.val[8] - 1) * 0.5;
+ c = c > 1.0 ? 1.0 : c < -1.0 ? -1.0 : c;
+ double theta = acos(c);
+
+ if( s < 1e-5 )
+ {
+ if( c > 0 )
+ rx = ry = rz = 0;
+ else
+ {
+ double t;
+ t = (R.val[0] + 1) * 0.5;
+ rx = std::sqrt(std::max(t, 0.0));
+ t = (R.val[4] + 1) * 0.5;
+ ry = std::sqrt(std::max(t, 0.0)) * (R.val[1] < 0 ? -1.0 : 1.0);
+ t = (R.val[8] + 1) * 0.5;
+ rz = std::sqrt(std::max(t, 0.0)) * (R.val[2] < 0 ? -1.0 : 1.0);
+
+ if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R.val[5] > 0) != (ry*rz > 0) )
+ rz = -rz;
+ theta /= std::sqrt(rx*rx + ry*ry + rz*rz);
+ rx *= theta;
+ ry *= theta;
+ rz *= theta;
+ }
+ }
+ else
+ {
+ double vth = 1/(2*s);
+ vth *= theta;
+ rx *= vth; ry *= vth; rz *= vth;
+ }
+
+ return cv::Vec3d(rx, ry, rz);
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::inv(int method) const
+{
+ return matrix.inv(method);
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::rotate(const Mat3& R) const
+{
+ Mat3 Lc = linear();
+ Vec3 tc = translation();
+ Mat4 result;
+ result.val[12] = result.val[13] = result.val[14] = 0;
+ result.val[15] = 1;
+
+ for(int j = 0; j < 3; ++j)
+ {
+ for(int i = 0; i < 3; ++i)
+ {
+ float_type value = 0;
+ for(int k = 0; k < 3; ++k)
+ value += R(j, k) * Lc(k, i);
+ result(j, i) = value;
+ }
+
+ result(j, 3) = R.row(j).dot(tc.t());
+ }
+ return result;
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::rotate(const Vec3& _rvec) const
+{
+ return rotate(Affine3f(_rvec).rotation());
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::translate(const Vec3& t) const
+{
+ Mat4 m = matrix;
+ m.val[ 3] += t[0];
+ m.val[ 7] += t[1];
+ m.val[11] += t[2];
+ return m;
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::concatenate(const Affine3& affine) const
+{
+ return (*this).rotate(affine.rotation()).translate(affine.translation());
+}
+
+template<typename T> template <typename Y> inline
+cv::Affine3<T>::operator Affine3<Y>() const
+{
+    return Affine3<Y>(matrix);
+}
+
+template<typename T> template <typename Y> inline
+cv::Affine3<Y> cv::Affine3<T>::cast() const
+{
+    return Affine3<Y>(matrix);
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::operator*(const cv::Affine3<T>& affine1, const cv::Affine3<T>& affine2)
+{
+ return affine2.concatenate(affine1);
+}
+
+template<typename T, typename V> inline
+V cv::operator*(const cv::Affine3<T>& affine, const V& v)
+{
+    const typename Affine3<T>::Mat4& m = affine.matrix;
+
+ V r;
+ r.x = m.val[0] * v.x + m.val[1] * v.y + m.val[ 2] * v.z + m.val[ 3];
+ r.y = m.val[4] * v.x + m.val[5] * v.y + m.val[ 6] * v.z + m.val[ 7];
+ r.z = m.val[8] * v.x + m.val[9] * v.y + m.val[10] * v.z + m.val[11];
+ return r;
+}
+
+static inline
+cv::Vec3f cv::operator*(const cv::Affine3f& affine, const cv::Vec3f& v)
+{
+ const cv::Matx44f& m = affine.matrix;
+ cv::Vec3f r;
+ r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3];
+ r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7];
+ r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11];
+ return r;
+}
+
+static inline
+cv::Vec3d cv::operator*(const cv::Affine3d& affine, const cv::Vec3d& v)
+{
+ const cv::Matx44d& m = affine.matrix;
+ cv::Vec3d r;
+ r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3];
+ r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7];
+ r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11];
+ return r;
+}
+
+
+
+#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>& affine)
+{
+    cv::Mat(4, 4, cv::DataType<T>::type, affine.matrix().data()).copyTo(matrix);
+}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine)
+{
+    Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> a = affine;
+    cv::Mat(4, 4, cv::DataType<T>::type, a.matrix().data()).copyTo(matrix);
+}
+
+template<typename T> inline
+cv::Affine3<T>::operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>() const
+{
+    Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> r;
+    cv::Mat hdr(4, 4, cv::DataType<T>::type, r.matrix().data());
+ cv::Mat(matrix, false).copyTo(hdr);
+ return r;
+}
+
+template<typename T> inline
+cv::Affine3<T>::operator Eigen::Transform<T, 3, Eigen::Affine>() const
+{
+    return this->operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>();
+}
+
+#endif /* defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H */
+
+//! @endcond
+
+#endif /* __cplusplus */
+
+#endif /* __OPENCV_CORE_AFFINE3_HPP__ */
diff --git a/lib/opencv/include/opencv2/core/base.hpp b/lib/opencv/include/opencv2/core/base.hpp
new file mode 100644
index 000000000..ed633f5dc
--- /dev/null
+++ b/lib/opencv/include/opencv2/core/base.hpp
@@ -0,0 +1,689 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Copyright (C) 2014, Itseez Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_BASE_HPP__
+#define __OPENCV_CORE_BASE_HPP__
+
+#ifndef __cplusplus
+# error base.hpp header must be compiled as C++
+#endif
+
+#include <climits>
+#include <algorithm>
+
+#include "opencv2/core/cvdef.h"
+#include "opencv2/core/cvstd.hpp"
+
+namespace cv
+{
+
+//! @addtogroup core_utils
+//! @{
+
+namespace Error {
+//! error codes
+enum Code {
+    StsOk= 0, //!< everything is ok
+    StsBackTrace= -1, //!< pseudo error for back trace
+    StsError= -2, //!< unknown/unspecified error
+ StsInternal= -3, //!< internal error (bad state)
+ StsNoMem= -4, //!< insufficient memory
+ StsBadArg= -5, //!< function arg/param is bad
+ StsBadFunc= -6, //!< unsupported function
+ StsNoConv= -7, //!< iter. didn't converge
+ StsAutoTrace= -8, //!< tracing
+ HeaderIsNull= -9, //!< image header is NULL
+ BadImageSize= -10, //!< image size is invalid
+ BadOffset= -11, //!< offset is invalid
+ BadDataPtr= -12, //!<
+ BadStep= -13, //!<
+ BadModelOrChSeq= -14, //!<
+ BadNumChannels= -15, //!<
+ BadNumChannel1U= -16, //!<
+ BadDepth= -17, //!<
+ BadAlphaChannel= -18, //!<
+ BadOrder= -19, //!<
+ BadOrigin= -20, //!<
+ BadAlign= -21, //!<
+ BadCallBack= -22, //!<
+ BadTileSize= -23, //!<
+ BadCOI= -24, //!<
+ BadROISize= -25, //!<
+ MaskIsTiled= -26, //!<
+ StsNullPtr= -27, //!< null pointer
+ StsVecLengthErr= -28, //!< incorrect vector length
+ StsFilterStructContentErr= -29, //!< incorr. filter structure content
+ StsKernelStructContentErr= -30, //!< incorr. transform kernel content
+    StsFilterOffsetErr= -31, //!< incorrect filter offset value
+ StsBadSize= -201, //!< the input/output structure size is incorrect
+ StsDivByZero= -202, //!< division by zero
+ StsInplaceNotSupported= -203, //!< in-place operation is not supported
+ StsObjectNotFound= -204, //!< request can't be completed
+ StsUnmatchedFormats= -205, //!< formats of input/output arrays differ
+ StsBadFlag= -206, //!< flag is wrong or not supported
+ StsBadPoint= -207, //!< bad CvPoint
+ StsBadMask= -208, //!< bad format of mask (neither 8uC1 nor 8sC1)
+ StsUnmatchedSizes= -209, //!< sizes of input/output structures do not match
+ StsUnsupportedFormat= -210, //!< the data format/type is not supported by the function
+ StsOutOfRange= -211, //!< some of parameters are out of range
+ StsParseError= -212, //!< invalid syntax/structure of the parsed file
+ StsNotImplemented= -213, //!< the requested function/feature is not implemented
+ StsBadMemBlock= -214, //!< an allocated block has been corrupted
+ StsAssert= -215, //!< assertion failed
+ GpuNotSupported= -216,
+ GpuApiCallError= -217,
+ OpenGlNotSupported= -218,
+ OpenGlApiCallError= -219,
+ OpenCLApiCallError= -220,
+ OpenCLDoubleNotSupported= -221,
+ OpenCLInitError= -222,
+ OpenCLNoAMDBlasFft= -223
+};
+} //Error
+
+//! @} core_utils
+
+//! @addtogroup core_array
+//! @{
+
+//! matrix decomposition types
+enum DecompTypes {
+ /** Gaussian elimination with the optimal pivot element chosen. */
+ DECOMP_LU = 0,
+ /** singular value decomposition (SVD) method; the system can be over-defined and/or the matrix
+ src1 can be singular */
+ DECOMP_SVD = 1,
+ /** eigenvalue decomposition; the matrix src1 must be symmetrical */
+ DECOMP_EIG = 2,
+    /** Cholesky \f$LL^T\f$ factorization; the matrix src1 must be symmetrical and positive
+    definite */
+ DECOMP_CHOLESKY = 3,
+ /** QR factorization; the system can be over-defined and/or the matrix src1 can be singular */
+ DECOMP_QR = 4,
+ /** while all the previous flags are mutually exclusive, this flag can be used together with
+ any of the previous; it means that the normal equations
+ \f$\texttt{src1}^T\cdot\texttt{src1}\cdot\texttt{dst}=\texttt{src1}^T\texttt{src2}\f$ are
+ solved instead of the original system
+ \f$\texttt{src1}\cdot\texttt{dst}=\texttt{src2}\f$ */
+ DECOMP_NORMAL = 16
+};
+
+/** norm types
+- For one array:
+\f[norm = \forkthree{\|\texttt{src1}\|_{L_{\infty}} = \max _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) }
+{ \| \texttt{src1} \| _{L_1} = \sum _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\) }
+{ \| \texttt{src1} \| _{L_2} = \sqrt{\sum_I \texttt{src1}(I)^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) }\f]
+
+- Absolute norm for two arrays
+\f[norm = \forkthree{\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} = \max _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) }
+{ \| \texttt{src1} - \texttt{src2} \| _{L_1} = \sum _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\) }
+{ \| \texttt{src1} - \texttt{src2} \| _{L_2} = \sqrt{\sum_I (\texttt{src1}(I) - \texttt{src2}(I))^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) }\f]
+
+- Relative norm for two arrays
+\f[norm = \forkthree{\frac{\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} }{\|\texttt{src2}\|_{L_{\infty}} }}{if \(\texttt{normType} = \texttt{NORM_RELATIVE_INF}\) }
+{ \frac{\|\texttt{src1}-\texttt{src2}\|_{L_1} }{\|\texttt{src2}\|_{L_1}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE_L1}\) }
+{ \frac{\|\texttt{src1}-\texttt{src2}\|_{L_2} }{\|\texttt{src2}\|_{L_2}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE_L2}\) }\f]
+
+As example for one array consider the function \f$r(x)= \begin{pmatrix} x \\ 1-x \end{pmatrix}, x \in [-1;1]\f$.
+The \f$ L_{1}, L_{2} \f$ and \f$ L_{\infty} \f$ norm for the sample value \f$r(-1) = \begin{pmatrix} -1 \\ 2 \end{pmatrix}\f$
+is calculated as follows
+\f{align*}
+ \| r(-1) \|_{L_1} &= |-1| + |2| = 3 \\
+ \| r(-1) \|_{L_2} &= \sqrt{(-1)^{2} + (2)^{2}} = \sqrt{5} \\
+ \| r(-1) \|_{L_\infty} &= \max(|-1|,|2|) = 2
+\f}
+and for \f$r(0.5) = \begin{pmatrix} 0.5 \\ 0.5 \end{pmatrix}\f$ the calculation is
+\f{align*}
+ \| r(0.5) \|_{L_1} &= |0.5| + |0.5| = 1 \\
+ \| r(0.5) \|_{L_2} &= \sqrt{(0.5)^{2} + (0.5)^{2}} = \sqrt{0.5} \\
+ \| r(0.5) \|_{L_\infty} &= \max(|0.5|,|0.5|) = 0.5.
+\f}
+The following graphic shows all values for the three norm functions \f$\| r(x) \|_{L_1}, \| r(x) \|_{L_2}\f$ and \f$\| r(x) \|_{L_\infty}\f$.
+It is notable that the \f$ L_{1} \f$ norm forms the upper and the \f$ L_{\infty} \f$ norm forms the lower border for the example function \f$ r(x) \f$.
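+
+A small sketch computing these norms for the sample value above with cv::norm:
+@code{.cpp}
+Mat r = (Mat_<double>(2, 1) << -1, 2);   // r(-1) from the example
+double l1  = norm(r, NORM_L1);           // 3
+double l2  = norm(r, NORM_L2);           // sqrt(5)
+double inf = norm(r, NORM_INF);          // 2
+@endcode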
+
+ */
+enum NormTypes { NORM_INF = 1,
+ NORM_L1 = 2,
+ NORM_L2 = 4,
+ NORM_L2SQR = 5,
+ NORM_HAMMING = 6,
+ NORM_HAMMING2 = 7,
+ NORM_TYPE_MASK = 7,
+ NORM_RELATIVE = 8, //!< flag
+ NORM_MINMAX = 32 //!< flag
+ };
+
+//! comparison types
+enum CmpTypes { CMP_EQ = 0, //!< src1 is equal to src2.
+ CMP_GT = 1, //!< src1 is greater than src2.
+ CMP_GE = 2, //!< src1 is greater than or equal to src2.
+ CMP_LT = 3, //!< src1 is less than src2.
+ CMP_LE = 4, //!< src1 is less than or equal to src2.
+ CMP_NE = 5 //!< src1 is unequal to src2.
+ };
+
+//! generalized matrix multiplication flags
+enum GemmFlags { GEMM_1_T = 1, //!< transposes src1
+ GEMM_2_T = 2, //!< transposes src2
+ GEMM_3_T = 4 //!< transposes src3
+ };
+
+enum DftFlags {
+ /** performs an inverse 1D or 2D transform instead of the default forward
+ transform. */
+ DFT_INVERSE = 1,
+ /** scales the result: divide it by the number of array elements. Normally, it is
+ combined with DFT_INVERSE. */
+ DFT_SCALE = 2,
+ /** performs a forward or inverse transform of every individual row of the input
+ matrix; this flag enables you to transform multiple vectors simultaneously and can be used to
+ decrease the overhead (which is sometimes several times larger than the processing itself) to
+ perform 3D and higher-dimensional transformations and so forth.*/
+ DFT_ROWS = 4,
+ /** performs a forward transformation of 1D or 2D real array; the result,
+ though being a complex array, has complex-conjugate symmetry (*CCS*, see the function
+ description below for details), and such an array can be packed into a real array of the same
+ size as input, which is the fastest option and which is what the function does by default;
+ however, you may wish to get a full complex array (for simpler spectrum analysis, and so on) -
+ pass the flag to enable the function to produce a full-size complex output array. */
+ DFT_COMPLEX_OUTPUT = 16,
+ /** performs an inverse transformation of a 1D or 2D complex array; the
+ result is normally a complex array of the same size, however, if the input array has
+ conjugate-complex symmetry (for example, it is a result of forward transformation with
+ DFT_COMPLEX_OUTPUT flag), the output is a real array; while the function itself does not
+ check whether the input is symmetrical or not, you can pass the flag and then the function
+ will assume the symmetry and produce the real output array (note that when the input is packed
+ into a real array and inverse transformation is executed, the function treats the input as a
+ packed complex-conjugate symmetrical array, and the output will also be a real array). */
+ DFT_REAL_OUTPUT = 32,
+ /** performs an inverse 1D or 2D transform instead of the default forward transform. */
+ DCT_INVERSE = DFT_INVERSE,
+ /** performs a forward or inverse transform of every individual row of the input
+ matrix. This flag enables you to transform multiple vectors simultaneously and can be used to
+ decrease the overhead (which is sometimes several times larger than the processing itself) to
+ perform 3D and higher-dimensional transforms and so forth.*/
+ DCT_ROWS = DFT_ROWS
+};
+
+//! Various border types, image boundaries are denoted with `|`
+//! @see borderInterpolate, copyMakeBorder
+enum BorderTypes {
+ BORDER_CONSTANT = 0, //!< `iiiiii|abcdefgh|iiiiiii` with some specified `i`
+ BORDER_REPLICATE = 1, //!< `aaaaaa|abcdefgh|hhhhhhh`
+ BORDER_REFLECT = 2, //!< `fedcba|abcdefgh|hgfedcb`
+ BORDER_WRAP = 3, //!< `cdefgh|abcdefgh|abcdefg`
+ BORDER_REFLECT_101 = 4, //!< `gfedcb|abcdefgh|gfedcba`
+    BORDER_TRANSPARENT = 5, //!< `uvwxyz|abcdefgh|ijklmno`
+
+ BORDER_REFLECT101 = BORDER_REFLECT_101, //!< same as BORDER_REFLECT_101
+ BORDER_DEFAULT = BORDER_REFLECT_101, //!< same as BORDER_REFLECT_101
+ BORDER_ISOLATED = 16 //!< do not look outside of ROI
+};
+
+//! @} core_array
+
+//! @addtogroup core_utils
+//! @{
+
+//! @cond IGNORED
+
+//////////////// static assert /////////////////
+#define CVAUX_CONCAT_EXP(a, b) a##b
+#define CVAUX_CONCAT(a, b) CVAUX_CONCAT_EXP(a,b)
+
+#if defined(__clang__)
+# ifndef __has_extension
+# define __has_extension __has_feature /* compatibility, for older versions of clang */
+# endif
+# if __has_extension(cxx_static_assert)
+# define CV_StaticAssert(condition, reason) static_assert((condition), reason " " #condition)
+# elif __has_extension(c_static_assert)
+# define CV_StaticAssert(condition, reason) _Static_assert((condition), reason " " #condition)
+# endif
+#elif defined(__GNUC__)
+# if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L)
+# define CV_StaticAssert(condition, reason) static_assert((condition), reason " " #condition)
+# endif
+#elif defined(_MSC_VER)
+# if _MSC_VER >= 1600 /* MSVC 10 */
+# define CV_StaticAssert(condition, reason) static_assert((condition), reason " " #condition)
+# endif
+#endif
+#ifndef CV_StaticAssert
+# if !defined(__clang__) && defined(__GNUC__) && (__GNUC__*100 + __GNUC_MINOR__ > 302)
+# define CV_StaticAssert(condition, reason) ({ extern int __attribute__((error("CV_StaticAssert: " reason " " #condition))) CV_StaticAssert(); ((condition) ? 0 : CV_StaticAssert()); })
+# else
+ template <bool x> struct CV_StaticAssert_failed;
+ template <> struct CV_StaticAssert_failed<true> { enum { val = 1 }; };
+ template<int x> struct CV_StaticAssert_test {};
+# define CV_StaticAssert(condition, reason)\
+ typedef cv::CV_StaticAssert_test< sizeof(cv::CV_StaticAssert_failed< static_cast<bool>(condition) >) > CVAUX_CONCAT(CV_StaticAssert_failed_at_, __LINE__)
+# endif
+#endif
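+
+// Usage sketch for the macro defined above (illustrative compile-time check):
+//
+//     CV_StaticAssert(sizeof(int) >= 4, "int is expected to be at least 32 bits");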
+
+// Suppress warning "-Wdeprecated-declarations" / C4996
+#if defined(_MSC_VER)
+ #define CV_DO_PRAGMA(x) __pragma(x)
+#elif defined(__GNUC__)
+ #define CV_DO_PRAGMA(x) _Pragma (#x)
+#else
+ #define CV_DO_PRAGMA(x)
+#endif
+
+#ifdef _MSC_VER
+#define CV_SUPPRESS_DEPRECATED_START \
+ CV_DO_PRAGMA(warning(push)) \
+ CV_DO_PRAGMA(warning(disable: 4996))
+#define CV_SUPPRESS_DEPRECATED_END CV_DO_PRAGMA(warning(pop))
+#elif defined (__clang__) || ((__GNUC__) && (__GNUC__*100 + __GNUC_MINOR__ > 405))
+#define CV_SUPPRESS_DEPRECATED_START \
+ CV_DO_PRAGMA(GCC diagnostic push) \
+ CV_DO_PRAGMA(GCC diagnostic ignored "-Wdeprecated-declarations")
+#define CV_SUPPRESS_DEPRECATED_END CV_DO_PRAGMA(GCC diagnostic pop)
+#else
+#define CV_SUPPRESS_DEPRECATED_START
+#define CV_SUPPRESS_DEPRECATED_END
+#endif
+#define CV_UNUSED(name) (void)name
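+
+// Usage sketch for the macros above (illustrative): silence the deprecation
+// warning for a single call to some deprecated API and mark an intentionally
+// unused parameter. `oldFunction` and `param` are hypothetical names.
+//
+//     CV_SUPPRESS_DEPRECATED_START
+//     oldFunction();
+//     CV_SUPPRESS_DEPRECATED_END
+//     CV_UNUSED(param);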
+//! @endcond
+
+/*! @brief Signals an error and raises the exception.
+
+By default the function prints information about the error to stderr,
+then it either stops if setBreakOnError() had been called before or raises the exception.
+It is possible to alter error processing by using redirectError().
+@param _code - error code (Error::Code)
+@param _err - error description
+@param _func - function name. Available only when the compiler supports getting it
+@param _file - source file name where the error has occurred
+@param _line - line number in the source file where the error has occurred
+@see CV_Error, CV_Error_, CV_ErrorNoReturn, CV_ErrorNoReturn_, CV_Assert, CV_DbgAssert
+ */
+CV_EXPORTS void error(int _code, const String& _err, const char* _func, const char* _file, int _line);
+
+#ifdef __GNUC__
+# if defined __clang__ || defined __APPLE__
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Winvalid-noreturn"
+# endif
+#endif
+
+/** same as cv::error, but does not return */
+CV_INLINE CV_NORETURN void errorNoReturn(int _code, const String& _err, const char* _func, const char* _file, int _line)
+{
+ error(_code, _err, _func, _file, _line);
+#ifdef __GNUC__
+# if !defined __clang__ && !defined __APPLE__
+ // this suppresses this warning: "noreturn" function does return [enabled by default]
+ __builtin_trap();
+ // or use infinite loop: for (;;) {}
+# endif
+#endif
+}
+#ifdef __GNUC__
+# if defined __clang__ || defined __APPLE__
+# pragma GCC diagnostic pop
+# endif
+#endif
+
+#if defined __GNUC__
+#define CV_Func __func__
+#elif defined _MSC_VER
+#define CV_Func __FUNCTION__
+#else
+#define CV_Func ""
+#endif
+
+/** @brief Call the error handler.
+
+Currently, the error handler prints the error code and the error message to the standard
+error stream `stderr`. In the Debug configuration, it then provokes memory access violation, so that
+the execution stack and all the parameters can be analyzed by the debugger. In the Release
+configuration, the exception is thrown.
+
+@param code one of Error::Code
+@param msg error message
+*/
+#define CV_Error( code, msg ) cv::error( code, msg, CV_Func, __FILE__, __LINE__ )
+
+/** @brief Call the error handler.
+
+This macro can be used to construct an error message on-fly to include some dynamic information,
+for example:
+@code
+ // note the extra parentheses around the formatted text message
+ CV_Error_( CV_StsOutOfRange,
+ ("the value at (%d, %d)=%g is out of range", badPt.x, badPt.y, badValue));
+@endcode
+@param code one of Error::Code
+@param args printf-like formatted error message in parentheses
+*/
+#define CV_Error_( code, args ) cv::error( code, cv::format args, CV_Func, __FILE__, __LINE__ )
+
+/** @brief Checks a condition at runtime and throws exception if it fails
+
+The macros CV_Assert (and CV_DbgAssert(expr)) evaluate the specified expression. If it is 0, the macros
+raise an error (see cv::error). The macro CV_Assert checks the condition in both Debug and Release
+configurations while CV_DbgAssert is only retained in the Debug configuration.
+*/
+#define CV_Assert( expr ) if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ )
+
+/** same as CV_Error(code,msg), but does not return */
+#define CV_ErrorNoReturn( code, msg ) cv::errorNoReturn( code, msg, CV_Func, __FILE__, __LINE__ )
+
+/** same as CV_Error_(code,args), but does not return */
+#define CV_ErrorNoReturn_( code, args ) cv::errorNoReturn( code, cv::format args, CV_Func, __FILE__, __LINE__ )
+
+/** replaced with CV_Assert(expr) in Debug configuration */
+#ifdef _DEBUG
+# define CV_DbgAssert(expr) CV_Assert(expr)
+#else
+# define CV_DbgAssert(expr)
+#endif
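+
+// Usage sketch (illustrative; `img` is assumed to be a cv::Mat defined elsewhere):
+//
+//     CV_Assert(img.type() == CV_8UC3);      // checked in both Debug and Release
+//     CV_DbgAssert(img.isContinuous());      // checked only in Debug builds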
+
+/*
+ * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor
+ * bit count of A exclusive XOR'ed with B
+ */
+struct CV_EXPORTS Hamming
+{
+ enum { normType = NORM_HAMMING };
+ typedef unsigned char ValueType;
+ typedef int ResultType;
+
+ /** this will count the bits in a ^ b
+ */
+ ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const;
+};
+
+typedef Hamming HammingLUT;
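+
+// Usage sketch for the Hamming functor (illustrative): count the differing bits
+// between two 32-byte binary descriptors `d1` and `d2` (assumed to exist).
+//
+//     cv::Hamming hamming;
+//     int dist = hamming(d1, d2, 32);   // number of bits set in d1 ^ d2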
+
+/////////////////////////////////// inline norms ////////////////////////////////////
+
+template<typename _Tp> inline _Tp cv_abs(_Tp x) { return std::abs(x); }
+inline int cv_abs(uchar x) { return x; }
+inline int cv_abs(schar x) { return std::abs(x); }
+inline int cv_abs(ushort x) { return x; }
+inline int cv_abs(short x) { return std::abs(x); }
+
+template<typename _Tp, typename _AccTp> static inline
+_AccTp normL2Sqr(const _Tp* a, int n)
+{
+ _AccTp s = 0;
+ int i=0;
+#if CV_ENABLE_UNROLLED
+ for( ; i <= n - 4; i += 4 )
+ {
+ _AccTp v0 = a[i], v1 = a[i+1], v2 = a[i+2], v3 = a[i+3];
+ s += v0*v0 + v1*v1 + v2*v2 + v3*v3;
+ }
+#endif
+ for( ; i < n; i++ )
+ {
+ _AccTp v = a[i];
+ s += v*v;
+ }
+ return s;
+}
+
+template<typename _Tp, typename _AccTp> static inline
+_AccTp normL1(const _Tp* a, int n)
+{
+ _AccTp s = 0;
+ int i = 0;
+#if CV_ENABLE_UNROLLED
+ for(; i <= n - 4; i += 4 )
+ {
+ s += (_AccTp)cv_abs(a[i]) + (_AccTp)cv_abs(a[i+1]) +
+ (_AccTp)cv_abs(a[i+2]) + (_AccTp)cv_abs(a[i+3]);
+ }
+#endif
+ for( ; i < n; i++ )
+ s += cv_abs(a[i]);
+ return s;
+}
+
+template<typename _Tp, typename _AccTp> static inline
+_AccTp normInf(const _Tp* a, int n)
+{
+ _AccTp s = 0;
+ for( int i = 0; i < n; i++ )
+ s = std::max(s, (_AccTp)cv_abs(a[i]));
+ return s;
+}
+
+template<typename _Tp, typename _AccTp> static inline
+_AccTp normL2Sqr(const _Tp* a, const _Tp* b, int n)
+{
+ _AccTp s = 0;
+ int i= 0;
+#if CV_ENABLE_UNROLLED
+ for(; i <= n - 4; i += 4 )
+ {
+ _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]);
+ s += v0*v0 + v1*v1 + v2*v2 + v3*v3;
+ }
+#endif
+ for( ; i < n; i++ )
+ {
+ _AccTp v = _AccTp(a[i] - b[i]);
+ s += v*v;
+ }
+ return s;
+}
+
+static inline float normL2Sqr(const float* a, const float* b, int n)
+{
+ float s = 0.f;
+ for( int i = 0; i < n; i++ )
+ {
+ float v = a[i] - b[i];
+ s += v*v;
+ }
+ return s;
+}
+
+template<typename _Tp, typename _AccTp> static inline
+_AccTp normL1(const _Tp* a, const _Tp* b, int n)
+{
+ _AccTp s = 0;
+ int i= 0;
+#if CV_ENABLE_UNROLLED
+ for(; i <= n - 4; i += 4 )
+ {
+ _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]);
+ s += std::abs(v0) + std::abs(v1) + std::abs(v2) + std::abs(v3);
+ }
+#endif
+ for( ; i < n; i++ )
+ {
+ _AccTp v = _AccTp(a[i] - b[i]);
+ s += std::abs(v);
+ }
+ return s;
+}
+
+inline float normL1(const float* a, const float* b, int n)
+{
+ float s = 0.f;
+ for( int i = 0; i < n; i++ )
+ {
+ s += std::abs(a[i] - b[i]);
+ }
+ return s;
+}
+
+inline int normL1(const uchar* a, const uchar* b, int n)
+{
+ int s = 0;
+ for( int i = 0; i < n; i++ )
+ {
+ s += std::abs(a[i] - b[i]);
+ }
+ return s;
+}
+
+template<typename _Tp, typename _AccTp> static inline
+_AccTp normInf(const _Tp* a, const _Tp* b, int n)
+{
+ _AccTp s = 0;
+ for( int i = 0; i < n; i++ )
+ {
+ _AccTp v0 = a[i] - b[i];
+ s = std::max(s, std::abs(v0));
+ }
+ return s;
+}
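+
+// Usage sketch for the inline norm helpers above (illustrative):
+//
+//     float a[] = { 3.f, 4.f };
+//     float l2sq = cv::normL2Sqr<float, float>(a, 2);   // 25
+//     float l1   = cv::normL1<float, float>(a, 2);      // 7
+//     float linf = cv::normInf<float, float>(a, 2);     // 4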
+
+/** @brief Computes the cube root of an argument.
+
+ The function cubeRoot computes \f$\sqrt[3]{\texttt{val}}\f$. Negative arguments are handled correctly.
+ NaN and Inf are not handled. The accuracy approaches the maximum possible accuracy for
+ single-precision data.
+ @param val A function argument.
+ */
+CV_EXPORTS_W float cubeRoot(float val);
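+// Example (illustrative): cv::cubeRoot(-8.f) returns -2.f and cv::cubeRoot(27.f) returns 3.f.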
+
+/** @brief Calculates the angle of a 2D vector in degrees.
+
+ The function fastAtan2 calculates the full-range angle of an input 2D vector. The angle is measured
+ in degrees and varies from 0 to 360 degrees. The accuracy is about 0.3 degrees.
+ @param x x-coordinate of the vector.
+ @param y y-coordinate of the vector.
+ */
+CV_EXPORTS_W float fastAtan2(float y, float x);
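+// Example (illustrative): cv::fastAtan2(1.f, 1.f) is approximately 45.f and
+// cv::fastAtan2(-1.f, 0.f) is approximately 270.f (degrees, counter-clockwise from the x-axis).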
+
+/** proxy for hal::LU */
+CV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n);
+/** proxy for hal::LU */
+CV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n);
+/** proxy for hal::Cholesky */
+CV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n);
+/** proxy for hal::Cholesky */
+CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n);
+
+////////////////// forward declarations for important OpenCV types //////////////////
+
+//! @cond IGNORED
+
+template<typename _Tp, int cn> class Vec;
+template<typename _Tp, int m, int n> class Matx;
+
+template<typename _Tp> class Complex;
+template<typename _Tp> class Point_;
+template<typename _Tp> class Point3_;
+template<typename _Tp> class Size_;
+template<typename _Tp> class Rect_;
+template<typename _Tp> class Scalar_;
+
+class CV_EXPORTS RotatedRect;
+class CV_EXPORTS Range;
+class CV_EXPORTS TermCriteria;
+class CV_EXPORTS KeyPoint;
+class CV_EXPORTS DMatch;
+class CV_EXPORTS RNG;
+
+class CV_EXPORTS Mat;
+class CV_EXPORTS MatExpr;
+
+class CV_EXPORTS UMat;
+
+class CV_EXPORTS SparseMat;
+typedef Mat MatND;
+
+template<typename _Tp> class Mat_;
+template<typename _Tp> class SparseMat_;
+
+class CV_EXPORTS MatConstIterator;
+class CV_EXPORTS SparseMatIterator;
+class CV_EXPORTS SparseMatConstIterator;
+template<typename _Tp> class MatIterator_;
+template<typename _Tp> class MatConstIterator_;
+template<typename _Tp> class SparseMatIterator_;
+template<typename _Tp> class SparseMatConstIterator_;
+
+namespace ogl
+{
+ class CV_EXPORTS Buffer;
+ class CV_EXPORTS Texture2D;
+ class CV_EXPORTS Arrays;
+}
+
+namespace cuda
+{
+ class CV_EXPORTS GpuMat;
+ class CV_EXPORTS HostMem;
+ class CV_EXPORTS Stream;
+ class CV_EXPORTS Event;
+}
+
+namespace cudev
+{
+ template <typename _Tp> class GpuMat_;
+}
+
+namespace ipp
+{
+CV_EXPORTS int getIppFeatures();
+CV_EXPORTS void setIppStatus(int status, const char * const funcname = NULL, const char * const filename = NULL,
+ int line = 0);
+CV_EXPORTS int getIppStatus();
+CV_EXPORTS String getIppErrorLocation();
+CV_EXPORTS bool useIPP();
+CV_EXPORTS void setUseIPP(bool flag);
+
+} // ipp
+
+//! @endcond
+
+//! @} core_utils
+
+
+
+
+} // cv
+
+#include "opencv2/core/neon_utils.hpp"
+
+#endif //__OPENCV_CORE_BASE_HPP__
diff --git a/lib/opencv/include/opencv2/core/bufferpool.hpp b/lib/opencv/include/opencv2/core/bufferpool.hpp
new file mode 100644
index 000000000..76df2d29f
--- /dev/null
+++ b/lib/opencv/include/opencv2/core/bufferpool.hpp
@@ -0,0 +1,31 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2014, Advanced Micro Devices, Inc., all rights reserved.
+
+#ifndef __OPENCV_CORE_BUFFER_POOL_HPP__
+#define __OPENCV_CORE_BUFFER_POOL_HPP__
+
+namespace cv
+{
+
+//! @addtogroup core
+//! @{
+
+class BufferPoolController
+{
+protected:
+ ~BufferPoolController() { }
+public:
+ virtual size_t getReservedSize() const = 0;
+ virtual size_t getMaxReservedSize() const = 0;
+ virtual void setMaxReservedSize(size_t size) = 0;
+ virtual void freeAllReservedBuffers() = 0;
+};
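+
+// Usage sketch (illustrative): `ctrl` is assumed to be a BufferPoolController*
+// obtained from one of OpenCV's allocators; only the interface declared above is used.
+//
+//     void shrinkPool(BufferPoolController* ctrl)
+//     {
+//         ctrl->setMaxReservedSize(ctrl->getMaxReservedSize() / 2);
+//         ctrl->freeAllReservedBuffers();
+//     }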
+
+//! @}
+
+}
+
+#endif // __OPENCV_CORE_BUFFER_POOL_HPP__
diff --git a/lib/opencv/include/opencv2/core/core.hpp b/lib/opencv/include/opencv2/core/core.hpp
new file mode 100644
index 000000000..438918359
--- /dev/null
+++ b/lib/opencv/include/opencv2/core/core.hpp
@@ -0,0 +1,48 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifdef __OPENCV_BUILD
+#error this is a compatibility header which should not be used inside the OpenCV library
+#endif
+
+#include "opencv2/core.hpp"
diff --git a/lib/opencv/include/opencv2/core/core_c.h b/lib/opencv/include/opencv2/core/core_c.h
new file mode 100644
index 000000000..a0ed63264
--- /dev/null
+++ b/lib/opencv/include/opencv2/core/core_c.h
@@ -0,0 +1,3152 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+
+#ifndef __OPENCV_CORE_C_H__
+#define __OPENCV_CORE_C_H__
+
+#include "opencv2/core/types_c.h"
+
+#ifdef __cplusplus
+# ifdef _MSC_VER
+/* disable warning C4190: 'function' has C-linkage specified, but returns UDT 'typename'
+ which is incompatible with C
+
+ It is OK to disable it because we only extend a few plain structures with
+ C++ constructors for simpler interoperability with the C++ API of the library
+*/
+# pragma warning(disable:4190)
+# elif defined __clang__ && __clang_major__ >= 3
+# pragma GCC diagnostic ignored "-Wreturn-type-c-linkage"
+# endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @addtogroup core_c
+ @{
+*/
+
+/****************************************************************************************\
+* Array allocation, deallocation, initialization and access to elements *
+\****************************************************************************************/
+
+/** `malloc` wrapper.
+ If there is not enough memory, the function
+ (as well as other OpenCV functions that call cvAlloc)
+ raises an error. */
+CVAPI(void*) cvAlloc( size_t size );
+
+/** `free` wrapper.
+ Here and further, all the memory releasing functions
+ (that all call cvFree) take a double pointer in order
+ to clear the pointer to the data after releasing it.
+ Passing a pointer to a NULL pointer is OK: nothing happens in this case.
+*/
+CVAPI(void) cvFree_( void* ptr );
+#define cvFree(ptr) (cvFree_(*(ptr)), *(ptr)=0)
+
+/** @brief Creates an image header but does not allocate the image data.
+
+@param size Image width and height
+@param depth Image depth (see cvCreateImage )
+@param channels Number of channels (see cvCreateImage )
+ */
+CVAPI(IplImage*) cvCreateImageHeader( CvSize size, int depth, int channels );
+
+/** @brief Initializes an image header that was previously allocated.
+
+The returned IplImage\* points to the initialized header.
+@param image Image header to initialize
+@param size Image width and height
+@param depth Image depth (see cvCreateImage )
+@param channels Number of channels (see cvCreateImage )
+@param origin Top-left IPL_ORIGIN_TL or bottom-left IPL_ORIGIN_BL
+@param align Alignment for image rows, typically 4 or 8 bytes
+ */
+CVAPI(IplImage*) cvInitImageHeader( IplImage* image, CvSize size, int depth,
+ int channels, int origin CV_DEFAULT(0),
+ int align CV_DEFAULT(4));
+
+/** @brief Creates an image header and allocates the image data.
+
+This function call is equivalent to the following code:
+@code
+ header = cvCreateImageHeader(size, depth, channels);
+ cvCreateData(header);
+@endcode
+@param size Image width and height
+@param depth Bit depth of image elements. See IplImage for valid depths.
+@param channels Number of channels per pixel. See IplImage for details. This function only creates
+images with interleaved channels.
+ */
+CVAPI(IplImage*) cvCreateImage( CvSize size, int depth, int channels );
+
+/** @brief Deallocates an image header.
+
+This call is an analogue of :
+@code
+ if(image )
+ {
+ iplDeallocate(*image, IPL_IMAGE_HEADER | IPL_IMAGE_ROI);
+ *image = 0;
+ }
+@endcode
+but it does not use IPL functions by default (see the CV_TURN_ON_IPL_COMPATIBILITY macro).
+@param image Double pointer to the image header
+ */
+CVAPI(void) cvReleaseImageHeader( IplImage** image );
+
+/** @brief Deallocates the image header and the image data.
+
+This call is a shortened form of :
+@code
+ if(*image )
+ {
+ cvReleaseData(*image);
+ cvReleaseImageHeader(image);
+ }
+@endcode
+@param image Double pointer to the image header
+*/
+CVAPI(void) cvReleaseImage( IplImage** image );
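+
+// Typical allocation/release sketch for the image functions above (illustrative):
+//
+//     IplImage* img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
+//     // ... use img ...
+//     cvReleaseImage(&img);   // frees the data and the header, sets img to NULL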
+
+/** Creates a copy of IPL image (widthStep may differ) */
+CVAPI(IplImage*) cvCloneImage( const IplImage* image );
+
+/** @brief Sets the channel of interest in an IplImage.
+
+If the ROI is set to NULL and the coi is *not* 0, the ROI is allocated. Most OpenCV functions do
+*not* support the COI setting, so to process an individual image/matrix channel one may copy (via
+cvCopy or cvSplit) the channel to a separate image/matrix, process it and then copy the result
+back (via cvCopy or cvMerge) if needed.
+@param image A pointer to the image header
+@param coi The channel of interest. 0 - all channels are selected, 1 - first channel is selected,
+etc. Note that the channel indices become 1-based.
+ */
+CVAPI(void) cvSetImageCOI( IplImage* image, int coi );
+
+/** @brief Returns the index of the channel of interest.
+
+Returns the channel of interest in an IplImage. Returned values correspond to the coi in
+cvSetImageCOI.
+@param image A pointer to the image header
+ */
+CVAPI(int) cvGetImageCOI( const IplImage* image );
+
+/** @brief Sets an image Region Of Interest (ROI) for a given rectangle.
+
+If the original image ROI was NULL and the rect is not the whole image, the ROI structure is
+allocated.
+
+Most OpenCV functions support the use of ROI and treat the image rectangle as a separate image. For
+example, all of the pixel coordinates are counted from the top-left (or bottom-left) corner of the
+ROI, not the original image.
+@param image A pointer to the image header
+@param rect The ROI rectangle
+ */
+CVAPI(void) cvSetImageROI( IplImage* image, CvRect rect );
+
+/** @brief Resets the image ROI to include the entire image and releases the ROI structure.
+
+This produces a similar result to the following, but in addition it releases the ROI structure. :
+@code
+ cvSetImageROI(image, cvRect(0, 0, image->width, image->height ));
+ cvSetImageCOI(image, 0);
+@endcode
+@param image A pointer to the image header
+ */
+CVAPI(void) cvResetImageROI( IplImage* image );
+
+/** @brief Returns the image ROI.
+
+If there is no ROI set, cvRect(0,0,image-\>width,image-\>height) is returned.
+@param image A pointer to the image header
+ */
+CVAPI(CvRect) cvGetImageROI( const IplImage* image );
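+
+// ROI usage sketch (illustrative; `img` is an IplImage* allocated elsewhere):
+// restrict processing to a sub-rectangle, then restore the full image.
+//
+//     cvSetImageROI(img, cvRect(10, 10, 100, 100));
+//     cvSet(img, cvScalarAll(255), NULL);   // fills only the 100x100 ROI
+//     cvResetImageROI(img);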
+
+/** @brief Creates a matrix header but does not allocate the matrix data.
+
+The function allocates a new matrix header and returns a pointer to it. The matrix data can then be
+allocated using cvCreateData or set explicitly to user-allocated data via cvSetData.
+@param rows Number of rows in the matrix
+@param cols Number of columns in the matrix
+@param type Type of the matrix elements, see cvCreateMat
+ */
+CVAPI(CvMat*) cvCreateMatHeader( int rows, int cols, int type );
+
+#define CV_AUTOSTEP 0x7fffffff
+
+/** @brief Initializes a pre-allocated matrix header.
+
+This function is often used to process raw data with OpenCV matrix functions. For example, the
+following code computes the matrix product of two matrices, stored as ordinary arrays:
+@code
+ double a[] = { 1, 2, 3, 4,
+ 5, 6, 7, 8,
+ 9, 10, 11, 12 };
+
+ double b[] = { 1, 5, 9,
+ 2, 6, 10,
+ 3, 7, 11,
+ 4, 8, 12 };
+
+ double c[9];
+ CvMat Ma, Mb, Mc ;
+
+ cvInitMatHeader(&Ma, 3, 4, CV_64FC1, a);
+ cvInitMatHeader(&Mb, 4, 3, CV_64FC1, b);
+ cvInitMatHeader(&Mc, 3, 3, CV_64FC1, c);
+
+ cvMatMulAdd(&Ma, &Mb, 0, &Mc);
+ // the c array now contains the product of a (3x4) and b (4x3)
+@endcode
+@param mat A pointer to the matrix header to be initialized
+@param rows Number of rows in the matrix
+@param cols Number of columns in the matrix
+@param type Type of the matrix elements, see cvCreateMat .
+@param data Optional: data pointer assigned to the matrix header
+@param step Optional: full row width in bytes of the assigned data. By default, the minimal
+possible step is used which assumes there are no gaps between subsequent rows of the matrix.
+ */
+CVAPI(CvMat*) cvInitMatHeader( CvMat* mat, int rows, int cols,
+ int type, void* data CV_DEFAULT(NULL),
+ int step CV_DEFAULT(CV_AUTOSTEP) );
+
+/** @brief Creates a matrix header and allocates the matrix data.
+
+The function call is equivalent to the following code:
+@code
+ CvMat* mat = cvCreateMatHeader(rows, cols, type);
+ cvCreateData(mat);
+@endcode
+@param rows Number of rows in the matrix
+@param cols Number of columns in the matrix
+@param type The type of the matrix elements in the form
+CV_\<bit depth\>\<S|U|F\>C\<number of channels\>, where S=signed, U=unsigned, F=float. For
+example, CV_8UC1 means the elements are 8-bit unsigned and there is 1 channel, and
+CV_32SC2 means the elements are 32-bit signed and there are 2 channels.
+ */
+CVAPI(CvMat*) cvCreateMat( int rows, int cols, int type );
+
+/** @brief Deallocates a matrix.
+
+The function decrements the matrix data reference counter and deallocates matrix header. If the data
+reference counter is 0, it also deallocates the data. :
+@code
+ if(*mat )
+ cvDecRefData(*mat);
+ cvFree((void**)mat);
+@endcode
+@param mat Double pointer to the matrix
+ */
+CVAPI(void) cvReleaseMat( CvMat** mat );
+
+/** @brief Decrements an array data reference counter.
+
+The function decrements the data reference counter in a CvMat or CvMatND if the reference counter
+pointer is not NULL. If the counter reaches zero, the data is deallocated. In the current
+implementation the reference counter is not NULL only if the data was allocated using the
+cvCreateData function. The counter will be NULL in other cases such as: external data was assigned
+to the header using cvSetData, header is part of a larger matrix or image, or the header was
+converted from an image or n-dimensional matrix header.
+@param arr Pointer to an array header
+ */
+CV_INLINE void cvDecRefData( CvArr* arr )
+{
+ if( CV_IS_MAT( arr ))
+ {
+ CvMat* mat = (CvMat*)arr;
+ mat->data.ptr = NULL;
+ if( mat->refcount != NULL && --*mat->refcount == 0 )
+ cvFree( &mat->refcount );
+ mat->refcount = NULL;
+ }
+ else if( CV_IS_MATND( arr ))
+ {
+ CvMatND* mat = (CvMatND*)arr;
+ mat->data.ptr = NULL;
+ if( mat->refcount != NULL && --*mat->refcount == 0 )
+ cvFree( &mat->refcount );
+ mat->refcount = NULL;
+ }
+}
+
+/** @brief Increments array data reference counter.
+
+The function increments CvMat or CvMatND data reference counter and returns the new counter value if
+the reference counter pointer is not NULL, otherwise it returns zero.
+@param arr Array header
+ */
+CV_INLINE int cvIncRefData( CvArr* arr )
+{
+ int refcount = 0;
+ if( CV_IS_MAT( arr ))
+ {
+ CvMat* mat = (CvMat*)arr;
+ if( mat->refcount != NULL )
+ refcount = ++*mat->refcount;
+ }
+ else if( CV_IS_MATND( arr ))
+ {
+ CvMatND* mat = (CvMatND*)arr;
+ if( mat->refcount != NULL )
+ refcount = ++*mat->refcount;
+ }
+ return refcount;
+}
+
+
+/** Creates an exact copy of the input matrix (except, maybe, the step value) */
+CVAPI(CvMat*) cvCloneMat( const CvMat* mat );
+
+
+/** @brief Returns matrix header corresponding to the rectangular sub-array of input image or matrix.
+
+The function returns a header corresponding to a specified rectangle of the input array. In other
+words, it allows the user to treat a rectangular part of the input array as a stand-alone array. ROI is
+taken into account by the function so the sub-array of ROI is actually extracted.
+@param arr Input array
+@param submat Pointer to the resultant sub-array header
+@param rect Zero-based coordinates of the rectangle of interest
+ */
+CVAPI(CvMat*) cvGetSubRect( const CvArr* arr, CvMat* submat, CvRect rect );
+#define cvGetSubArr cvGetSubRect
+
+/** @brief Returns array row or row span.
+
+The functions return the header, corresponding to a specified row/row span of the input array.
+cvGetRow(arr, submat, row) is a shortcut for cvGetRows(arr, submat, row, row+1).
+@param arr Input array
+@param submat Pointer to the resulting sub-array header
+@param start_row Zero-based index of the starting row (inclusive) of the span
+@param end_row Zero-based index of the ending row (exclusive) of the span
+@param delta_row Index step in the row span. That is, the function extracts every delta_row -th
+row from start_row and up to (but not including) end_row .
+ */
+CVAPI(CvMat*) cvGetRows( const CvArr* arr, CvMat* submat,
+ int start_row, int end_row,
+ int delta_row CV_DEFAULT(1));
+
+/** @overload
+@param arr Input array
+@param submat Pointer to the resulting sub-array header
+@param row Zero-based index of the selected row
+*/
+CV_INLINE CvMat* cvGetRow( const CvArr* arr, CvMat* submat, int row )
+{
+ return cvGetRows( arr, submat, row, row + 1, 1 );
+}
+
+
+/** @brief Returns one or more array columns.
+
+The functions return the header, corresponding to a specified column span of the input array. That
+is, no data is copied. Therefore, any modifications of the submatrix will affect the original array.
+If you need to copy the columns, use cvCloneMat. cvGetCol(arr, submat, col) is a shortcut for
+cvGetCols(arr, submat, col, col+1).
+@param arr Input array
+@param submat Pointer to the resulting sub-array header
+@param start_col Zero-based index of the starting column (inclusive) of the span
+@param end_col Zero-based index of the ending column (exclusive) of the span
+ */
+CVAPI(CvMat*) cvGetCols( const CvArr* arr, CvMat* submat,
+ int start_col, int end_col );
+
+/** @overload
+@param arr Input array
+@param submat Pointer to the resulting sub-array header
+@param col Zero-based index of the selected column
+*/
+CV_INLINE CvMat* cvGetCol( const CvArr* arr, CvMat* submat, int col )
+{
+ return cvGetCols( arr, submat, col, col + 1 );
+}
+
+/** @brief Returns one of array diagonals.
+
+The function returns the header, corresponding to a specified diagonal of the input array.
+@param arr Input array
+@param submat Pointer to the resulting sub-array header
+@param diag Index of the array diagonal. Zero value corresponds to the main diagonal, -1
+corresponds to the diagonal above the main, 1 corresponds to the diagonal below the main, and so
+forth.
+ */
+CVAPI(CvMat*) cvGetDiag( const CvArr* arr, CvMat* submat,
+ int diag CV_DEFAULT(0));
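+
+// Sub-array sketch (illustrative): the headers returned by cvGetRow/cvGetCols/cvGetDiag
+// share data with the source matrix, so writing through them modifies the original.
+//
+//     CvMat* m = cvCreateMat(4, 4, CV_32FC1);
+//     CvMat row_hdr, cols_hdr;
+//     cvGetRow(m, &row_hdr, 0);        // first row, no data copied
+//     cvGetCols(m, &cols_hdr, 1, 3);   // columns 1 and 2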
+
+/** low-level scalar <-> raw data conversion functions */
+CVAPI(void) cvScalarToRawData( const CvScalar* scalar, void* data, int type,
+ int extend_to_12 CV_DEFAULT(0) );
+
+CVAPI(void) cvRawDataToScalar( const void* data, int type, CvScalar* scalar );
+
+/** @brief Creates a new matrix header but does not allocate the matrix data.
+
+The function allocates a header for a multi-dimensional dense array. The array data can further be
+allocated using cvCreateData or set explicitly to user-allocated data via cvSetData.
+@param dims Number of array dimensions
+@param sizes Array of dimension sizes
+@param type Type of array elements, see cvCreateMat
+ */
+CVAPI(CvMatND*) cvCreateMatNDHeader( int dims, const int* sizes, int type );
+
+/** @brief Creates the header and allocates the data for a multi-dimensional dense array.
+
+This function call is equivalent to the following code:
+@code
+ CvMatND* mat = cvCreateMatNDHeader(dims, sizes, type);
+ cvCreateData(mat);
+@endcode
+@param dims Number of array dimensions. This must not exceed CV_MAX_DIM (32 by default, but can be
+changed at build time).
+@param sizes Array of dimension sizes.
+@param type Type of array elements, see cvCreateMat .
+ */
+CVAPI(CvMatND*) cvCreateMatND( int dims, const int* sizes, int type );
+
+/** @brief Initializes a pre-allocated multi-dimensional array header.
+
+@param mat A pointer to the array header to be initialized
+@param dims The number of array dimensions
+@param sizes An array of dimension sizes
+@param type Type of array elements, see cvCreateMat
+@param data Optional data pointer assigned to the matrix header
+ */
+CVAPI(CvMatND*) cvInitMatNDHeader( CvMatND* mat, int dims, const int* sizes,
+ int type, void* data CV_DEFAULT(NULL) );
+
+/** @brief Deallocates a multi-dimensional array.
+
+The function decrements the array data reference counter and releases the array header. If the
+reference counter reaches 0, it also deallocates the data. :
+@code
+ if(*mat )
+ cvDecRefData(*mat);
+ cvFree((void**)mat);
+@endcode
+@param mat Double pointer to the array
+ */
+CV_INLINE void cvReleaseMatND( CvMatND** mat )
+{
+ cvReleaseMat( (CvMat**)mat );
+}
+
+/** Creates a copy of CvMatND (except, maybe, the steps) */
+CVAPI(CvMatND*) cvCloneMatND( const CvMatND* mat );
+
+/** @brief Creates sparse array.
+
+The function allocates a multi-dimensional sparse array. Initially the array contains no elements,
+that is, cvPtrND and other related functions will return 0 for every index.
+@param dims Number of array dimensions. In contrast to the dense matrix, the number of dimensions is
+practically unlimited (up to \f$2^{16}\f$ ).
+@param sizes Array of dimension sizes
+@param type Type of array elements. The same as for CvMat
+ */
+CVAPI(CvSparseMat*) cvCreateSparseMat( int dims, const int* sizes, int type );
+
+/** @brief Deallocates sparse array.
+
+The function releases the sparse array and clears the array pointer upon exit.
+@param mat Double pointer to the array
+ */
+CVAPI(void) cvReleaseSparseMat( CvSparseMat** mat );
+
+/** Creates a copy of CvSparseMat (except, may be, zero items) */
+CVAPI(CvSparseMat*) cvCloneSparseMat( const CvSparseMat* mat );
+
+/** @brief Initializes sparse array elements iterator.
+
+The function initializes iterator of sparse array elements and returns pointer to the first element,
+or NULL if the array is empty.
+@param mat Input array
+@param mat_iterator Initialized iterator
+ */
+CVAPI(CvSparseNode*) cvInitSparseMatIterator( const CvSparseMat* mat,
+ CvSparseMatIterator* mat_iterator );
+
+/** @brief Returns the next sparse matrix element
+
+The function moves the iterator to the next sparse matrix element and returns a pointer to it. In the
+current version there is no particular order of the elements, because they are stored in a
+hash table. The sample below demonstrates how to iterate through the sparse matrix:
+@code
+ // print all the non-zero sparse matrix elements and compute their sum
+ double sum = 0;
+ int i, dims = cvGetDims(sparsemat);
+ CvSparseMatIterator it;
+ CvSparseNode* node = cvInitSparseMatIterator(sparsemat, &it);
+
+ for(; node != 0; node = cvGetNextSparseNode(&it))
+ {
+ int* idx = CV_NODE_IDX(sparsemat, node);
+ float val = *(float*)CV_NODE_VAL(sparsemat, node);
+ printf("M");
+ for(i = 0; i < dims; i++ )
+ printf("[%d]", idx[i]);
+ printf("=%g\n", val);
+
+ sum += val;
+ }
+
+ printf("nTotal sum = %g\n", sum);
+@endcode
+@param mat_iterator Sparse array iterator
+ */
+CV_INLINE CvSparseNode* cvGetNextSparseNode( CvSparseMatIterator* mat_iterator )
+{
+ if( mat_iterator->node->next )
+ return mat_iterator->node = mat_iterator->node->next;
+ else
+ {
+ int idx;
+ for( idx = ++mat_iterator->curidx; idx < mat_iterator->mat->hashsize; idx++ )
+ {
+ CvSparseNode* node = (CvSparseNode*)mat_iterator->mat->hashtable[idx];
+ if( node )
+ {
+ mat_iterator->curidx = idx;
+ return mat_iterator->node = node;
+ }
+ }
+ return NULL;
+ }
+}
+
+
+#define CV_MAX_ARR 10
+
+/** matrix iterator: used for n-ary operations on dense arrays */
+typedef struct CvNArrayIterator
+{
+ int count; /**< number of arrays */
+ int dims; /**< number of dimensions to iterate */
+ CvSize size; /**< maximal common linear size: { width = size, height = 1 } */
+ uchar* ptr[CV_MAX_ARR]; /**< pointers to the array slices */
+ int stack[CV_MAX_DIM]; /**< for internal use */
+ CvMatND* hdr[CV_MAX_ARR]; /**< pointers to the headers of the
+ matrices that are processed */
+}
+CvNArrayIterator;
+
+#define CV_NO_DEPTH_CHECK 1
+#define CV_NO_CN_CHECK 2
+#define CV_NO_SIZE_CHECK 4
+
+/** initializes an iterator that traverses through several arrays simultaneously
+ (the function together with cvNextNArraySlice is used for
+ N-ary element-wise operations) */
+CVAPI(int) cvInitNArrayIterator( int count, CvArr** arrs,
+ const CvArr* mask, CvMatND* stubs,
+ CvNArrayIterator* array_iterator,
+ int flags CV_DEFAULT(0) );
+
+/** returns zero value if iteration is finished, non-zero (slice length) otherwise */
+CVAPI(int) cvNextNArraySlice( CvNArrayIterator* array_iterator );
+
+
+/** @brief Returns type of array elements.
+
+The function returns type of the array elements. In the case of IplImage the type is converted to
+CvMat-like representation. For example, if the image has been created as:
+@code
+ IplImage* img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
+@endcode
+The code cvGetElemType(img) will return CV_8UC3.
+@param arr Input array
+ */
+CVAPI(int) cvGetElemType( const CvArr* arr );
+
+/** @brief Return number of array dimensions
+
+The function returns the array dimensionality and the array of dimension sizes. In the case of
+IplImage or CvMat it always returns 2 regardless of number of image/matrix rows. For example, the
+following code calculates total number of array elements:
+@code
+ int sizes[CV_MAX_DIM];
+ int i, total = 1;
+ int dims = cvGetDims(arr, sizes);
+ for(i = 0; i < dims; i++ )
+ total *= sizes[i];
+@endcode
+@param arr Input array
+@param sizes Optional output vector of the array dimension sizes. For 2d arrays the number of rows
+(height) goes first, number of columns (width) next.
+ */
+CVAPI(int) cvGetDims( const CvArr* arr, int* sizes CV_DEFAULT(NULL) );
+
+
+/** @brief Returns array size along the specified dimension.
+
+@param arr Input array
+@param index Zero-based dimension index (for matrices 0 means number of rows, 1 means number of
+columns; for images 0 means height, 1 means width)
+ */
+CVAPI(int) cvGetDimSize( const CvArr* arr, int index );
+
+
+/** @brief Return pointer to a particular array element.
+
+The functions return a pointer to a specific array element. The number of array dimensions should
+match the number of indices passed to the function, except for the cvPtr1D function, which can be used for
+sequential access to 1D, 2D or nD dense arrays.
+
+The functions can be used for sparse arrays as well - if the requested node does not exist they
+create it and set it to zero.
+
+All these as well as other functions accessing array elements ( cvGetND , cvGetRealND , cvSet
+, cvSetND , cvSetRealND ) raise an error if the element index is out of range.
+@param arr Input array
+@param idx0 The first zero-based component of the element index
+@param type Optional output parameter: type of matrix elements
+ */
+CVAPI(uchar*) cvPtr1D( const CvArr* arr, int idx0, int* type CV_DEFAULT(NULL));
+/** @overload */
+CVAPI(uchar*) cvPtr2D( const CvArr* arr, int idx0, int idx1, int* type CV_DEFAULT(NULL) );
+/** @overload */
+CVAPI(uchar*) cvPtr3D( const CvArr* arr, int idx0, int idx1, int idx2,
+ int* type CV_DEFAULT(NULL));
+/** @overload
+@param arr Input array
+@param idx Array of the element indices
+@param type Optional output parameter: type of matrix elements
+@param create_node Optional input parameter for sparse matrices. Non-zero value of the parameter
+means that the requested element is created if it does not exist already.
+@param precalc_hashval Optional input parameter for sparse matrices. If the pointer is not NULL,
+the function does not recalculate the node hash value, but takes it from the specified location.
+It is useful for speeding up pair-wise operations (TODO: provide an example)
+*/
+CVAPI(uchar*) cvPtrND( const CvArr* arr, const int* idx, int* type CV_DEFAULT(NULL),
+ int create_node CV_DEFAULT(1),
+ unsigned* precalc_hashval CV_DEFAULT(NULL));
+
+/** @brief Return a specific array element.
+
+The functions return a specific array element. In the case of a sparse array the functions return 0
+if the requested node does not exist (no new node is created by the functions).
+@param arr Input array
+@param idx0 The first zero-based component of the element index
+ */
+CVAPI(CvScalar) cvGet1D( const CvArr* arr, int idx0 );
+/** @overload */
+CVAPI(CvScalar) cvGet2D( const CvArr* arr, int idx0, int idx1 );
+/** @overload */
+CVAPI(CvScalar) cvGet3D( const CvArr* arr, int idx0, int idx1, int idx2 );
+/** @overload
+@param arr Input array
+@param idx Array of the element indices
+*/
+CVAPI(CvScalar) cvGetND( const CvArr* arr, const int* idx );
+
+/** @brief Return a specific element of single-channel 1D, 2D, 3D or nD array.
+
+Returns a specific element of a single-channel array. If the array has multiple channels, a runtime
+error is raised. Note that Get?D functions can be used safely for both single-channel and
+multiple-channel arrays though they are a bit slower.
+
+In the case of a sparse array the functions return 0 if the requested node does not exist (no new
+node is created by the functions).
+@param arr Input array. Must have a single channel.
+@param idx0 The first zero-based component of the element index
+ */
+CVAPI(double) cvGetReal1D( const CvArr* arr, int idx0 );
+/** @overload */
+CVAPI(double) cvGetReal2D( const CvArr* arr, int idx0, int idx1 );
+/** @overload */
+CVAPI(double) cvGetReal3D( const CvArr* arr, int idx0, int idx1, int idx2 );
+/** @overload
+@param arr Input array. Must have a single channel.
+@param idx Array of the element indices
+*/
+CVAPI(double) cvGetRealND( const CvArr* arr, const int* idx );
+
+/** @brief Change the particular array element.
+
+The functions assign the new value to a particular array element. In the case of a sparse array the
+functions create the node if it does not exist yet.
+@param arr Input array
+@param idx0 The first zero-based component of the element index
+@param value The assigned value
+ */
+CVAPI(void) cvSet1D( CvArr* arr, int idx0, CvScalar value );
+/** @overload */
+CVAPI(void) cvSet2D( CvArr* arr, int idx0, int idx1, CvScalar value );
+/** @overload */
+CVAPI(void) cvSet3D( CvArr* arr, int idx0, int idx1, int idx2, CvScalar value );
+/** @overload
+@param arr Input array
+@param idx Array of the element indices
+@param value The assigned value
+*/
+CVAPI(void) cvSetND( CvArr* arr, const int* idx, CvScalar value );
+
+/** @brief Change a specific array element.
+
+The functions assign a new value to a specific element of a single-channel array. If the array has
+multiple channels, a runtime error is raised. Note that the Set\*D function can be used safely for
+both single-channel and multiple-channel arrays, though they are a bit slower.
+
+In the case of a sparse array the functions create the node if it does not yet exist.
+@param arr Input array
+@param idx0 The first zero-based component of the element index
+@param value The assigned value
+ */
+CVAPI(void) cvSetReal1D( CvArr* arr, int idx0, double value );
+/** @overload */
+CVAPI(void) cvSetReal2D( CvArr* arr, int idx0, int idx1, double value );
+/** @overload */
+CVAPI(void) cvSetReal3D( CvArr* arr, int idx0,
+ int idx1, int idx2, double value );
+/** @overload
+@param arr Input array
+@param idx Array of the element indices
+@param value The assigned value
+*/
+CVAPI(void) cvSetRealND( CvArr* arr, const int* idx, double value );
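+
+// Element-access sketch (illustrative):
+//
+//     CvMat* m = cvCreateMat(3, 3, CV_32FC1);
+//     cvSetReal2D(m, 1, 2, 7.5);            // row 1, column 2
+//     double v = cvGetReal2D(m, 1, 2);      // 7.5
+//     CvScalar s = cvGet2D(m, 1, 2);        // s.val[0] == 7.5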
+
+/** clears element of ND dense array,
+ in case of sparse arrays it deletes the specified node */
+CVAPI(void) cvClearND( CvArr* arr, const int* idx );
+
+/** @brief Returns matrix header for arbitrary array.
+
+The function returns a matrix header for the input array that can be a matrix - CvMat, an image -
+IplImage, or a multi-dimensional dense array - CvMatND (the third option is allowed only if
+allowND != 0). In the case of a matrix the function simply returns the input pointer. In the case of
+IplImage\* or CvMatND it initializes the header structure with parameters of the current image ROI
+and returns &header. Because COI is not supported by CvMat, it is returned separately.
+
+The function provides an easy way to handle both types of arrays - IplImage and CvMat using the same
+code. Input array must have non-zero data pointer, otherwise the function will report an error.
+
+@note If the input array is IplImage with planar data layout and COI set, the function returns the
+pointer to the selected plane and COI == 0. This feature allows user to process IplImage structures
+with planar data layout, even though OpenCV does not support such images.
+@param arr Input array
+@param header Pointer to CvMat structure used as a temporary buffer
+@param coi Optional output parameter for storing COI
+@param allowND If non-zero, the function accepts multi-dimensional dense arrays (CvMatND\*) and
+returns 2D matrix (if CvMatND has two dimensions) or 1D matrix (when CvMatND has 1 dimension or
+more than 2 dimensions). The CvMatND array must be continuous.
+@sa cvGetImage, cvarrToMat.
+ */
+CVAPI(CvMat*) cvGetMat( const CvArr* arr, CvMat* header,
+ int* coi CV_DEFAULT(NULL),
+ int allowND CV_DEFAULT(0));
+
+/** @brief Returns image header for arbitrary array.
+
+The function returns the image header for the input array that can be a matrix (CvMat) or image
+(IplImage). In the case of an image the function simply returns the input pointer. In the case of
+CvMat it initializes an image_header structure with the parameters of the input matrix. Note that
+if we transform IplImage to CvMat using cvGetMat and then transform CvMat back to IplImage using
+this function, we will get different headers if the ROI is set in the original image.
+@param arr Input array
+@param image_header Pointer to IplImage structure used as a temporary buffer
+ */
+CVAPI(IplImage*) cvGetImage( const CvArr* arr, IplImage* image_header );
+
+
+/** @brief Changes the shape of a multi-dimensional array without copying the data.
+
+The function is an advanced version of cvReshape that can work with multi-dimensional arrays as
+well (though it can work with ordinary images and matrices) and change the number of dimensions.
+
+Below are the two samples from the cvReshape description rewritten using cvReshapeMatND:
+@code
+ IplImage* color_img = cvCreateImage(cvSize(320,240), IPL_DEPTH_8U, 3);
+ IplImage gray_img_hdr, *gray_img;
+ gray_img = (IplImage*)cvReshapeMatND(color_img, sizeof(gray_img_hdr), &gray_img_hdr, 1, 0, 0);
+ ...
+ int size[] = { 2, 2, 2 };
+ CvMatND* mat = cvCreateMatND(3, size, CV_32F);
+ CvMat row_header, *row;
+ row = (CvMat*)cvReshapeMatND(mat, sizeof(row_header), &row_header, 0, 1, 0);
+@endcode
+In C, the header file for this function includes a convenient macro cvReshapeND that does away with
+the sizeof_header parameter. So, the lines containing the call to cvReshapeMatND in the examples
+may be replaced as follows:
+@code
+ gray_img = (IplImage*)cvReshapeND(color_img, &gray_img_hdr, 1, 0, 0);
+ ...
+ row = (CvMat*)cvReshapeND(mat, &row_header, 0, 1, 0);
+@endcode
+@param arr Input array
+@param sizeof_header Size of output header to distinguish between IplImage, CvMat and CvMatND
+output headers
+@param header Output header to be filled
+@param new_cn New number of channels. new_cn = 0 means that the number of channels remains
+unchanged.
+@param new_dims New number of dimensions. new_dims = 0 means that the number of dimensions
+remains the same.
+@param new_sizes Array of new dimension sizes. Only new_dims-1 values are used, because the
+total number of elements must remain the same. Thus, if new_dims = 1, new_sizes array is not
+used.
+ */
+CVAPI(CvArr*) cvReshapeMatND( const CvArr* arr,
+ int sizeof_header, CvArr* header,
+ int new_cn, int new_dims, int* new_sizes );
+
+#define cvReshapeND( arr, header, new_cn, new_dims, new_sizes ) \
+ cvReshapeMatND( (arr), sizeof(*(header)), (header), \
+ (new_cn), (new_dims), (new_sizes))
+
+/** @brief Changes shape of matrix/image without copying data.
+
+The function initializes the CvMat header so that it points to the same data as the original array
+but has a different shape - different number of channels, different number of rows, or both.
+
+The following example code creates one image buffer and two image headers, the first is for a
+320x240x3 image and the second is for a 960x240x1 image:
+@code
+ IplImage* color_img = cvCreateImage(cvSize(320,240), IPL_DEPTH_8U, 3);
+ CvMat gray_mat_hdr;
+ IplImage gray_img_hdr, *gray_img;
+ cvReshape(color_img, &gray_mat_hdr, 1);
+ gray_img = cvGetImage(&gray_mat_hdr, &gray_img_hdr);
+@endcode
+And the next example converts a 3x3 matrix to a single 1x9 vector:
+@code
+ CvMat* mat = cvCreateMat(3, 3, CV_32F);
+ CvMat row_header, *row;
+ row = cvReshape(mat, &row_header, 0, 1);
+@endcode
+@param arr Input array
+@param header Output header to be filled
+@param new_cn New number of channels. 'new_cn = 0' means that the number of channels remains
+unchanged.
+@param new_rows New number of rows. 'new_rows = 0' means that the number of rows remains
+unchanged unless it needs to be changed according to new_cn value.
+*/
+CVAPI(CvMat*) cvReshape( const CvArr* arr, CvMat* header,
+ int new_cn, int new_rows CV_DEFAULT(0) );
+
+/** Repeats source 2d array several times in both horizontal and
+ vertical direction to fill destination array */
+CVAPI(void) cvRepeat( const CvArr* src, CvArr* dst );
+
+/** @brief Allocates array data
+
+The function allocates image, matrix or multi-dimensional dense array data. Note that in the case of
+matrix types OpenCV allocation functions are used. In the case of IplImage they are used unless
+CV_TURN_ON_IPL_COMPATIBILITY() has been called before. In the latter case IPL functions are used
+to allocate the data.
+@param arr Array header
+ */
+CVAPI(void) cvCreateData( CvArr* arr );
+
+/** @brief Releases array data.
+
+The function releases the array data. In the case of CvMat or CvMatND it simply calls
+cvDecRefData(), that is, the function cannot deallocate external data. See also the note to
+cvCreateData .
+@param arr Array header
+ */
+CVAPI(void) cvReleaseData( CvArr* arr );
+
+/** @brief Assigns user data to the array header.
+
+The function assigns user data to the array header. The header should be initialized beforehand using
+cvCreateMatHeader, cvCreateImageHeader, cvCreateMatNDHeader, cvInitMatHeader,
+cvInitImageHeader or cvInitMatNDHeader.
+@param arr Array header
+@param data User data
+@param step Full row length in bytes
+ */
+CVAPI(void) cvSetData( CvArr* arr, void* data, int step );
+
+/** @brief Retrieves low-level information about the array.
+
+The function fills output variables with low-level information about the array data. All output
+parameters are optional, so some of the pointers may be set to NULL. If the array is IplImage with
+ROI set, the parameters of ROI are returned.
+
+The following example shows how to get access to array elements. It computes absolute values of the
+array elements :
+@code
+ float* data;
+ int step;
+ CvSize size;
+
+ cvGetRawData(array, (uchar**)&data, &step, &size);
+ step /= sizeof(data[0]);
+
+ for(int y = 0; y < size.height; y++, data += step )
+ for(int x = 0; x < size.width; x++ )
+ data[x] = (float)fabs(data[x]);
+@endcode
+@param arr Array header
+@param data Output pointer to the whole image origin or ROI origin if ROI is set
+@param step Output full row length in bytes
+@param roi_size Output ROI size
+ */
+CVAPI(void) cvGetRawData( const CvArr* arr, uchar** data,
+ int* step CV_DEFAULT(NULL),
+ CvSize* roi_size CV_DEFAULT(NULL));
+
+/** @brief Returns size of matrix or image ROI.
+
+The function returns number of rows (CvSize::height) and number of columns (CvSize::width) of the
+input matrix or image. In the case of image the size of ROI is returned.
+@param arr array header
+ */
+CVAPI(CvSize) cvGetSize( const CvArr* arr );
+
+/** @brief Copies one array to another.
+
+The function copies selected elements from an input array to an output array:
+
+\f[\texttt{dst} (I)= \texttt{src} (I) \quad \text{if} \quad \texttt{mask} (I) \ne 0.\f]
+
+If any of the passed arrays is of IplImage type, then its ROI and COI fields are used. Both arrays
+must have the same type, the same number of dimensions, and the same size. The function can also
+copy sparse arrays (mask is not supported in this case).
+@param src The source array
+@param dst The destination array
+@param mask Operation mask, 8-bit single channel array; specifies elements of the destination array
+to be changed
+ */
+CVAPI(void) cvCopy( const CvArr* src, CvArr* dst,
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/** @brief Sets every element of an array to a given value.
+
+The function copies the scalar value to every selected element of the destination array:
+\f[\texttt{arr} (I)= \texttt{value} \quad \text{if} \quad \texttt{mask} (I) \ne 0\f]
+If array arr is of IplImage type, then its ROI is used, but COI must not be set.
+@param arr The destination array
+@param value Fill value
+@param mask Operation mask, 8-bit single channel array; specifies elements of the destination
+array to be changed
+ */
+CVAPI(void) cvSet( CvArr* arr, CvScalar value,
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/** @brief Clears the array.
+
+The function clears the array. In the case of dense arrays (CvMat, CvMatND or IplImage),
+cvZero(array) is equivalent to cvSet(array,cvScalarAll(0),0). In the case of sparse arrays all the
+elements are removed.
+@param arr Array to be cleared
+ */
+CVAPI(void) cvSetZero( CvArr* arr );
+#define cvZero cvSetZero
+
+
+/** Splits a multi-channel array into the set of single-channel arrays or
+ extracts particular [color] plane */
+CVAPI(void) cvSplit( const CvArr* src, CvArr* dst0, CvArr* dst1,
+ CvArr* dst2, CvArr* dst3 );
+
+/** Merges a set of single-channel arrays into the single multi-channel array
+ or inserts one particular [color] plane to the array */
+CVAPI(void) cvMerge( const CvArr* src0, const CvArr* src1,
+ const CvArr* src2, const CvArr* src3,
+ CvArr* dst );
+
+/** Copies several channels from input arrays to
+ certain channels of output arrays */
+CVAPI(void) cvMixChannels( const CvArr** src, int src_count,
+ CvArr** dst, int dst_count,
+ const int* from_to, int pair_count );
+
+/** @brief Converts one array to another with optional linear transformation.
+
+The function has several different purposes, and thus has several different names. It copies one
+array to another with optional scaling, which is performed first, and/or optional type conversion,
+performed after:
+
+\f[\texttt{dst} (I) = \texttt{scale} \texttt{src} (I) + ( \texttt{shift} _0, \texttt{shift} _1,...)\f]
+
+All the channels of multi-channel arrays are processed independently.
+
+The type conversion is done with rounding and saturation, that is, if the result of scaling +
+conversion cannot be represented exactly by a value of the destination array element type, it is
+set to the nearest representable value on the real axis.
+@param src Source array
+@param dst Destination array
+@param scale Scale factor
+@param shift Value added to the scaled source array elements
+ */
+CVAPI(void) cvConvertScale( const CvArr* src, CvArr* dst,
+ double scale CV_DEFAULT(1),
+ double shift CV_DEFAULT(0) );
+#define cvCvtScale cvConvertScale
+#define cvScale cvConvertScale
+#define cvConvert( src, dst ) cvConvertScale( (src), (dst), 1, 0 )
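+
+// Conversion sketch (illustrative): convert an 8-bit image to float in [0, 1];
+// `w` and `h` are assumed image dimensions.
+//
+//     IplImage* src8u  = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
+//     IplImage* dst32f = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 1);
+//     cvConvertScale(src8u, dst32f, 1.0 / 255.0, 0);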
+
+
+/** Performs linear transformation on every source array element,
+ stores absolute value of the result:
+ dst(x,y,c) = abs(scale*src(x,y,c)+shift).
+ destination array must have 8u type.
+ In other cases one may use cvConvertScale + cvAbsDiffS */
+CVAPI(void) cvConvertScaleAbs( const CvArr* src, CvArr* dst,
+ double scale CV_DEFAULT(1),
+ double shift CV_DEFAULT(0) );
+#define cvCvtScaleAbs cvConvertScaleAbs
+
+
+/** checks termination criteria validity and
+ sets eps to default_eps (if it is not set),
+ max_iter to default_max_iters (if it is not set)
+*/
+CVAPI(CvTermCriteria) cvCheckTermCriteria( CvTermCriteria criteria,
+ double default_eps,
+ int default_max_iters );
+
+/****************************************************************************************\
+* Arithmetic, logic and comparison operations *
+\****************************************************************************************/
+
+/** dst(mask) = src1(mask) + src2(mask) */
+CVAPI(void) cvAdd( const CvArr* src1, const CvArr* src2, CvArr* dst,
+ const CvArr* mask CV_DEFAULT(NULL));
+
+/** dst(mask) = src(mask) + value */
+CVAPI(void) cvAddS( const CvArr* src, CvScalar value, CvArr* dst,
+ const CvArr* mask CV_DEFAULT(NULL));
+
+/** dst(mask) = src1(mask) - src2(mask) */
+CVAPI(void) cvSub( const CvArr* src1, const CvArr* src2, CvArr* dst,
+ const CvArr* mask CV_DEFAULT(NULL));
+
+/** dst(mask) = src(mask) - value = src(mask) + (-value) */
+CV_INLINE void cvSubS( const CvArr* src, CvScalar value, CvArr* dst,
+ const CvArr* mask CV_DEFAULT(NULL))
+{
+ cvAddS( src, cvScalar( -value.val[0], -value.val[1], -value.val[2], -value.val[3]),
+ dst, mask );
+}
+
+/** dst(mask) = value - src(mask) */
+CVAPI(void) cvSubRS( const CvArr* src, CvScalar value, CvArr* dst,
+ const CvArr* mask CV_DEFAULT(NULL));
+
+/** dst(idx) = src1(idx) * src2(idx) * scale
+ (scaled element-wise multiplication of 2 arrays) */
+CVAPI(void) cvMul( const CvArr* src1, const CvArr* src2,
+ CvArr* dst, double scale CV_DEFAULT(1) );
+
+/** element-wise division/inversion with scaling:
+ dst(idx) = src1(idx) * scale / src2(idx)
+ or dst(idx) = scale / src2(idx) if src1 == 0 */
+CVAPI(void) cvDiv( const CvArr* src1, const CvArr* src2,
+ CvArr* dst, double scale CV_DEFAULT(1));
+
+/** dst = src1 * scale + src2 */
+CVAPI(void) cvScaleAdd( const CvArr* src1, CvScalar scale,
+ const CvArr* src2, CvArr* dst );
+#define cvAXPY( A, real_scalar, B, C ) cvScaleAdd(A, cvRealScalar(real_scalar), B, C)
+
+/** dst = src1 * alpha + src2 * beta + gamma */
+CVAPI(void) cvAddWeighted( const CvArr* src1, double alpha,
+ const CvArr* src2, double beta,
+ double gamma, CvArr* dst );
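+
+/* Usage sketch (assumes img1, img2 and dst are arrays of the same size and type):
+   a 70/30 blend of two images.
+
+       cvAddWeighted( img1, 0.7, img2, 0.3, 0.0, dst );
+*/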
+
+/** @brief Calculates the dot product of two arrays in Euclidean metrics.
+
+The function calculates and returns the Euclidean dot product of two arrays.
+
+\f[src1 \bullet src2 = \sum _I ( \texttt{src1} (I) \texttt{src2} (I))\f]
+
+In the case of multiple channel arrays, the results for all channels are accumulated. In particular,
+cvDotProduct(a,a) where a is a complex vector, will return \f$||\texttt{a}||^2\f$. The function can
+process multi-dimensional arrays, row by row, layer by layer, and so on.
+@param src1 The first source array
+@param src2 The second source array
+ */
+CVAPI(double) cvDotProduct( const CvArr* src1, const CvArr* src2 );
+
+/** dst(idx) = src1(idx) & src2(idx) */
+CVAPI(void) cvAnd( const CvArr* src1, const CvArr* src2,
+ CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));
+
+/** dst(idx) = src(idx) & value */
+CVAPI(void) cvAndS( const CvArr* src, CvScalar value,
+ CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));
+
+/** dst(idx) = src1(idx) | src2(idx) */
+CVAPI(void) cvOr( const CvArr* src1, const CvArr* src2,
+ CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));
+
+/** dst(idx) = src(idx) | value */
+CVAPI(void) cvOrS( const CvArr* src, CvScalar value,
+ CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));
+
+/** dst(idx) = src1(idx) ^ src2(idx) */
+CVAPI(void) cvXor( const CvArr* src1, const CvArr* src2,
+ CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));
+
+/** dst(idx) = src(idx) ^ value */
+CVAPI(void) cvXorS( const CvArr* src, CvScalar value,
+ CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));
+
+/** dst(idx) = ~src(idx) */
+CVAPI(void) cvNot( const CvArr* src, CvArr* dst );
+
+/** dst(idx) = lower(idx) <= src(idx) < upper(idx) */
+CVAPI(void) cvInRange( const CvArr* src, const CvArr* lower,
+ const CvArr* upper, CvArr* dst );
+
+/** dst(idx) = lower <= src(idx) < upper */
+CVAPI(void) cvInRangeS( const CvArr* src, CvScalar lower,
+ CvScalar upper, CvArr* dst );
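+
+/* Usage sketch (assumes `hsv` is an 8-bit 3-channel image and `mask` is an 8-bit
+   single-channel image of the same size): select pixels inside a per-channel range.
+
+       cvInRangeS( hsv, cvScalar(20, 100, 100, 0), cvScalar(30, 255, 255, 0), mask );
+*/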
+
+#define CV_CMP_EQ 0
+#define CV_CMP_GT 1
+#define CV_CMP_GE 2
+#define CV_CMP_LT 3
+#define CV_CMP_LE 4
+#define CV_CMP_NE 5
+
+/** The comparison operations support single-channel arrays only.
+ Destination image should be 8uC1 or 8sC1 */
+
+/** dst(idx) = src1(idx) _cmp_op_ src2(idx) */
+CVAPI(void) cvCmp( const CvArr* src1, const CvArr* src2, CvArr* dst, int cmp_op );
+
+/** dst(idx) = src1(idx) _cmp_op_ value */
+CVAPI(void) cvCmpS( const CvArr* src, double value, CvArr* dst, int cmp_op );
+
+/** dst(idx) = min(src1(idx),src2(idx)) */
+CVAPI(void) cvMin( const CvArr* src1, const CvArr* src2, CvArr* dst );
+
+/** dst(idx) = max(src1(idx),src2(idx)) */
+CVAPI(void) cvMax( const CvArr* src1, const CvArr* src2, CvArr* dst );
+
+/** dst(idx) = min(src(idx),value) */
+CVAPI(void) cvMinS( const CvArr* src, double value, CvArr* dst );
+
+/** dst(idx) = max(src(idx),value) */
+CVAPI(void) cvMaxS( const CvArr* src, double value, CvArr* dst );
+
+/** dst(x,y,c) = abs(src1(x,y,c) - src2(x,y,c)) */
+CVAPI(void) cvAbsDiff( const CvArr* src1, const CvArr* src2, CvArr* dst );
+
+/** dst(x,y,c) = abs(src(x,y,c) - value(c)) */
+CVAPI(void) cvAbsDiffS( const CvArr* src, CvArr* dst, CvScalar value );
+#define cvAbs( src, dst ) cvAbsDiffS( (src), (dst), cvScalarAll(0))
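+
+/* Usage sketch (assumes prev, curr, diff and motion_mask are pre-allocated 8-bit
+   single-channel images of the same size): simple frame differencing.
+
+       cvAbsDiff( prev, curr, diff );
+       cvCmpS( diff, 30, motion_mask, CV_CMP_GT );    // 255 where |prev - curr| > 30
+*/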
+
+/****************************************************************************************\
+* Math operations *
+\****************************************************************************************/
+
+/** Does cartesian->polar coordinates conversion.
+ Either of output components (magnitude or angle) is optional */
+CVAPI(void) cvCartToPolar( const CvArr* x, const CvArr* y,
+ CvArr* magnitude, CvArr* angle CV_DEFAULT(NULL),
+ int angle_in_degrees CV_DEFAULT(0));
+
+/** Does polar->cartesian coordinates conversion.
+ Either of output components (magnitude or angle) is optional.
+ If magnitude is missing it is assumed to be all 1's */
+CVAPI(void) cvPolarToCart( const CvArr* magnitude, const CvArr* angle,
+ CvArr* x, CvArr* y,
+ int angle_in_degrees CV_DEFAULT(0));
+
+/** Does powering: dst(idx) = src(idx)^power */
+CVAPI(void) cvPow( const CvArr* src, CvArr* dst, double power );
+
+/** Does exponentiation: dst(idx) = exp(src(idx)).
+ Overflow is not handled yet. Underflow is handled.
+ Maximal relative error is ~7e-6 for single-precision input */
+CVAPI(void) cvExp( const CvArr* src, CvArr* dst );
+
+/** Calculates natural logarithms: dst(idx) = log(abs(src(idx))).
+    Logarithm of 0 gives a large negative number (~-700).
+ Maximal relative error is ~3e-7 for single-precision output
+*/
+CVAPI(void) cvLog( const CvArr* src, CvArr* dst );
+
+/** Fast arctangent calculation */
+CVAPI(float) cvFastArctan( float y, float x );
+
+/** Fast cubic root calculation */
+CVAPI(float) cvCbrt( float value );
+
+#define CV_CHECK_RANGE 1
+#define CV_CHECK_QUIET 2
+/** Checks array values for NaNs, Infs or simply for too large numbers
+ (if CV_CHECK_RANGE is set). If CV_CHECK_QUIET is set,
+    no runtime error is raised (the function returns zero in case of "bad" values).
+ Otherwise cvError is called */
+CVAPI(int) cvCheckArr( const CvArr* arr, int flags CV_DEFAULT(0),
+ double min_val CV_DEFAULT(0), double max_val CV_DEFAULT(0));
+#define cvCheckArray cvCheckArr
+
+#define CV_RAND_UNI 0
+#define CV_RAND_NORMAL 1
+
+/** @brief Fills an array with random numbers and updates the RNG state.
+
+The function fills the destination array with uniformly or normally distributed random numbers.
+@param rng CvRNG state initialized by cvRNG
+@param arr The destination array
+@param dist_type Distribution type
+> - **CV_RAND_UNI** uniform distribution
+> - **CV_RAND_NORMAL** normal or Gaussian distribution
+@param param1 The first parameter of the distribution. In the case of a uniform distribution it is
+the inclusive lower boundary of the random numbers range. In the case of a normal distribution it
+is the mean value of the random numbers.
+@param param2 The second parameter of the distribution. In the case of a uniform distribution it
+is the exclusive upper boundary of the random numbers range. In the case of a normal distribution
+it is the standard deviation of the random numbers.
+@sa randu, randn, RNG::fill.
+ */
+CVAPI(void) cvRandArr( CvRNG* rng, CvArr* arr, int dist_type,
+ CvScalar param1, CvScalar param2 );
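+
+/* Usage sketch (assumes `noise` is a pre-allocated CV_32FC1 array): fill it with
+   uniformly distributed values in [0, 1) using a seeded RNG.
+
+       CvRNG rng = cvRNG(0x12345678);
+       cvRandArr( &rng, noise, CV_RAND_UNI, cvRealScalar(0), cvRealScalar(1) );
+*/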
+
+CVAPI(void) cvRandShuffle( CvArr* mat, CvRNG* rng,
+ double iter_factor CV_DEFAULT(1.));
+
+#define CV_SORT_EVERY_ROW 0
+#define CV_SORT_EVERY_COLUMN 1
+#define CV_SORT_ASCENDING 0
+#define CV_SORT_DESCENDING 16
+
+CVAPI(void) cvSort( const CvArr* src, CvArr* dst CV_DEFAULT(NULL),
+ CvArr* idxmat CV_DEFAULT(NULL),
+ int flags CV_DEFAULT(0));
+
+/** Finds real roots of a cubic equation */
+CVAPI(int) cvSolveCubic( const CvMat* coeffs, CvMat* roots );
+
+/** Finds all real and complex roots of a polynomial equation */
+CVAPI(void) cvSolvePoly(const CvMat* coeffs, CvMat *roots2,
+ int maxiter CV_DEFAULT(20), int fig CV_DEFAULT(100));
+
+/****************************************************************************************\
+* Matrix operations *
+\****************************************************************************************/
+
+/** @brief Calculates the cross product of two 3D vectors.
+
+The function calculates the cross product of two 3D vectors:
+\f[\texttt{dst} = \texttt{src1} \times \texttt{src2}\f]
+or:
+\f[\begin{array}{l} \texttt{dst} _1 = \texttt{src1} _2 \texttt{src2} _3 - \texttt{src1} _3 \texttt{src2} _2 \\ \texttt{dst} _2 = \texttt{src1} _3 \texttt{src2} _1 - \texttt{src1} _1 \texttt{src2} _3 \\ \texttt{dst} _3 = \texttt{src1} _1 \texttt{src2} _2 - \texttt{src1} _2 \texttt{src2} _1 \end{array}\f]
+@param src1 The first source vector
+@param src2 The second source vector
+@param dst The destination vector
+ */
+CVAPI(void) cvCrossProduct( const CvArr* src1, const CvArr* src2, CvArr* dst );
+
+/** Matrix transform: dst = A*B + C, C is optional */
+#define cvMatMulAdd( src1, src2, src3, dst ) cvGEMM( (src1), (src2), 1., (src3), 1., (dst), 0 )
+#define cvMatMul( src1, src2, dst ) cvMatMulAdd( (src1), (src2), NULL, (dst))
+
+#define CV_GEMM_A_T 1
+#define CV_GEMM_B_T 2
+#define CV_GEMM_C_T 4
+/** Extended matrix transform:
+ dst = alpha*op(A)*op(B) + beta*op(C), where op(X) is X or X^T */
+CVAPI(void) cvGEMM( const CvArr* src1, const CvArr* src2, double alpha,
+ const CvArr* src3, double beta, CvArr* dst,
+ int tABC CV_DEFAULT(0));
+#define cvMatMulAddEx cvGEMM
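+
+/* Usage sketch (assumes A, B and D are CvMat* of compatible sizes): compute
+   D = A^T * B with the transpose flag instead of an explicit cvTranspose call.
+
+       cvGEMM( A, B, 1.0, NULL, 0.0, D, CV_GEMM_A_T );
+*/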
+
+/** Transforms each element of source array and stores
+ resultant vectors in destination array */
+CVAPI(void) cvTransform( const CvArr* src, CvArr* dst,
+ const CvMat* transmat,
+ const CvMat* shiftvec CV_DEFAULT(NULL));
+#define cvMatMulAddS cvTransform
+
+/** Does perspective transform on every element of input array */
+CVAPI(void) cvPerspectiveTransform( const CvArr* src, CvArr* dst,
+ const CvMat* mat );
+
+/** Calculates (A-delta)*(A-delta)^T (order=0) or (A-delta)^T*(A-delta) (order=1) */
+CVAPI(void) cvMulTransposed( const CvArr* src, CvArr* dst, int order,
+ const CvArr* delta CV_DEFAULT(NULL),
+ double scale CV_DEFAULT(1.) );
+
+/** Transposes a matrix. Square matrices can be transposed in-place */
+CVAPI(void) cvTranspose( const CvArr* src, CvArr* dst );
+#define cvT cvTranspose
+
+/** Completes the symmetric matrix from the lower (LtoR=0) or from the upper (LtoR!=0) part */
+CVAPI(void) cvCompleteSymm( CvMat* matrix, int LtoR CV_DEFAULT(0) );
+
+/** Mirror array data around horizontal (flip=0),
+    vertical (flip=1) or both (flip=-1) axes:
+    cvFlip(src) flips images vertically and sequences horizontally (in-place) */
+CVAPI(void) cvFlip( const CvArr* src, CvArr* dst CV_DEFAULT(NULL),
+ int flip_mode CV_DEFAULT(0));
+#define cvMirror cvFlip
+
+
+#define CV_SVD_MODIFY_A 1
+#define CV_SVD_U_T 2
+#define CV_SVD_V_T 4
+
+/** Performs Singular Value Decomposition of a matrix */
+CVAPI(void) cvSVD( CvArr* A, CvArr* W, CvArr* U CV_DEFAULT(NULL),
+ CvArr* V CV_DEFAULT(NULL), int flags CV_DEFAULT(0));
+
+/** Performs Singular Value Back Substitution (solves A*X = B):
+ flags must be the same as in cvSVD */
+CVAPI(void) cvSVBkSb( const CvArr* W, const CvArr* U,
+ const CvArr* V, const CvArr* B,
+ CvArr* X, int flags );
+
+#define CV_LU 0
+#define CV_SVD 1
+#define CV_SVD_SYM 2
+#define CV_CHOLESKY 3
+#define CV_QR 4
+#define CV_NORMAL 16
+
+/** Inverts matrix */
+CVAPI(double) cvInvert( const CvArr* src, CvArr* dst,
+ int method CV_DEFAULT(CV_LU));
+#define cvInv cvInvert
+
+/** Solves linear system (src1)*(dst) = (src2)
+    (returns 0 if src1 is singular and the CV_LU method is used) */
+CVAPI(int) cvSolve( const CvArr* src1, const CvArr* src2, CvArr* dst,
+ int method CV_DEFAULT(CV_LU));
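+
+/* Usage sketch (assumes A is an m x n CvMat*, b is m x 1 and x is n x 1, with m >= n):
+   least-squares solution of an over-determined linear system.
+
+       cvSolve( A, b, x, CV_SVD );
+*/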
+
+/** Calculates determinant of input matrix */
+CVAPI(double) cvDet( const CvArr* mat );
+
+/** Calculates trace of the matrix (sum of elements on the main diagonal) */
+CVAPI(CvScalar) cvTrace( const CvArr* mat );
+
+/** Finds eigen values and vectors of a symmetric matrix */
+CVAPI(void) cvEigenVV( CvArr* mat, CvArr* evects, CvArr* evals,
+ double eps CV_DEFAULT(0),
+ int lowindex CV_DEFAULT(-1),
+ int highindex CV_DEFAULT(-1));
+
+///* Finds selected eigen values and vectors of a symmetric matrix */
+//CVAPI(void) cvSelectedEigenVV( CvArr* mat, CvArr* evects, CvArr* evals,
+// int lowindex, int highindex );
+
+/** Makes an identity matrix (mat_ij = i == j) */
+CVAPI(void) cvSetIdentity( CvArr* mat, CvScalar value CV_DEFAULT(cvRealScalar(1)) );
+
+/** Fills matrix with given range of numbers */
+CVAPI(CvArr*) cvRange( CvArr* mat, double start, double end );
+
+/** @anchor core_c_CovarFlags
+@name Flags for cvCalcCovarMatrix
+@see cvCalcCovarMatrix
+ @{
+*/
+
+/** flag for cvCalcCovarMatrix, transpose([v1-avg, v2-avg,...]) * [v1-avg,v2-avg,...] */
+#define CV_COVAR_SCRAMBLED 0
+
+/** flag for cvCalcCovarMatrix, [v1-avg, v2-avg,...] * transpose([v1-avg,v2-avg,...]) */
+#define CV_COVAR_NORMAL 1
+
+/** flag for cvCalcCovarMatrix, do not calc average (i.e. mean vector) - use the input vector instead
+ (useful for calculating covariance matrix by parts) */
+#define CV_COVAR_USE_AVG 2
+
+/** flag for cvCalcCovarMatrix, scale the covariance matrix coefficients by number of the vectors */
+#define CV_COVAR_SCALE 4
+
+/** flag for cvCalcCovarMatrix, all the input vectors are stored in a single matrix, as its rows */
+#define CV_COVAR_ROWS 8
+
+/** flag for cvCalcCovarMatrix, all the input vectors are stored in a single matrix, as its columns */
+#define CV_COVAR_COLS 16
+
+/** @} */
+
+/** Calculates the covariance matrix for a set of vectors
+@see @ref core_c_CovarFlags "flags"
+*/
+CVAPI(void) cvCalcCovarMatrix( const CvArr** vects, int count,
+ CvArr* cov_mat, CvArr* avg, int flags );
+
+#define CV_PCA_DATA_AS_ROW 0
+#define CV_PCA_DATA_AS_COL 1
+#define CV_PCA_USE_AVG 2
+CVAPI(void) cvCalcPCA( const CvArr* data, CvArr* mean,
+ CvArr* eigenvals, CvArr* eigenvects, int flags );
+
+CVAPI(void) cvProjectPCA( const CvArr* data, const CvArr* mean,
+ const CvArr* eigenvects, CvArr* result );
+
+CVAPI(void) cvBackProjectPCA( const CvArr* proj, const CvArr* mean,
+ const CvArr* eigenvects, CvArr* result );
+
+/** Calculates Mahalanobis(weighted) distance */
+CVAPI(double) cvMahalanobis( const CvArr* vec1, const CvArr* vec2, const CvArr* mat );
+#define cvMahalonobis cvMahalanobis
+
+/****************************************************************************************\
+* Array Statistics *
+\****************************************************************************************/
+
+/** Finds sum of array elements */
+CVAPI(CvScalar) cvSum( const CvArr* arr );
+
+/** Calculates number of non-zero pixels */
+CVAPI(int) cvCountNonZero( const CvArr* arr );
+
+/** Calculates mean value of array elements */
+CVAPI(CvScalar) cvAvg( const CvArr* arr, const CvArr* mask CV_DEFAULT(NULL) );
+
+/** Calculates mean and standard deviation of pixel values */
+CVAPI(void) cvAvgSdv( const CvArr* arr, CvScalar* mean, CvScalar* std_dev,
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/** Finds global minimum, maximum and their positions */
+CVAPI(void) cvMinMaxLoc( const CvArr* arr, double* min_val, double* max_val,
+ CvPoint* min_loc CV_DEFAULT(NULL),
+ CvPoint* max_loc CV_DEFAULT(NULL),
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/** @anchor core_c_NormFlags
+ @name Flags for cvNorm and cvNormalize
+ @{
+*/
+#define CV_C 1
+#define CV_L1 2
+#define CV_L2 4
+#define CV_NORM_MASK 7
+#define CV_RELATIVE 8
+#define CV_DIFF 16
+#define CV_MINMAX 32
+
+#define CV_DIFF_C (CV_DIFF | CV_C)
+#define CV_DIFF_L1 (CV_DIFF | CV_L1)
+#define CV_DIFF_L2 (CV_DIFF | CV_L2)
+#define CV_RELATIVE_C (CV_RELATIVE | CV_C)
+#define CV_RELATIVE_L1 (CV_RELATIVE | CV_L1)
+#define CV_RELATIVE_L2 (CV_RELATIVE | CV_L2)
+/** @} */
+
+/** Finds norm, difference norm or relative difference norm for an array (or two arrays)
+@see @ref core_c_NormFlags "flags"
+*/
+CVAPI(double) cvNorm( const CvArr* arr1, const CvArr* arr2 CV_DEFAULT(NULL),
+ int norm_type CV_DEFAULT(CV_L2),
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/** @see @ref core_c_NormFlags "flags" */
+CVAPI(void) cvNormalize( const CvArr* src, CvArr* dst,
+ double a CV_DEFAULT(1.), double b CV_DEFAULT(0.),
+ int norm_type CV_DEFAULT(CV_L2),
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/** @anchor core_c_ReduceFlags
+ @name Flags for cvReduce
+ @{
+*/
+#define CV_REDUCE_SUM 0
+#define CV_REDUCE_AVG 1
+#define CV_REDUCE_MAX 2
+#define CV_REDUCE_MIN 3
+/** @} */
+
+/** @see @ref core_c_ReduceFlags "flags" */
+CVAPI(void) cvReduce( const CvArr* src, CvArr* dst, int dim CV_DEFAULT(-1),
+ int op CV_DEFAULT(CV_REDUCE_SUM) );
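+
+/* Usage sketch (assumes `m` is an h x w CV_32FC1 matrix and `colsum` is a
+   pre-allocated 1 x w CV_32FC1 matrix): sum every column into a single row.
+
+       cvReduce( m, colsum, 0, CV_REDUCE_SUM );    // dim = 0: reduce to a single row
+*/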
+
+/****************************************************************************************\
+* Discrete Linear Transforms and Related Functions *
+\****************************************************************************************/
+
+/** @anchor core_c_DftFlags
+ @name Flags for cvDFT, cvDCT and cvMulSpectrums
+ @{
+ */
+#define CV_DXT_FORWARD 0
+#define CV_DXT_INVERSE 1
+#define CV_DXT_SCALE 2 /**< divide result by size of array */
+#define CV_DXT_INV_SCALE (CV_DXT_INVERSE + CV_DXT_SCALE)
+#define CV_DXT_INVERSE_SCALE CV_DXT_INV_SCALE
+#define CV_DXT_ROWS 4 /**< transform each row individually */
+#define CV_DXT_MUL_CONJ 8 /**< conjugate the second argument of cvMulSpectrums */
+/** @} */
+
+/** Discrete Fourier Transform:
+ complex->complex,
+ real->ccs (forward),
+ ccs->real (inverse)
+@see core_c_DftFlags "flags"
+*/
+CVAPI(void) cvDFT( const CvArr* src, CvArr* dst, int flags,
+ int nonzero_rows CV_DEFAULT(0) );
+#define cvFFT cvDFT
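+
+/* Usage sketch (assumes `src` and `dst` are CV_32FC1 arrays of the same size):
+   forward transform, then inverse with scaling to recover the original data.
+
+       cvDFT( src, dst, CV_DXT_FORWARD, 0 );
+       cvDFT( dst, dst, CV_DXT_INV_SCALE, 0 );
+*/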
+
+/** Multiply results of DFTs: DFT(X)*DFT(Y) or DFT(X)*conj(DFT(Y))
+@see core_c_DftFlags "flags"
+*/
+CVAPI(void) cvMulSpectrums( const CvArr* src1, const CvArr* src2,
+ CvArr* dst, int flags );
+
+/** Finds optimal DFT vector size >= size0 */
+CVAPI(int) cvGetOptimalDFTSize( int size0 );
+
+/** Discrete Cosine Transform
+@see core_c_DftFlags "flags"
+*/
+CVAPI(void) cvDCT( const CvArr* src, CvArr* dst, int flags );
+
+/****************************************************************************************\
+* Dynamic data structures *
+\****************************************************************************************/
+
+/** Calculates length of sequence slice (with support of negative indices). */
+CVAPI(int) cvSliceLength( CvSlice slice, const CvSeq* seq );
+
+
+/** Creates new memory storage.
+ block_size == 0 means that default,
+ somewhat optimal size, is used (currently, it is 64K) */
+CVAPI(CvMemStorage*) cvCreateMemStorage( int block_size CV_DEFAULT(0));
+
+
+/** Creates a memory storage that will borrow memory blocks from parent storage */
+CVAPI(CvMemStorage*) cvCreateChildMemStorage( CvMemStorage* parent );
+
+
+/** Releases memory storage. All the children of a parent must be released before
+ the parent. A child storage returns all the blocks to parent when it is released */
+CVAPI(void) cvReleaseMemStorage( CvMemStorage** storage );
+
+
+/** Clears memory storage. This is the only way(!!!) (besides cvRestoreMemStoragePos)
+ to reuse memory allocated for the storage - cvClearSeq,cvClearSet ...
+ do not free any memory.
+ A child storage returns all the blocks to the parent when it is cleared */
+CVAPI(void) cvClearMemStorage( CvMemStorage* storage );
+
+/** Remember a storage "free memory" position */
+CVAPI(void) cvSaveMemStoragePos( const CvMemStorage* storage, CvMemStoragePos* pos );
+
+/** Restore a storage "free memory" position */
+CVAPI(void) cvRestoreMemStoragePos( CvMemStorage* storage, CvMemStoragePos* pos );
+
+/** Allocates continuous buffer of the specified size in the storage */
+CVAPI(void*) cvMemStorageAlloc( CvMemStorage* storage, size_t size );
+
+/** Allocates string in memory storage */
+CVAPI(CvString) cvMemStorageAllocString( CvMemStorage* storage, const char* ptr,
+ int len CV_DEFAULT(-1) );
+
+/** Creates new empty sequence that will reside in the specified storage */
+CVAPI(CvSeq*) cvCreateSeq( int seq_flags, size_t header_size,
+ size_t elem_size, CvMemStorage* storage );
+
+/** Changes default size (granularity) of sequence blocks.
+ The default size is ~1Kbyte */
+CVAPI(void) cvSetSeqBlockSize( CvSeq* seq, int delta_elems );
+
+
+/** Adds new element to the end of sequence. Returns pointer to the element */
+CVAPI(schar*) cvSeqPush( CvSeq* seq, const void* element CV_DEFAULT(NULL));
+
+
+/** Adds new element to the beginning of sequence. Returns pointer to it */
+CVAPI(schar*) cvSeqPushFront( CvSeq* seq, const void* element CV_DEFAULT(NULL));
+
+
+/** Removes the last element from sequence and optionally saves it */
+CVAPI(void) cvSeqPop( CvSeq* seq, void* element CV_DEFAULT(NULL));
+
+
+/** Removes the first element from sequence and optionally saves it */
+CVAPI(void) cvSeqPopFront( CvSeq* seq, void* element CV_DEFAULT(NULL));
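+
+/* Usage sketch: a growable sequence of CvPoint living in its own storage.
+
+       CvMemStorage* storage = cvCreateMemStorage(0);
+       CvSeq* pts = cvCreateSeq( CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), storage );
+       CvPoint p = cvPoint(10, 20);
+       cvSeqPush( pts, &p );
+       cvReleaseMemStorage( &storage );    // frees the sequence as well
+*/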
+
+
+#define CV_FRONT 1
+#define CV_BACK 0
+/** Adds several new elements to the end of sequence */
+CVAPI(void) cvSeqPushMulti( CvSeq* seq, const void* elements,
+ int count, int in_front CV_DEFAULT(0) );
+
+/** Removes several elements from the end of sequence and optionally saves them */
+CVAPI(void) cvSeqPopMulti( CvSeq* seq, void* elements,
+ int count, int in_front CV_DEFAULT(0) );
+
+/** Inserts a new element in the middle of sequence.
+ cvSeqInsert(seq,0,elem) == cvSeqPushFront(seq,elem) */
+CVAPI(schar*) cvSeqInsert( CvSeq* seq, int before_index,
+ const void* element CV_DEFAULT(NULL));
+
+/** Removes specified sequence element */
+CVAPI(void) cvSeqRemove( CvSeq* seq, int index );
+
+
+/** Removes all the elements from the sequence. The freed memory
+ can be reused later only by the same sequence unless cvClearMemStorage
+ or cvRestoreMemStoragePos is called */
+CVAPI(void) cvClearSeq( CvSeq* seq );
+
+
+/** Retrieves pointer to specified sequence element.
+ Negative indices are supported and mean counting from the end
+    (e.g. -1 means the last sequence element) */
+CVAPI(schar*) cvGetSeqElem( const CvSeq* seq, int index );
+
+/** Calculates index of the specified sequence element.
+ Returns -1 if element does not belong to the sequence */
+CVAPI(int) cvSeqElemIdx( const CvSeq* seq, const void* element,
+ CvSeqBlock** block CV_DEFAULT(NULL) );
+
+/** Initializes sequence writer. The new elements will be added to the end of sequence */
+CVAPI(void) cvStartAppendToSeq( CvSeq* seq, CvSeqWriter* writer );
+
+
+/** Combination of cvCreateSeq and cvStartAppendToSeq */
+CVAPI(void) cvStartWriteSeq( int seq_flags, int header_size,
+ int elem_size, CvMemStorage* storage,
+ CvSeqWriter* writer );
+
+/** Closes sequence writer, updates sequence header and returns pointer
+ to the resultant sequence
+ (which may be useful if the sequence was created using cvStartWriteSeq))
+*/
+CVAPI(CvSeq*) cvEndWriteSeq( CvSeqWriter* writer );
+
+
+/** Updates sequence header. May be useful to get access to some of previously
+ written elements via cvGetSeqElem or sequence reader */
+CVAPI(void) cvFlushSeqWriter( CvSeqWriter* writer );
+
+
+/** Initializes sequence reader.
+ The sequence can be read in forward or backward direction */
+CVAPI(void) cvStartReadSeq( const CvSeq* seq, CvSeqReader* reader,
+ int reverse CV_DEFAULT(0) );
+
+
+/** Returns current sequence reader position (currently observed sequence element) */
+CVAPI(int) cvGetSeqReaderPos( CvSeqReader* reader );
+
+
+/** Changes sequence reader position. It may seek to an absolute position or
+    to a position relative to the current one */
+CVAPI(void) cvSetSeqReaderPos( CvSeqReader* reader, int index,
+ int is_relative CV_DEFAULT(0));
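+
+/* Usage sketch (assumes `pts` is a CvSeq* of CvPoint, as in the sketch above):
+   iterate over the sequence with a reader.
+
+       CvSeqReader reader;
+       int i;
+       cvStartReadSeq( pts, &reader, 0 );
+       for( i = 0; i < pts->total; i++ )
+       {
+           CvPoint pt = *(CvPoint*)reader.ptr;    // current element
+           CV_NEXT_SEQ_ELEM( sizeof(CvPoint), reader );
+       }
+*/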
+
+/** Copies sequence content to a continuous piece of memory */
+CVAPI(void*) cvCvtSeqToArray( const CvSeq* seq, void* elements,
+ CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ) );
+
+/** Creates sequence header for array.
+ After that all the operations on sequences that do not alter the content
+ can be applied to the resultant sequence */
+CVAPI(CvSeq*) cvMakeSeqHeaderForArray( int seq_type, int header_size,
+ int elem_size, void* elements, int total,
+ CvSeq* seq, CvSeqBlock* block );
+
+/** Extracts sequence slice (with or without copying sequence elements) */
+CVAPI(CvSeq*) cvSeqSlice( const CvSeq* seq, CvSlice slice,
+ CvMemStorage* storage CV_DEFAULT(NULL),
+ int copy_data CV_DEFAULT(0));
+
+CV_INLINE CvSeq* cvCloneSeq( const CvSeq* seq, CvMemStorage* storage CV_DEFAULT(NULL))
+{
+ return cvSeqSlice( seq, CV_WHOLE_SEQ, storage, 1 );
+}
+
+/** Removes sequence slice */
+CVAPI(void) cvSeqRemoveSlice( CvSeq* seq, CvSlice slice );
+
+/** Inserts a sequence or array into another sequence */
+CVAPI(void) cvSeqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr );
+
+/** a < b ? -1 : a > b ? 1 : 0 */
+typedef int (CV_CDECL* CvCmpFunc)(const void* a, const void* b, void* userdata );
+
+/** Sorts sequence in-place given element comparison function */
+CVAPI(void) cvSeqSort( CvSeq* seq, CvCmpFunc func, void* userdata CV_DEFAULT(NULL) );
+
+/** Finds element in a [sorted] sequence */
+CVAPI(schar*) cvSeqSearch( CvSeq* seq, const void* elem, CvCmpFunc func,
+ int is_sorted, int* elem_idx,
+ void* userdata CV_DEFAULT(NULL) );
+
+/** Reverses order of sequence elements in-place */
+CVAPI(void) cvSeqInvert( CvSeq* seq );
+
+/** Splits sequence into one or more equivalence classes using the specified criteria */
+CVAPI(int) cvSeqPartition( const CvSeq* seq, CvMemStorage* storage,
+ CvSeq** labels, CvCmpFunc is_equal, void* userdata );
+
+/************ Internal sequence functions ************/
+CVAPI(void) cvChangeSeqBlock( void* reader, int direction );
+CVAPI(void) cvCreateSeqBlock( CvSeqWriter* writer );
+
+
+/** Creates a new set */
+CVAPI(CvSet*) cvCreateSet( int set_flags, int header_size,
+ int elem_size, CvMemStorage* storage );
+
+/** Adds new element to the set and returns pointer to it */
+CVAPI(int) cvSetAdd( CvSet* set_header, CvSetElem* elem CV_DEFAULT(NULL),
+ CvSetElem** inserted_elem CV_DEFAULT(NULL) );
+
+/** Fast variant of cvSetAdd */
+CV_INLINE CvSetElem* cvSetNew( CvSet* set_header )
+{
+ CvSetElem* elem = set_header->free_elems;
+ if( elem )
+ {
+ set_header->free_elems = elem->next_free;
+ elem->flags = elem->flags & CV_SET_ELEM_IDX_MASK;
+ set_header->active_count++;
+ }
+ else
+ cvSetAdd( set_header, NULL, &elem );
+ return elem;
+}
+
+/** Removes set element given its pointer */
+CV_INLINE void cvSetRemoveByPtr( CvSet* set_header, void* elem )
+{
+ CvSetElem* _elem = (CvSetElem*)elem;
+ assert( _elem->flags >= 0 /*&& (elem->flags & CV_SET_ELEM_IDX_MASK) < set_header->total*/ );
+ _elem->next_free = set_header->free_elems;
+ _elem->flags = (_elem->flags & CV_SET_ELEM_IDX_MASK) | CV_SET_ELEM_FREE_FLAG;
+ set_header->free_elems = _elem;
+ set_header->active_count--;
+}
+
+/** Removes element from the set by its index */
+CVAPI(void) cvSetRemove( CvSet* set_header, int index );
+
+/** Returns a set element by index. If the element doesn't belong to the set,
+ NULL is returned */
+CV_INLINE CvSetElem* cvGetSetElem( const CvSet* set_header, int idx )
+{
+ CvSetElem* elem = (CvSetElem*)(void *)cvGetSeqElem( (CvSeq*)set_header, idx );
+ return elem && CV_IS_SET_ELEM( elem ) ? elem : 0;
+}
+
+/** Removes all the elements from the set */
+CVAPI(void) cvClearSet( CvSet* set_header );
+
+/** Creates new graph */
+CVAPI(CvGraph*) cvCreateGraph( int graph_flags, int header_size,
+ int vtx_size, int edge_size,
+ CvMemStorage* storage );
+
+/** Adds new vertex to the graph */
+CVAPI(int) cvGraphAddVtx( CvGraph* graph, const CvGraphVtx* vtx CV_DEFAULT(NULL),
+ CvGraphVtx** inserted_vtx CV_DEFAULT(NULL) );
+
+
+/** Removes vertex from the graph together with all incident edges */
+CVAPI(int) cvGraphRemoveVtx( CvGraph* graph, int index );
+CVAPI(int) cvGraphRemoveVtxByPtr( CvGraph* graph, CvGraphVtx* vtx );
+
+
+/** Links two vertices specified by indices or pointers if they
+    are not connected, or returns a pointer to the already existing edge
+    connecting the vertices.
+    The functions return 1 if a new edge was created, 0 otherwise */
+CVAPI(int) cvGraphAddEdge( CvGraph* graph,
+ int start_idx, int end_idx,
+ const CvGraphEdge* edge CV_DEFAULT(NULL),
+ CvGraphEdge** inserted_edge CV_DEFAULT(NULL) );
+
+CVAPI(int) cvGraphAddEdgeByPtr( CvGraph* graph,
+ CvGraphVtx* start_vtx, CvGraphVtx* end_vtx,
+ const CvGraphEdge* edge CV_DEFAULT(NULL),
+ CvGraphEdge** inserted_edge CV_DEFAULT(NULL) );
+
+/** Remove edge connecting two vertices */
+CVAPI(void) cvGraphRemoveEdge( CvGraph* graph, int start_idx, int end_idx );
+CVAPI(void) cvGraphRemoveEdgeByPtr( CvGraph* graph, CvGraphVtx* start_vtx,
+ CvGraphVtx* end_vtx );
+
+/** Find edge connecting two vertices */
+CVAPI(CvGraphEdge*) cvFindGraphEdge( const CvGraph* graph, int start_idx, int end_idx );
+CVAPI(CvGraphEdge*) cvFindGraphEdgeByPtr( const CvGraph* graph,
+ const CvGraphVtx* start_vtx,
+ const CvGraphVtx* end_vtx );
+#define cvGraphFindEdge cvFindGraphEdge
+#define cvGraphFindEdgeByPtr cvFindGraphEdgeByPtr
+
+/** Remove all vertices and edges from the graph */
+CVAPI(void) cvClearGraph( CvGraph* graph );
+
+
+/** Count number of edges incident to the vertex */
+CVAPI(int) cvGraphVtxDegree( const CvGraph* graph, int vtx_idx );
+CVAPI(int) cvGraphVtxDegreeByPtr( const CvGraph* graph, const CvGraphVtx* vtx );
+
+
+/** Retrieves graph vertex by given index */
+#define cvGetGraphVtx( graph, idx ) (CvGraphVtx*)cvGetSetElem((CvSet*)(graph), (idx))
+
+/** Retrieves index of a graph vertex given its pointer */
+#define cvGraphVtxIdx( graph, vtx ) ((vtx)->flags & CV_SET_ELEM_IDX_MASK)
+
+/** Retrieves index of a graph edge given its pointer */
+#define cvGraphEdgeIdx( graph, edge ) ((edge)->flags & CV_SET_ELEM_IDX_MASK)
+
+#define cvGraphGetVtxCount( graph ) ((graph)->active_count)
+#define cvGraphGetEdgeCount( graph ) ((graph)->edges->active_count)
+
+#define CV_GRAPH_VERTEX 1
+#define CV_GRAPH_TREE_EDGE 2
+#define CV_GRAPH_BACK_EDGE 4
+#define CV_GRAPH_FORWARD_EDGE 8
+#define CV_GRAPH_CROSS_EDGE 16
+#define CV_GRAPH_ANY_EDGE 30
+#define CV_GRAPH_NEW_TREE 32
+#define CV_GRAPH_BACKTRACKING 64
+#define CV_GRAPH_OVER -1
+
+#define CV_GRAPH_ALL_ITEMS -1
+
+/** flags for graph vertices and edges */
+#define CV_GRAPH_ITEM_VISITED_FLAG (1 << 30)
+#define CV_IS_GRAPH_VERTEX_VISITED(vtx) \
+ (((CvGraphVtx*)(vtx))->flags & CV_GRAPH_ITEM_VISITED_FLAG)
+#define CV_IS_GRAPH_EDGE_VISITED(edge) \
+ (((CvGraphEdge*)(edge))->flags & CV_GRAPH_ITEM_VISITED_FLAG)
+#define CV_GRAPH_SEARCH_TREE_NODE_FLAG (1 << 29)
+#define CV_GRAPH_FORWARD_EDGE_FLAG (1 << 28)
+
+typedef struct CvGraphScanner
+{
+ CvGraphVtx* vtx; /* current graph vertex (or current edge origin) */
+ CvGraphVtx* dst; /* current graph edge destination vertex */
+ CvGraphEdge* edge; /* current edge */
+
+ CvGraph* graph; /* the graph */
+ CvSeq* stack; /* the graph vertex stack */
+ int index; /* the lower bound of certainly visited vertices */
+ int mask; /* event mask */
+}
+CvGraphScanner;
+
+/** Creates new graph scanner. */
+CVAPI(CvGraphScanner*) cvCreateGraphScanner( CvGraph* graph,
+ CvGraphVtx* vtx CV_DEFAULT(NULL),
+ int mask CV_DEFAULT(CV_GRAPH_ALL_ITEMS));
+
+/** Releases graph scanner. */
+CVAPI(void) cvReleaseGraphScanner( CvGraphScanner** scanner );
+
+/** Get next graph element */
+CVAPI(int) cvNextGraphItem( CvGraphScanner* scanner );
+
+/** Creates a copy of graph */
+CVAPI(CvGraph*) cvCloneGraph( const CvGraph* graph, CvMemStorage* storage );
+
+
+/** Does look-up transformation. Elements of the source array
+    (that should be 8uC1 or 8sC1) are used as indexes into the 256-element table lut */
+CVAPI(void) cvLUT( const CvArr* src, CvArr* dst, const CvArr* lut );
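+
+/* Usage sketch (assumes `src` and `dst` are 8-bit single-channel images of the
+   same size): invert intensities through a 256-entry lookup table.
+
+       uchar table[256];
+       CvMat lut = cvMat( 1, 256, CV_8UC1, table );
+       int i;
+       for( i = 0; i < 256; i++ )
+           table[i] = (uchar)(255 - i);
+       cvLUT( src, dst, &lut );
+*/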
+
+
+/******************* Iteration through the sequence tree *****************/
+typedef struct CvTreeNodeIterator
+{
+ const void* node;
+ int level;
+ int max_level;
+}
+CvTreeNodeIterator;
+
+CVAPI(void) cvInitTreeNodeIterator( CvTreeNodeIterator* tree_iterator,
+ const void* first, int max_level );
+CVAPI(void*) cvNextTreeNode( CvTreeNodeIterator* tree_iterator );
+CVAPI(void*) cvPrevTreeNode( CvTreeNodeIterator* tree_iterator );
+
+/** Inserts sequence into tree with specified "parent" sequence.
+ If parent is equal to frame (e.g. the most external contour),
+    then the added contour will have a null pointer to parent. */
+CVAPI(void) cvInsertNodeIntoTree( void* node, void* parent, void* frame );
+
+/** Removes contour from tree (together with the contour children). */
+CVAPI(void) cvRemoveNodeFromTree( void* node, void* frame );
+
+/** Gathers pointers to all the sequences,
+    accessible from `first`, into a single sequence */
+CVAPI(CvSeq*) cvTreeToNodeSeq( const void* first, int header_size,
+ CvMemStorage* storage );
+
+/** The function implements the K-means algorithm for clustering an array of sample
+ vectors in a specified number of classes */
+#define CV_KMEANS_USE_INITIAL_LABELS 1
+CVAPI(int) cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels,
+ CvTermCriteria termcrit, int attempts CV_DEFAULT(1),
+ CvRNG* rng CV_DEFAULT(0), int flags CV_DEFAULT(0),
+ CvArr* _centers CV_DEFAULT(0), double* compactness CV_DEFAULT(0) );
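+
+/* Usage sketch (assumes `samples` is an N x 2 CV_32FC1 matrix with one sample per
+   row and `labels` is a pre-allocated N x 1 CV_32SC1 matrix): cluster into 3 classes.
+
+       cvKMeans2( samples, 3, labels,
+                  cvTermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 1.0 ),
+                  5, 0, 0, 0, 0 );
+*/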
+
+/****************************************************************************************\
+* System functions *
+\****************************************************************************************/
+
+/** Loads optimized functions from IPP, MKL etc. or switches back to pure C code */
+CVAPI(int) cvUseOptimized( int on_off );
+
+typedef IplImage* (CV_STDCALL* Cv_iplCreateImageHeader)
+ (int,int,int,char*,char*,int,int,int,int,int,
+ IplROI*,IplImage*,void*,IplTileInfo*);
+typedef void (CV_STDCALL* Cv_iplAllocateImageData)(IplImage*,int,int);
+typedef void (CV_STDCALL* Cv_iplDeallocate)(IplImage*,int);
+typedef IplROI* (CV_STDCALL* Cv_iplCreateROI)(int,int,int,int,int);
+typedef IplImage* (CV_STDCALL* Cv_iplCloneImage)(const IplImage*);
+
+/** @brief Makes OpenCV use IPL functions for allocating IplImage and IplROI structures.
+
+Normally, the function is not called directly. Instead, a simple macro
+CV_TURN_ON_IPL_COMPATIBILITY() is used that calls cvSetIPLAllocators and passes there pointers
+to IPL allocation functions. :
+@code
+ ...
+ CV_TURN_ON_IPL_COMPATIBILITY()
+ ...
+@endcode
+@param create_header pointer to a function, creating IPL image header.
+@param allocate_data pointer to a function, allocating IPL image data.
+@param deallocate pointer to a function, deallocating IPL image.
+@param create_roi pointer to a function, creating IPL image ROI (i.e. Region of Interest).
+@param clone_image pointer to a function, cloning an IPL image.
+ */
+CVAPI(void) cvSetIPLAllocators( Cv_iplCreateImageHeader create_header,
+ Cv_iplAllocateImageData allocate_data,
+ Cv_iplDeallocate deallocate,
+ Cv_iplCreateROI create_roi,
+ Cv_iplCloneImage clone_image );
+
+#define CV_TURN_ON_IPL_COMPATIBILITY() \
+ cvSetIPLAllocators( iplCreateImageHeader, iplAllocateImage, \
+ iplDeallocate, iplCreateROI, iplCloneImage )
+
+/****************************************************************************************\
+* Data Persistence *
+\****************************************************************************************/
+
+/********************************** High-level functions ********************************/
+
+/** @brief Opens file storage for reading or writing data.
+
+The function opens file storage for reading or writing data. In the latter case, a new file is
+created or an existing file is rewritten. The type of the read or written file is determined by the
+filename extension: .xml for XML and .yml or .yaml for YAML. The function returns a pointer to the
+CvFileStorage structure. If the file cannot be opened then the function returns NULL.
+@param filename Name of the file associated with the storage
+@param memstorage Memory storage used for temporary data and for storing dynamic structures,
+such as CvSeq or CvGraph. If it is NULL, a temporary memory storage is created and used.
+@param flags Can be one of the following:
+> - **CV_STORAGE_READ** the storage is open for reading
+> - **CV_STORAGE_WRITE** the storage is open for writing
+@param encoding
+ */
+CVAPI(CvFileStorage*) cvOpenFileStorage( const char* filename, CvMemStorage* memstorage,
+ int flags, const char* encoding CV_DEFAULT(NULL) );
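+
+/* Usage sketch: write a couple of named scalars to a YAML file and close the storage.
+
+       CvFileStorage* fs = cvOpenFileStorage( "cfg.yml", NULL, CV_STORAGE_WRITE, NULL );
+       cvWriteInt( fs, "frame_count", 10 );
+       cvWriteReal( fs, "threshold", 0.5 );
+       cvReleaseFileStorage( &fs );
+*/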
+
+/** @brief Releases file storage.
+
+The function closes the file associated with the storage and releases all the temporary structures.
+It must be called after all I/O operations with the storage are finished.
+@param fs Double pointer to the released file storage
+ */
+CVAPI(void) cvReleaseFileStorage( CvFileStorage** fs );
+
+/** returns attribute value or 0 (NULL) if there is no such attribute */
+CVAPI(const char*) cvAttrValue( const CvAttrList* attr, const char* attr_name );
+
+/** @brief Starts writing a new structure.
+
+The function starts writing a compound structure (collection) that can be a sequence or a map. After
+all the structure fields, which can be scalars or structures, are written, cvEndWriteStruct should
+be called. The function can be used to group some objects or to implement the write function for
+some user object (see CvTypeInfo).
+@param fs File storage
+@param name Name of the written structure. The structure can be accessed by this name when the
+storage is read.
+@param struct_flags A combination of the following values:
+- **CV_NODE_SEQ** the written structure is a sequence (see discussion of CvFileStorage ),
+ that is, its elements do not have a name.
+- **CV_NODE_MAP** the written structure is a map (see discussion of CvFileStorage ), that
+ is, all its elements have names.
+One and only one of the two above flags must be specified
+- **CV_NODE_FLOW** the optional flag that makes sense only for YAML streams. It means that
+ the structure is written as a flow (not as a block), which is more compact. It is
+ recommended to use this flag for structures or arrays whose elements are all scalars.
+@param type_name Optional parameter - the object type name. In
+ case of XML it is written as a type_id attribute of the structure opening tag. In the case of
+ YAML it is written after a colon following the structure name (see the example in
+ CvFileStorage description). Mainly it is used with user objects. When the storage is read, the
+ encoded type name is used to determine the object type (see CvTypeInfo and cvFindType ).
+@param attributes This parameter is not used in the current implementation
+ */
+CVAPI(void) cvStartWriteStruct( CvFileStorage* fs, const char* name,
+ int struct_flags, const char* type_name CV_DEFAULT(NULL),
+ CvAttrList attributes CV_DEFAULT(cvAttrList()));
+
+/** @brief Finishes writing to a file node collection.
+@param fs File storage
+@sa cvStartWriteStruct.
+ */
+CVAPI(void) cvEndWriteStruct( CvFileStorage* fs );
+
+/** @brief Writes an integer value.
+
+The function writes a single integer value (with or without a name) to the file storage.
+@param fs File storage
+@param name Name of the written value. Should be NULL if and only if the parent structure is a
+sequence.
+@param value The written value
+ */
+CVAPI(void) cvWriteInt( CvFileStorage* fs, const char* name, int value );
+
+/** @brief Writes a floating-point value.
+
+The function writes a single floating-point value (with or without a name) to file storage. Special
+values are encoded as follows: NaN (Not A Number) as .NaN, infinity as +.Inf or -.Inf.
+
+The following example shows how to use the low-level writing functions to store custom structures,
+such as termination criteria, without registering a new type. :
+@code
+ void write_termcriteria( CvFileStorage* fs, const char* struct_name,
+ CvTermCriteria* termcrit )
+ {
+ cvStartWriteStruct( fs, struct_name, CV_NODE_MAP, NULL, cvAttrList(0,0));
+ cvWriteComment( fs, "termination criteria", 1 ); // just a description
+ if( termcrit->type & CV_TERMCRIT_ITER )
+            cvWriteInt( fs, "max_iterations", termcrit->max_iter );
+ if( termcrit->type & CV_TERMCRIT_EPS )
+ cvWriteReal( fs, "accuracy", termcrit->epsilon );
+ cvEndWriteStruct( fs );
+ }
+@endcode
+@param fs File storage
+@param name Name of the written value. Should be NULL if and only if the parent structure is a
+sequence.
+@param value The written value
+*/
+CVAPI(void) cvWriteReal( CvFileStorage* fs, const char* name, double value );
+
+/** @brief Writes a text string.
+
+The function writes a text string to file storage.
+@param fs File storage
+@param name Name of the written string . Should be NULL if and only if the parent structure is a
+sequence.
+@param str The written text string
+@param quote If non-zero, the written string is put in quotes, regardless of whether they are
+required. Otherwise, if the flag is zero, quotes are used only when they are required (e.g. when
+the string starts with a digit or contains spaces).
+ */
+CVAPI(void) cvWriteString( CvFileStorage* fs, const char* name,
+ const char* str, int quote CV_DEFAULT(0) );
+
+/** @brief Writes a comment.
+
+The function writes a comment into file storage. The comments are skipped when the storage is read.
+@param fs File storage
+@param comment The written comment, single-line or multi-line
+@param eol_comment If non-zero, the function tries to put the comment at the end of current line.
+If the flag is zero, or if the comment is multi-line or does not fit at the end of the current
+line, the comment starts a new line.
+ */
+CVAPI(void) cvWriteComment( CvFileStorage* fs, const char* comment,
+ int eol_comment );
+
+/** @brief Writes an object to file storage.
+
+The function writes an object to file storage. First, the appropriate type info is found using
+cvTypeOf. Then, the write method associated with the type info is called.
+
+Attributes are used to customize the writing procedure. The standard types support the following
+attributes (all the dt attributes have the same format as in cvWriteRawData):
+
+-# CvSeq
+ - **header_dt** description of user fields of the sequence header that follow CvSeq, or
+ CvChain (if the sequence is a Freeman chain) or CvContour (if the sequence is a contour or
+ point sequence)
+ - **dt** description of the sequence elements.
+ - **recursive** if the attribute is present and is not equal to "0" or "false", the whole
+ tree of sequences (contours) is stored.
+-# CvGraph
+ - **header_dt** description of user fields of the graph header that follows CvGraph;
+ - **vertex_dt** description of user fields of graph vertices
+ - **edge_dt** description of user fields of graph edges (note that the edge weight is
+ always written, so there is no need to specify it explicitly)
+
+Below is the code that creates the YAML file shown in the CvFileStorage description:
+@code
+ #include "cxcore.h"
+
+ int main( int argc, char** argv )
+ {
+ CvMat* mat = cvCreateMat( 3, 3, CV_32F );
+ CvFileStorage* fs = cvOpenFileStorage( "example.yml", 0, CV_STORAGE_WRITE );
+
+ cvSetIdentity( mat );
+ cvWrite( fs, "A", mat, cvAttrList(0,0) );
+
+ cvReleaseFileStorage( &fs );
+ cvReleaseMat( &mat );
+ return 0;
+ }
+@endcode
+@param fs File storage
+@param name Name of the written object. Should be NULL if and only if the parent structure is a
+sequence.
+@param ptr Pointer to the object
+@param attributes The attributes of the object. They are specific for each particular type (see
+the discussion below).
+ */
+CVAPI(void) cvWrite( CvFileStorage* fs, const char* name, const void* ptr,
+ CvAttrList attributes CV_DEFAULT(cvAttrList()));
+
+/** @brief Starts the next stream.
+
+The function finishes the currently written stream and starts the next stream. In the case of XML
+the file with multiple streams looks like this:
+@code{.xml}
+    <opencv_storage>
+    <!-- stream #1 data -->
+    </opencv_storage>
+    <opencv_storage>
+    <!-- stream #2 data -->
+    </opencv_storage>
+ ...
+@endcode
+The YAML file will look like this:
+@code{.yaml}
+ %YAML:1.0
+ # stream #1 data
+ ...
+ ---
+ # stream #2 data
+@endcode
+This is useful for concatenating files or for resuming the writing process.
+@param fs File storage
+ */
+CVAPI(void) cvStartNextStream( CvFileStorage* fs );
+
+/** @brief Writes multiple numbers.
+
+The function writes an array, whose elements consist of single or multiple numbers. The function
+call can be replaced with a loop containing a few cvWriteInt and cvWriteReal calls, but a single
+call is more efficient. Note that because none of the elements have a name, they should be written
+to a sequence rather than a map.
+@param fs File storage
+@param src Pointer to the written array
+@param len Number of the array elements to write
+@param dt Specification of each array element, see @ref format_spec "format specification"
+ */
+CVAPI(void) cvWriteRawData( CvFileStorage* fs, const void* src,
+ int len, const char* dt );
+
+/** @brief Returns a unique pointer for a given name.
+
+The function returns a unique pointer for each particular file node name. This pointer can be then
+passed to the cvGetFileNode function that is faster than cvGetFileNodeByName because it compares
+text strings by comparing pointers rather than the strings' content.
+
+Consider the following example where an array of points is encoded as a sequence of 2-entry maps:
+@code
+ points:
+ - { x: 10, y: 10 }
+ - { x: 20, y: 20 }
+ - { x: 30, y: 30 }
+ # ...
+@endcode
+Then, it is possible to get hashed "x" and "y" pointers to speed up decoding of the points. :
+@code
+ #include "cxcore.h"
+
+ int main( int argc, char** argv )
+ {
+ CvFileStorage* fs = cvOpenFileStorage( "points.yml", 0, CV_STORAGE_READ );
+ CvStringHashNode* x_key = cvGetHashedNode( fs, "x", -1, 1 );
+ CvStringHashNode* y_key = cvGetHashedNode( fs, "y", -1, 1 );
+ CvFileNode* points = cvGetFileNodeByName( fs, 0, "points" );
+
+ if( CV_NODE_IS_SEQ(points->tag) )
+ {
+ CvSeq* seq = points->data.seq;
+ int i, total = seq->total;
+ CvSeqReader reader;
+ cvStartReadSeq( seq, &reader, 0 );
+ for( i = 0; i < total; i++ )
+ {
+ CvFileNode* pt = (CvFileNode*)reader.ptr;
+ #if 1 // faster variant
+ CvFileNode* xnode = cvGetFileNode( fs, pt, x_key, 0 );
+ CvFileNode* ynode = cvGetFileNode( fs, pt, y_key, 0 );
+ assert( xnode && CV_NODE_IS_INT(xnode->tag) &&
+ ynode && CV_NODE_IS_INT(ynode->tag));
+ int x = xnode->data.i; // or x = cvReadInt( xnode, 0 );
+ int y = ynode->data.i; // or y = cvReadInt( ynode, 0 );
+ #elif 1 // slower variant; does not use x_key & y_key
+ CvFileNode* xnode = cvGetFileNodeByName( fs, pt, "x" );
+ CvFileNode* ynode = cvGetFileNodeByName( fs, pt, "y" );
+ assert( xnode && CV_NODE_IS_INT(xnode->tag) &&
+ ynode && CV_NODE_IS_INT(ynode->tag));
+ int x = xnode->data.i; // or x = cvReadInt( xnode, 0 );
+ int y = ynode->data.i; // or y = cvReadInt( ynode, 0 );
+ #else // the slowest yet the easiest to use variant
+ int x = cvReadIntByName( fs, pt, "x", 0 );
+ int y = cvReadIntByName( fs, pt, "y", 0 );
+ #endif
+ CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
+                printf( "%d: (%d, %d)\n", i, x, y );
+ }
+ }
+ cvReleaseFileStorage( &fs );
+ return 0;
+ }
+@endcode
+Please note that whatever method of accessing a map you are using, it is still much slower than
+using plain sequences; for example, in the above example, it is more efficient to encode the points
+as pairs of integers in a single numeric sequence.
+@param fs File storage
+@param name Literal node name
+@param len Length of the name (if it is known a priori), or -1 if it needs to be calculated
+@param create_missing Flag that specifies whether an absent key should be added to the hash table
+*/
+CVAPI(CvStringHashNode*) cvGetHashedKey( CvFileStorage* fs, const char* name,
+ int len CV_DEFAULT(-1),
+ int create_missing CV_DEFAULT(0));
+
+/** @brief Retrieves one of the top-level nodes of the file storage.
+
+The function returns one of the top-level file nodes. The top-level nodes do not have a name, they
+correspond to the streams that are stored one after another in the file storage. If the index is out
+of range, the function returns a NULL pointer, so all the top-level nodes can be iterated by
+subsequent calls to the function with stream_index=0,1,..., until the NULL pointer is returned.
+This function can be used as a base for recursive traversal of the file storage.
+@param fs File storage
+@param stream_index Zero-based index of the stream. See cvStartNextStream . In most cases,
+there is only one stream in the file; however, there can be several.
+ */
+CVAPI(CvFileNode*) cvGetRootFileNode( const CvFileStorage* fs,
+ int stream_index CV_DEFAULT(0) );
+
+/** @brief Finds a node in a map or file storage.
+
+The function finds a file node. It is a faster version of cvGetFileNodeByName (see
+cvGetHashedKey discussion). Also, the function can insert a new node, if it is not in the map yet.
+@param fs File storage
+@param map The parent map. If it is NULL, the function searches a top-level node. If both map and
+key are NULLs, the function returns the root file node - a map that contains top-level nodes.
+@param key Unique pointer to the node name, retrieved with cvGetHashedKey
+@param create_missing Flag that specifies whether an absent node should be added to the map
+ */
+CVAPI(CvFileNode*) cvGetFileNode( CvFileStorage* fs, CvFileNode* map,
+ const CvStringHashNode* key,
+ int create_missing CV_DEFAULT(0) );
+
+/** @brief Finds a node in a map or file storage.
+
+The function finds a file node by name. The node is searched either in map or, if the pointer is
+NULL, among the top-level file storage nodes. Using this function for maps and cvGetSeqElem (or
+sequence reader) for sequences, it is possible to navigate through the file storage. To speed up
+multiple queries for a certain key (e.g., in the case of an array of structures) one may use a
+combination of cvGetHashedKey and cvGetFileNode.
+@param fs File storage
+@param map The parent map. If it is NULL, the function searches in all the top-level nodes
+(streams), starting with the first one.
+@param name The file node name
+ */
+CVAPI(CvFileNode*) cvGetFileNodeByName( const CvFileStorage* fs,
+ const CvFileNode* map,
+ const char* name );
+
+/** @brief Retrieves an integer value from a file node.
+
+The function returns an integer that is represented by the file node. If the file node is NULL, the
+default_value is returned (thus, it is convenient to call the function right after cvGetFileNode
+without checking for a NULL pointer). If the file node has type CV_NODE_INT, then node-\>data.i is
+returned. If the file node has type CV_NODE_REAL, then node-\>data.f is converted to an integer
+and returned. Otherwise an error is reported.
+@param node File node
+@param default_value The value that is returned if node is NULL
+ */
+CV_INLINE int cvReadInt( const CvFileNode* node, int default_value CV_DEFAULT(0) )
+{
+ return !node ? default_value :
+ CV_NODE_IS_INT(node->tag) ? node->data.i :
+ CV_NODE_IS_REAL(node->tag) ? cvRound(node->data.f) : 0x7fffffff;
+}
+
+/** @brief Finds a file node and returns its value.
+
+The function is a simple superposition of cvGetFileNodeByName and cvReadInt.
+@param fs File storage
+@param map The parent map. If it is NULL, the function searches a top-level node.
+@param name The node name
+@param default_value The value that is returned if the file node is not found
+ */
+CV_INLINE int cvReadIntByName( const CvFileStorage* fs, const CvFileNode* map,
+ const char* name, int default_value CV_DEFAULT(0) )
+{
+ return cvReadInt( cvGetFileNodeByName( fs, map, name ), default_value );
+}
+
+/** @brief Retrieves a floating-point value from a file node.
+
+The function returns a floating-point value that is represented by the file node. If the file node
+is NULL, the default_value is returned (thus, it is convenient to call the function right after
+cvGetFileNode without checking for a NULL pointer). If the file node has type CV_NODE_REAL ,
+then node-\>data.f is returned. If the file node has type CV_NODE_INT , then node-\>data.i
+is converted to floating-point and returned. Otherwise the result is not determined.
+@param node File node
+@param default_value The value that is returned if node is NULL
+ */
+CV_INLINE double cvReadReal( const CvFileNode* node, double default_value CV_DEFAULT(0.) )
+{
+ return !node ? default_value :
+ CV_NODE_IS_INT(node->tag) ? (double)node->data.i :
+ CV_NODE_IS_REAL(node->tag) ? node->data.f : 1e300;
+}
+
+/** @brief Finds a file node and returns its value.
+
+The function is a simple superposition of cvGetFileNodeByName and cvReadReal .
+@param fs File storage
+@param map The parent map. If it is NULL, the function searches a top-level node.
+@param name The node name
+@param default_value The value that is returned if the file node is not found
+ */
+CV_INLINE double cvReadRealByName( const CvFileStorage* fs, const CvFileNode* map,
+ const char* name, double default_value CV_DEFAULT(0.) )
+{
+ return cvReadReal( cvGetFileNodeByName( fs, map, name ), default_value );
+}
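+
+/* Usage sketch: read the scalars written in the cvOpenFileStorage sketch back in.
+
+       CvFileStorage* fs = cvOpenFileStorage( "cfg.yml", NULL, CV_STORAGE_READ, NULL );
+       int frames = cvReadIntByName( fs, NULL, "frame_count", 0 );
+       double thr  = cvReadRealByName( fs, NULL, "threshold", 0.0 );
+       cvReleaseFileStorage( &fs );
+*/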
+
+/** @brief Retrieves a text string from a file node.
+
+The function returns a text string that is represented by the file node. If the file node is NULL,
+the default_value is returned (thus, it is convenient to call the function right after
+cvGetFileNode without checking for a NULL pointer). If the file node has type CV_NODE_STR , then
+node-\>data.str.ptr is returned. Otherwise the result is not determined.
+@param node File node
+@param default_value The value that is returned if node is NULL
+ */
+CV_INLINE const char* cvReadString( const CvFileNode* node,
+ const char* default_value CV_DEFAULT(NULL) )
+{
+ return !node ? default_value : CV_NODE_IS_STRING(node->tag) ? node->data.str.ptr : 0;
+}
+
+/** @brief Finds a file node by its name and returns its value.
+
+The function is a simple superposition of cvGetFileNodeByName and cvReadString .
+@param fs File storage
+@param map The parent map. If it is NULL, the function searches a top-level node.
+@param name The node name
+@param default_value The value that is returned if the file node is not found
+ */
+CV_INLINE const char* cvReadStringByName( const CvFileStorage* fs, const CvFileNode* map,
+ const char* name, const char* default_value CV_DEFAULT(NULL) )
+{
+ return cvReadString( cvGetFileNodeByName( fs, map, name ), default_value );
+}
+
+
+/** @brief Decodes an object and returns a pointer to it.
+
+The function decodes a user object (creates an object in a native representation from the file
+storage subtree) and returns it. The object to be decoded must be an instance of a registered type
+that supports the read method (see CvTypeInfo). The type of the object is determined by the type
+name that is encoded in the file. If the object is a dynamic structure, it is created either in
+the memory storage passed to cvOpenFileStorage or, if a NULL pointer was passed, in a temporary
+memory storage, which is released when cvReleaseFileStorage is called. Otherwise, if the object is
+not a dynamic structure, it is created in a heap and should be released with a specialized function
+or by using the generic cvRelease.
+@param fs File storage
+@param node The root object node
+@param attributes Unused parameter
+ */
+CVAPI(void*) cvRead( CvFileStorage* fs, CvFileNode* node,
+ CvAttrList* attributes CV_DEFAULT(NULL));
+
+/** @brief Finds an object by name and decodes it.
+
+The function is a simple superposition of cvGetFileNodeByName and cvRead.
+@param fs File storage
+@param map The parent map. If it is NULL, the function searches a top-level node.
+@param name The node name
+@param attributes Unused parameter
+ */
+CV_INLINE void* cvReadByName( CvFileStorage* fs, const CvFileNode* map,
+ const char* name, CvAttrList* attributes CV_DEFAULT(NULL) )
+{
+ return cvRead( fs, cvGetFileNodeByName( fs, map, name ), attributes );
+}
+
+
+/** @brief Initializes the file node sequence reader.
+
+The function initializes the sequence reader to read data from a file node. The initialized reader
+can be then passed to cvReadRawDataSlice.
+@param fs File storage
+@param src The file node (a sequence) to read numbers from
+@param reader Pointer to the sequence reader
+ */
+CVAPI(void) cvStartReadRawData( const CvFileStorage* fs, const CvFileNode* src,
+ CvSeqReader* reader );
+
+/** @brief Initializes file node sequence reader.
+
+The function reads one or more elements from the file node, representing a sequence, to a
+user-specified array. The total number of read sequence elements is a product of total and the
+number of components in each array element. For example, if dt=2if (two integers followed by one
+float, i.e. three components per element), the function will read total\*3 sequence elements. As
+with any sequence, some parts of the file node sequence can be skipped or read repeatedly by
+repositioning the reader using cvSetSeqReaderPos.
+@param fs File storage
+@param reader The sequence reader. Initialize it with cvStartReadRawData .
+@param count The number of elements to read
+@param dst Pointer to the destination array
+@param dt Specification of each array element. It has the same format as in cvWriteRawData .
+ */
+CVAPI(void) cvReadRawDataSlice( const CvFileStorage* fs, CvSeqReader* reader,
+ int count, void* dst, const char* dt );
+
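+/* A minimal sketch of the raw reader protocol, assuming "fs" is open for reading and
+   "pts" is a file node holding a flat sequence of integer pairs (dt = "2i"):
+
+       CvSeqReader reader;
+       int buffer[2*16];                                      // room for 16 elements of "2i"
+       cvStartReadRawData( fs, pts, &reader );
+       cvReadRawDataSlice( fs, &reader, 16, buffer, "2i" );   // reads 16*2 integers
+*/
+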
+/** @brief Reads multiple numbers.
+
+The function reads elements from a file node that represents a sequence of scalars.
+@param fs File storage
+@param src The file node (a sequence) to read numbers from
+@param dst Pointer to the destination array
+@param dt Specification of each array element. It has the same format as in cvWriteRawData .
+ */
+CVAPI(void) cvReadRawData( const CvFileStorage* fs, const CvFileNode* src,
+ void* dst, const char* dt );
+
+/** @brief Writes a file node to another file storage.
+
+The function writes a copy of a file node to file storage. Possible applications of the function are
+merging several file storages into one and conversion between XML and YAML formats.
+@param fs Destination file storage
+@param new_node_name New name of the file node in the destination file storage. To keep the
+existing name, use cvGetFileNodeName
+@param node The written node
+@param embed If the written node is a collection and this parameter is not zero, no extra level of
+hierarchy is created. Instead, all the elements of node are written into the currently written
+structure. Of course, map elements can only be embedded into another map, and sequence elements
+can only be embedded into another sequence.
+ */
+CVAPI(void) cvWriteFileNode( CvFileStorage* fs, const char* new_node_name,
+ const CvFileNode* node, int embed );
+
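+/* A minimal sketch of copying one named node between storages (e.g. as part of an
+   XML <-> YAML conversion); "src", "dst" and the node name are assumptions:
+
+       CvFileNode* node = cvGetFileNodeByName( src, NULL, "calibration" );
+       if( node )
+           cvWriteFileNode( dst, cvGetFileNodeName( node ), node, 0 );
+*/
+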
+/** @brief Returns the name of a file node.
+
+The function returns the name of a file node or NULL, if the file node does not have a name or if
+node is NULL.
+@param node File node
+ */
+CVAPI(const char*) cvGetFileNodeName( const CvFileNode* node );
+
+/*********************************** Adding own types ***********************************/
+
+/** @brief Registers a new type.
+
+The function registers a new type, which is described by info . The function creates a copy of the
+structure, so the user should delete it after calling the function.
+@param info Type info structure
+ */
+CVAPI(void) cvRegisterType( const CvTypeInfo* info );
+
+/** @brief Unregisters the type.
+
+The function unregisters a type with a specified name. If the name is unknown, it is possible to
+locate the type info by an instance of the type using cvTypeOf or by iterating the type list,
+starting from cvFirstType, and then calling cvUnregisterType(info-\>typeName).
+@param type_name Name of the type to unregister
+ */
+CVAPI(void) cvUnregisterType( const char* type_name );
+
+/** @brief Returns the beginning of a type list.
+
+The function returns the first type in the list of registered types. Navigation through the list can
+be done via the prev and next fields of the CvTypeInfo structure.
+ */
+CVAPI(CvTypeInfo*) cvFirstType(void);
+
+/** @brief Finds a type by its name.
+
+The function finds a registered type by its name. It returns NULL if there is no type with the
+specified name.
+@param type_name Type name
+ */
+CVAPI(CvTypeInfo*) cvFindType( const char* type_name );
+
+/** @brief Returns the type of an object.
+
+The function finds the type of a given object. It iterates through the list of registered types and
+calls the is_instance function/method for every type info structure with that object until one of
+them returns non-zero or until the whole list has been traversed. In the latter case, the function
+returns NULL.
+@param struct_ptr The object pointer
+ */
+CVAPI(CvTypeInfo*) cvTypeOf( const void* struct_ptr );
+
+/** @brief Releases an object.
+
+The function finds the type of a given object and calls release with the double pointer.
+@param struct_ptr Double pointer to the object
+ */
+CVAPI(void) cvRelease( void** struct_ptr );
+
+/** @brief Makes a clone of an object.
+
+The function finds the type of a given object and calls clone with the passed object. Of course, if
+you know the object type, for example, struct_ptr is CvMat\*, it is faster to call the specific
+function, like cvCloneMat.
+@param struct_ptr The object to clone
+ */
+CVAPI(void*) cvClone( const void* struct_ptr );
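+
+/* A minimal sketch of the generic RTTI helpers, assuming "mat" is a valid CvMat*:
+
+       CvTypeInfo* info = cvTypeOf( mat );   // e.g. type_name == "opencv-matrix" for CvMat
+       void* copy = cvClone( mat );          // generic deep copy via the type's clone function
+       cvRelease( &copy );                   // generic release via the type's release function
+*/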
+
+/** @brief Saves an object to a file.
+
+The function saves an object to a file. It provides a simple interface to cvWrite .
+@param filename File name
+@param struct_ptr Object to save
+@param name Optional object name. If it is NULL, the name will be formed from filename .
+@param comment Optional comment to put in the beginning of the file
+@param attributes Optional attributes passed to cvWrite
+ */
+CVAPI(void) cvSave( const char* filename, const void* struct_ptr,
+ const char* name CV_DEFAULT(NULL),
+ const char* comment CV_DEFAULT(NULL),
+ CvAttrList attributes CV_DEFAULT(cvAttrList()));
+
+/** @brief Loads an object from a file.
+
+The function loads an object from a file. It basically reads the specified file, finds the first
+top-level node and calls cvRead for that node. If the file node does not have type information or
+the type information cannot be found by the type name, the function returns NULL. After the object
+is loaded, the file storage is closed and all the temporary buffers are deleted. Thus, to load a
+dynamic structure, such as a sequence, contour, or graph, one should pass a valid memory storage
+destination to the function.
+@param filename File name
+@param memstorage Memory storage for dynamic structures, such as CvSeq or CvGraph . It is not used
+for matrices or images.
+@param name Optional object name. If it is NULL, the first top-level object in the storage will be
+loaded.
+@param real_name Optional output parameter that will contain the name of the loaded object
+(useful if name=NULL )
+ */
+CVAPI(void*) cvLoad( const char* filename,
+ CvMemStorage* memstorage CV_DEFAULT(NULL),
+ const char* name CV_DEFAULT(NULL),
+ const char** real_name CV_DEFAULT(NULL) );
+
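+/* A minimal save/load round trip for a matrix (the file name is arbitrary); matrices
+   need no memory storage, so cvLoad can be called with the file name alone:
+
+       CvMat* a = cvCreateMat( 3, 3, CV_32FC1 );
+       cvSetIdentity( a );
+       cvSave( "mat.xml", a );                  // object name derived from the file name
+       CvMat* b = (CvMat*)cvLoad( "mat.xml" );
+       cvReleaseMat( &a );
+       cvReleaseMat( &b );
+*/
+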
+/*********************************** Measuring Execution Time ***************************/
+
+/** helper functions for RNG initialization and accurate time measurement:
+    they use the internal clock counter on x86 */
+CVAPI(int64) cvGetTickCount( void );
+CVAPI(double) cvGetTickFrequency( void );
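+
+/* A minimal timing sketch; note that the legacy cvGetTickFrequency() reports ticks per
+   microsecond (unlike cv::getTickFrequency), so the result below is in microseconds:
+
+       int64 t0 = cvGetTickCount();
+       // ... code to be measured ...
+       int64 t1 = cvGetTickCount();
+       double usec = (double)(t1 - t0) / cvGetTickFrequency();
+*/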
+
+/*********************************** CPU capabilities ***********************************/
+
+CVAPI(int) cvCheckHardwareSupport(int feature);
+
+/*********************************** Multi-Threading ************************************/
+
+/** retrieve/set the number of threads used in OpenMP implementations */
+CVAPI(int) cvGetNumThreads( void );
+CVAPI(void) cvSetNumThreads( int threads CV_DEFAULT(0) );
+/** get index of the thread being executed */
+CVAPI(int) cvGetThreadNum( void );
+
+
+/********************************** Error Handling **************************************/
+
+/** Get current OpenCV error status */
+CVAPI(int) cvGetErrStatus( void );
+
+/** Sets error status silently */
+CVAPI(void) cvSetErrStatus( int status );
+
+#define CV_ErrModeLeaf 0 /* Print error and exit program */
+#define CV_ErrModeParent 1 /* Print error and continue */
+#define CV_ErrModeSilent 2 /* Don't print and continue */
+
+/** Retrieves the current error processing mode */
+CVAPI(int) cvGetErrMode( void );
+
+/** Sets error processing mode, returns previously used mode */
+CVAPI(int) cvSetErrMode( int mode );
+
+/** Sets error status and performs some additional actions (displaying a message box,
+    writing the message to stderr, terminating the application, etc.)
+    depending on the current error mode */
+CVAPI(void) cvError( int status, const char* func_name,
+ const char* err_msg, const char* file_name, int line );
+
+/** Retrieves textual description of the error given its code */
+CVAPI(const char*) cvErrorStr( int status );
+
+/** Retrieves detailed information about the last error that occurred */
+CVAPI(int) cvGetErrInfo( const char** errcode_desc, const char** description,
+ const char** filename, int* line );
+
+/** Maps IPP error codes to the counterparts from OpenCV */
+CVAPI(int) cvErrorFromIppStatus( int ipp_status );
+
+typedef int (CV_CDECL *CvErrorCallback)( int status, const char* func_name,
+ const char* err_msg, const char* file_name, int line, void* userdata );
+
+/** Assigns a new error-handling function */
+CVAPI(CvErrorCallback) cvRedirectError( CvErrorCallback error_handler,
+ void* userdata CV_DEFAULT(NULL),
+ void** prev_userdata CV_DEFAULT(NULL) );
+
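+/* A minimal sketch of a custom handler matching CvErrorCallback (the handler name and
+   log format are illustrative only; fprintf requires <stdio.h>):
+
+       static int CV_CDECL my_error_handler( int status, const char* func_name,
+                                             const char* err_msg, const char* file_name,
+                                             int line, void* userdata )
+       {
+           fprintf( stderr, "OpenCV error %d: %s in %s, %s:%d\n",
+                    status, err_msg, func_name, file_name, line );
+           return 0;   // same convention as the standard handlers below
+       }
+
+       cvRedirectError( my_error_handler );
+*/
+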
+/** Output nothing */
+CVAPI(int) cvNulDevReport( int status, const char* func_name, const char* err_msg,
+ const char* file_name, int line, void* userdata );
+
+/** Output to console (fprintf(stderr, ...)) */
+CVAPI(int) cvStdErrReport( int status, const char* func_name, const char* err_msg,
+ const char* file_name, int line, void* userdata );
+
+/** Output to MessageBox(WIN32) */
+CVAPI(int) cvGuiBoxReport( int status, const char* func_name, const char* err_msg,
+ const char* file_name, int line, void* userdata );
+
+#define OPENCV_ERROR(status,func,context) \
+cvError((status),(func),(context),__FILE__,__LINE__)
+
+#define OPENCV_ASSERT(expr,func,context) \
+{if (! (expr)) \
+{OPENCV_ERROR(CV_StsInternal,(func),(context));}}
+
+#define OPENCV_CALL( Func ) \
+{ \
+Func; \
+}
+
+
+/** CV_FUNCNAME macro defines the cvFuncName constant which is used by the CV_ERROR macro */
+#ifdef CV_NO_FUNC_NAMES
+#define CV_FUNCNAME( Name )
+#define cvFuncName ""
+#else
+#define CV_FUNCNAME( Name ) \
+static char cvFuncName[] = Name
+#endif
+
+
+/**
+ CV_ERROR macro unconditionally raises error with passed code and message.
+ After raising error, control will be transferred to the exit label.
+ */
+#define CV_ERROR( Code, Msg ) \
+{ \
+ cvError( (Code), cvFuncName, Msg, __FILE__, __LINE__ ); \
+ __CV_EXIT__; \
+}
+
+/**
+ CV_CHECK macro checks error status after CV (or IPL)
+ function call. If error detected, control will be transferred to the exit
+ label.
+ */
+#define CV_CHECK() \
+{ \
+ if( cvGetErrStatus() < 0 ) \
+ CV_ERROR( CV_StsBackTrace, "Inner function failed." ); \
+}
+
+
+/**
+ CV_CALL macro calls CV (or IPL) function, checks error status and
+ signals a error if the function failed. Useful in "parent node"
+ error procesing mode
+ */
+#define CV_CALL( Func ) \
+{ \
+ Func; \
+ CV_CHECK(); \
+}
+
+
+/** Runtime assertion macro */
+#define CV_ASSERT( Condition ) \
+{ \
+ if( !(Condition) ) \
+ CV_ERROR( CV_StsInternal, "Assertion: " #Condition " failed" ); \
+}
+
+#define __CV_BEGIN__ {
+#define __CV_END__ goto exit; exit: ; }
+#define __CV_EXIT__ goto exit
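+
+/* A minimal sketch of the macro protocol above (function and argument names are
+   illustrative): CV_FUNCNAME supplies the name reported by CV_ERROR, while
+   __CV_BEGIN__/__CV_END__ provide the "exit" label that __CV_EXIT__ jumps to:
+
+       static void icvProcess( CvMat* m )
+       {
+           CV_FUNCNAME( "icvProcess" );
+
+           __CV_BEGIN__;
+
+           if( !m )
+               CV_ERROR( CV_StsNullPtr, "input matrix is NULL" );
+
+           CV_CALL( cvSetIdentity( m ));    // checks cvGetErrStatus() after the call
+
+           __CV_END__;
+       }
+*/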
+
+/** @} core_c */
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#ifdef __cplusplus
+
+//! @addtogroup core_c_glue
+//! @{
+
+//! class for automatic module/RTTI data registration/unregistration
+struct CV_EXPORTS CvType
+{
+ CvType( const char* type_name,
+ CvIsInstanceFunc is_instance, CvReleaseFunc release=0,
+ CvReadFunc read=0, CvWriteFunc write=0, CvCloneFunc clone=0 );
+ ~CvType();
+ CvTypeInfo* info;
+
+ static CvTypeInfo* first;
+ static CvTypeInfo* last;
+};
+
+//! @}
+
+#include "opencv2/core/utility.hpp"
+
+namespace cv
+{
+
+//! @addtogroup core_c_glue
+//! @{
+
+/////////////////////////////////////////// glue ///////////////////////////////////////////
+
+//! converts array (CvMat or IplImage) to cv::Mat
+CV_EXPORTS Mat cvarrToMat(const CvArr* arr, bool copyData=false,
+                          bool allowND=true, int coiMode=0,
+                          AutoBuffer<double>* buf=0);
+
+static inline Mat cvarrToMatND(const CvArr* arr, bool copyData=false, int coiMode=0)
+{
+ return cvarrToMat(arr, copyData, true, coiMode);
+}
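+
+/* A minimal sketch of bridging a legacy array to cv::Mat without copying pixel data
+   (the image itself is a placeholder created just for the example):
+
+       IplImage* ipl = cvCreateImage( cvSize(640, 480), IPL_DEPTH_8U, 3 );
+       cv::Mat m = cv::cvarrToMat( ipl );     // header only; shares data with ipl
+       // ... process m ...
+       cvReleaseImage( &ipl );                // m must not be used after this point
+*/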
+
+
+//! extracts Channel of Interest from CvMat or IplImage and makes cv::Mat out of it.
+CV_EXPORTS void extractImageCOI(const CvArr* arr, OutputArray coiimg, int coi=-1);
+//! inserts single-channel cv::Mat into a multi-channel CvMat or IplImage
+CV_EXPORTS void insertImageCOI(InputArray coiimg, CvArr* arr, int coi=-1);
+
+
+
+////// specialized implementations of DefaultDeleter::operator() for classic OpenCV types //////
+
+template<> CV_EXPORTS void DefaultDeleter<CvMat>::operator ()(CvMat* obj) const;
+template<> CV_EXPORTS void DefaultDeleter<IplImage>::operator ()(IplImage* obj) const;
+template<> CV_EXPORTS void DefaultDeleter<CvMatND>::operator ()(CvMatND* obj) const;
+template<> CV_EXPORTS void DefaultDeleter<CvSparseMat>::operator ()(CvSparseMat* obj) const;
+template<> CV_EXPORTS void DefaultDeleter