From 12b8d542b7465f495681d3dc0c50cd27f7e0ee94 Mon Sep 17 00:00:00 2001
From: Jojo R
Date: Tue, 17 Nov 2020 15:29:03 +0800
Subject: [PATCH 01/14] norm.cpp(normL2Sqr_): improve performance of pipeline

Most target machines dedicate one type of CPU execution unit to each type
of instruction: e.g. the vx_load API uses the load/store unit, while the
v_muladd API uses the multiply/multiply-accumulate unit. Interleaving
vx_load and v_muladd therefore keeps both units busy and improves pipeline
performance on most targets, such as RISC-V or ARM.

---
 modules/core/src/norm.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/core/src/norm.cpp b/modules/core/src/norm.cpp
index 9aaed8e980..b95cd99bd8 100644
--- a/modules/core/src/norm.cpp
+++ b/modules/core/src/norm.cpp
@@ -152,10 +152,10 @@ float normL2Sqr_(const float* a, const float* b, int n)
 {
     v_float32 t0 = vx_load(a + j) - vx_load(b + j);
     v_float32 t1 = vx_load(a + j + v_float32::nlanes) - vx_load(b + j + v_float32::nlanes);
-    v_float32 t2 = vx_load(a + j + 2 * v_float32::nlanes) - vx_load(b + j + 2 * v_float32::nlanes);
-    v_float32 t3 = vx_load(a + j + 3 * v_float32::nlanes) - vx_load(b + j + 3 * v_float32::nlanes);
     v_d0 = v_muladd(t0, t0, v_d0);
+    v_float32 t2 = vx_load(a + j + 2 * v_float32::nlanes) - vx_load(b + j + 2 * v_float32::nlanes);
     v_d1 = v_muladd(t1, t1, v_d1);
+    v_float32 t3 = vx_load(a + j + 3 * v_float32::nlanes) - vx_load(b + j + 3 * v_float32::nlanes);
     v_d2 = v_muladd(t2, t2, v_d2);
     v_d3 = v_muladd(t3, t3, v_d3);
 }

From 36d771affc952abb30bd2eb8c9d610223be5a68f Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Thu, 26 Nov 2020 12:24:15 +0000
Subject: [PATCH 02/14] python: restore sys.path in bootstrap()

- multiprocessing needs to start from the bootstrap code
- loading may fail due to missing os.add_dll_directory() calls

---
 modules/python/package/cv2/__init__.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/modules/python/package/cv2/__init__.py b/modules/python/package/cv2/__init__.py
index d367998b0b..940ac65732 100644
--- a/modules/python/package/cv2/__init__.py
+++ b/modules/python/package/cv2/__init__.py
@@ -18,6 +18,10 @@ except ImportError:
 def bootstrap():
     import sys
+
+    import copy
+    save_sys_path = copy.copy(sys.path)
+
     if hasattr(sys, 'OpenCV_LOADER'):
         print(sys.path)
         raise ImportError('ERROR: recursion is detected during loading of "cv2" binary extensions.
Check OpenCV installation.') @@ -85,6 +89,8 @@ def bootstrap(): del sys.modules['cv2'] import cv2 + sys.path = save_sys_path # multiprocessing should start from bootstrap code (https://github.com/opencv/opencv/issues/18502) + try: import sys del sys.OpenCV_LOADER From da2978f607e4566601d3e86a873f46178b090172 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sat, 28 Nov 2020 13:13:28 +0000 Subject: [PATCH 03/14] ts: cvtest::debugLevel / --test_debug= option --- modules/calib3d/test/test_cornerssubpix.cpp | 7 ++++--- modules/calib3d/test/test_fisheye.cpp | 6 ++++-- modules/imgproc/test/test_drawing.cpp | 3 ++- modules/ts/include/opencv2/ts/ts_ext.hpp | 1 + modules/ts/src/ts.cpp | 10 ++++++++++ 5 files changed, 21 insertions(+), 6 deletions(-) diff --git a/modules/calib3d/test/test_cornerssubpix.cpp b/modules/calib3d/test/test_cornerssubpix.cpp index 05b75c5cbc..b70cc1e988 100644 --- a/modules/calib3d/test/test_cornerssubpix.cpp +++ b/modules/calib3d/test/test_cornerssubpix.cpp @@ -153,9 +153,8 @@ void CV_ChessboardSubpixelTest::run( int ) vector test_corners; bool result = findChessboardCorners(chessboard_image, pattern_size, test_corners, 15); - if(!result) + if (!result && cvtest::debugLevel > 0) { -#if 0 ts->printf(cvtest::TS::LOG, "Warning: chessboard was not detected! Writing image to test.png\n"); ts->printf(cvtest::TS::LOG, "Size = %d, %d\n", pattern_size.width, pattern_size.height); ts->printf(cvtest::TS::LOG, "Intrinsic params: fx = %f, fy = %f, cx = %f, cy = %f\n", @@ -167,7 +166,9 @@ void CV_ChessboardSubpixelTest::run( int ) distortion_coeffs_.at(0, 4)); imwrite("test.png", chessboard_image); -#endif + } + if (!result) + { continue; } diff --git a/modules/calib3d/test/test_fisheye.cpp b/modules/calib3d/test/test_fisheye.cpp index eedc2fa4fe..8e509cf35e 100644 --- a/modules/calib3d/test/test_fisheye.cpp +++ b/modules/calib3d/test/test_fisheye.cpp @@ -449,7 +449,10 @@ TEST_F(fisheyeTest, stereoRectify) << "Q =" << std::endl << Q << std::endl; } -#if 1 // Debug code + if (cvtest::debugLevel == 0) + return; + // DEBUG code is below + cv::Mat lmapx, lmapy, rmapx, rmapy; //rewrite for fisheye cv::fisheye::initUndistortRectifyMap(K1, D1, R1, P1, requested_size, CV_32F, lmapx, lmapy); @@ -482,7 +485,6 @@ TEST_F(fisheyeTest, stereoRectify) cv::imwrite(cv::format("fisheye_rectification_AB_%03d.png", i), rectification); } -#endif } TEST_F(fisheyeTest, stereoCalibrate) diff --git a/modules/imgproc/test/test_drawing.cpp b/modules/imgproc/test/test_drawing.cpp index fab2631041..42aa386b5a 100644 --- a/modules/imgproc/test/test_drawing.cpp +++ b/modules/imgproc/test/test_drawing.cpp @@ -487,7 +487,8 @@ protected: img->copyTo(sub); shift += img->size().height + 1; } - //imwrite("/tmp/all_fonts.png", result); + if (cvtest::debugLevel > 0) + imwrite("all_fonts.png", result); } }; diff --git a/modules/ts/include/opencv2/ts/ts_ext.hpp b/modules/ts/include/opencv2/ts/ts_ext.hpp index b2a4cac241..5c09b569a5 100644 --- a/modules/ts/include/opencv2/ts/ts_ext.hpp +++ b/modules/ts/include/opencv2/ts/ts_ext.hpp @@ -13,6 +13,7 @@ void checkIppStatus(); extern bool skipUnstableTests; extern bool runBigDataTests; extern int testThreads; +extern int debugLevel; //< 0 - no debug, 1 - basic test debug information, >1 - extra debug information void testSetUp(); void testTearDown(); diff --git a/modules/ts/src/ts.cpp b/modules/ts/src/ts.cpp index bad799dc4d..b66779c829 100644 --- a/modules/ts/src/ts.cpp +++ b/modules/ts/src/ts.cpp @@ -774,6 +774,7 @@ static bool checkTestData = 
cv::utils::getConfigurationParameterBool("OPENCV_TES bool skipUnstableTests = false; bool runBigDataTests = false; int testThreads = 0; +int debugLevel = (int)cv::utils::getConfigurationParameterSizeT("OPENCV_TEST_DEBUG", 0); static size_t memory_usage_base = 0; @@ -883,6 +884,7 @@ void parseCustomOptions(int argc, char **argv) "{ test_threads |-1 |the number of worker threads, if parallel execution is enabled}" "{ skip_unstable |false |skip unstable tests }" "{ test_bigdata |false |run BigData tests (>=2Gb) }" + "{ test_debug | |0 - no debug (default), 1 - basic test debug information, >1 - extra debug information }" "{ test_require_data |") + (checkTestData ? "true" : "false") + string("|fail on missing non-required test data instead of skip (env:OPENCV_TEST_REQUIRE_DATA)}" CV_TEST_TAGS_PARAMS "{ h help |false |print help info }" @@ -909,6 +911,14 @@ void parseCustomOptions(int argc, char **argv) skipUnstableTests = parser.get("skip_unstable"); runBigDataTests = parser.get("test_bigdata"); + if (parser.has("test_debug")) + { + cv::String s = parser.get("test_debug"); + if (s.empty() || s == "true") + debugLevel = 1; + else + debugLevel = parser.get("test_debug"); + } if (parser.has("test_require_data")) checkTestData = parser.get("test_require_data"); From 4e4458416d4140dea509d669a8605650ac0ff314 Mon Sep 17 00:00:00 2001 From: Zhiming-Zeng <1773677072@qq.com> Date: Sun, 29 Nov 2020 18:09:42 +0800 Subject: [PATCH 04/14] Merge pull request #18064 from akineeic:gsoc_2020_dnn [GSoC] Develop OpenCV.js DNN modules for promising web use cases together with their tutorials * [Opencv.js doc] Init commit to add image classification example in opencv.js tutorial * [Opencv.js doc] Make the code snippet interactive and put the functions into code snippet. * Fix the utils.loadOpenCv for promise module * [Opencv.js doc] Code modify and fixed layout issue. * [Opencv.js doc] Add a JSON file to store parameters for models and show in the web page. * [Opencv.js doc] Change let to const. 
* [Opencv.js doc] Init commit to add image classification example with camera in opencv.js tutorial
* [Opencv.js doc] Init commit to add semantic segmentation example in opencv.js tutorial
* [Opencv.js doc] Add object detection example, support YOLOv2
* [Opencv.js doc] Support SSD model for object detection example
* [Opencv.js doc] Add fast neural style transfer example with opencv.js
* [Opencv.js doc] Add pose estimation example in opencv.js tutorial
* Delete whitespace for code check
* [Opencv.js doc] Add object detection example with camera
* [Opencv.js doc] Add json files containing model information to each example
* [Opencv.js doc] Add a js file for common function in dnn example
* [Opencv.js doc] Create single function getBlobFromImage
* [Opencv.js doc] Add url of model into webpage
* [OpenCV.js doc] Update UI for running
* [Opencv.js doc] Load dnn model by input button
* [Opencv.js doc] Fix some UI issues
* [Opencv.js doc] Change code format

Co-authored-by: Ningxin Hu
---
 .../js_assets/js_dnn_example_helper.js        | 119 ++++++
 .../js_assets/js_image_classification.html    | 263 ++++++++++++
 .../js_image_classification_model_info.json   |  65 +++
 .../js_image_classification_with_camera.html  | 281 ++++++++++++
 .../js_assets/js_object_detection.html        | 387 +++++++++++++++++
 .../js_object_detection_model_info.json       |  39 ++
 .../js_object_detection_with_camera.html      | 402 ++++++++++++++++++
 .../js_assets/js_pose_estimation.html         | 327 ++++++++++++++
 .../js_pose_estimation_model_info.json        |  34 ++
 .../js_assets/js_semantic_segmentation.html   | 243 +++++++++++
 .../js_semantic_segmentation_model_info.json  |  12 +
 .../js_assets/js_style_transfer.html          | 228 ++++++++++
 .../js_style_transfer_model_info.json         |  76 ++++
 doc/js_tutorials/js_assets/utils.js           |  10 +-
 .../js_image_classification.markdown          |  13 +
 ..._image_classification_with_camera.markdown |  15 +
 .../js_object_detection.markdown              |  13 +
 .../js_object_detection_with_camera.markdown  |  13 +
 .../js_pose_estimation.markdown               |  13 +
 .../js_semantic_segmentation.markdown         |  13 +
 .../js_style_transfer.markdown                |  13 +
 .../js_dnn/js_table_of_contents_dnn.markdown  |  30 ++
 doc/js_tutorials/js_tutorials.markdown        |   4 +
 23 files changed, 2611 insertions(+), 2 deletions(-)
 create mode 100644 doc/js_tutorials/js_assets/js_dnn_example_helper.js
 create mode 100644 doc/js_tutorials/js_assets/js_image_classification.html
 create mode 100644 doc/js_tutorials/js_assets/js_image_classification_model_info.json
 create mode 100644 doc/js_tutorials/js_assets/js_image_classification_with_camera.html
 create mode 100644 doc/js_tutorials/js_assets/js_object_detection.html
 create mode 100644 doc/js_tutorials/js_assets/js_object_detection_model_info.json
 create mode 100644 doc/js_tutorials/js_assets/js_object_detection_with_camera.html
 create mode 100644 doc/js_tutorials/js_assets/js_pose_estimation.html
 create mode 100644 doc/js_tutorials/js_assets/js_pose_estimation_model_info.json
 create mode 100644 doc/js_tutorials/js_assets/js_semantic_segmentation.html
 create mode 100644 doc/js_tutorials/js_assets/js_semantic_segmentation_model_info.json
 create mode 100644 doc/js_tutorials/js_assets/js_style_transfer.html
 create mode 100644 doc/js_tutorials/js_assets/js_style_transfer_model_info.json
 create mode 100644 doc/js_tutorials/js_dnn/js_image_classification/js_image_classification.markdown
 create mode 100644 doc/js_tutorials/js_dnn/js_image_classification/js_image_classification_with_camera.markdown
 create mode 100644 doc/js_tutorials/js_dnn/js_object_detection/js_object_detection.markdown
create mode 100644 doc/js_tutorials/js_dnn/js_object_detection/js_object_detection_with_camera.markdown create mode 100644 doc/js_tutorials/js_dnn/js_pose_estimation/js_pose_estimation.markdown create mode 100644 doc/js_tutorials/js_dnn/js_semantic_segmentation/js_semantic_segmentation.markdown create mode 100644 doc/js_tutorials/js_dnn/js_style_transfer/js_style_transfer.markdown create mode 100644 doc/js_tutorials/js_dnn/js_table_of_contents_dnn.markdown diff --git a/doc/js_tutorials/js_assets/js_dnn_example_helper.js b/doc/js_tutorials/js_assets/js_dnn_example_helper.js new file mode 100644 index 0000000000..06baa6760b --- /dev/null +++ b/doc/js_tutorials/js_assets/js_dnn_example_helper.js @@ -0,0 +1,119 @@ +getBlobFromImage = function(inputSize, mean, std, swapRB, image) { + let mat; + if (typeof(image) === 'string') { + mat = cv.imread(image); + } else { + mat = image; + } + + let matC3 = new cv.Mat(mat.matSize[0], mat.matSize[1], cv.CV_8UC3); + cv.cvtColor(mat, matC3, cv.COLOR_RGBA2BGR); + let input = cv.blobFromImage(matC3, std, new cv.Size(inputSize[0], inputSize[1]), + new cv.Scalar(mean[0], mean[1], mean[2]), swapRB); + + matC3.delete(); + return input; +} + +loadLables = async function(labelsUrl) { + let response = await fetch(labelsUrl); + let label = await response.text(); + label = label.split('\n'); + return label; +} + +loadModel = async function(e) { + return new Promise((resolve) => { + let file = e.target.files[0]; + let path = file.name; + let reader = new FileReader(); + reader.readAsArrayBuffer(file); + reader.onload = function(ev) { + if (reader.readyState === 2) { + let buffer = reader.result; + let data = new Uint8Array(buffer); + cv.FS_createDataFile('/', path, data, true, false, false); + resolve(path); + } + } + }); +} + +getTopClasses = function(probs, labels, topK = 3) { + probs = Array.from(probs); + let indexes = probs.map((prob, index) => [prob, index]); + let sorted = indexes.sort((a, b) => { + if (a[0] === b[0]) {return 0;} + return a[0] < b[0] ? 
-1 : 1; + }); + sorted.reverse(); + let classes = []; + for (let i = 0; i < topK; ++i) { + let prob = sorted[i][0]; + let index = sorted[i][1]; + let c = { + label: labels[index], + prob: (prob * 100).toFixed(2) + } + classes.push(c); + } + return classes; +} + +loadImageToCanvas = function(e, canvasId) { + let files = e.target.files; + let imgUrl = URL.createObjectURL(files[0]); + let canvas = document.getElementById(canvasId); + let ctx = canvas.getContext('2d'); + let img = new Image(); + img.crossOrigin = 'anonymous'; + img.src = imgUrl; + img.onload = function() { + ctx.drawImage(img, 0, 0, canvas.width, canvas.height); + }; +} + +drawInfoTable = async function(jsonUrl, divId) { + let response = await fetch(jsonUrl); + let json = await response.json(); + + let appendix = document.getElementById(divId); + for (key of Object.keys(json)) { + let h3 = document.createElement('h3'); + h3.textContent = key + " model"; + appendix.appendChild(h3); + + let table = document.createElement('table'); + let head_tr = document.createElement('tr'); + for (head of Object.keys(json[key][0])) { + let th = document.createElement('th'); + th.textContent = head; + th.style.border = "1px solid black"; + head_tr.appendChild(th); + } + table.appendChild(head_tr) + + for (model of json[key]) { + let tr = document.createElement('tr'); + for (params of Object.keys(model)) { + let td = document.createElement('td'); + td.style.border = "1px solid black"; + if (params !== "modelUrl" && params !== "configUrl" && params !== "labelsUrl") { + td.textContent = model[params]; + tr.appendChild(td); + } else { + let a = document.createElement('a'); + let link = document.createTextNode('link'); + a.append(link); + a.href = model[params]; + td.appendChild(a); + tr.appendChild(td); + } + } + table.appendChild(tr); + } + table.style.width = "800px"; + table.style.borderCollapse = "collapse"; + appendix.appendChild(table); + } +} diff --git a/doc/js_tutorials/js_assets/js_image_classification.html b/doc/js_tutorials/js_assets/js_image_classification.html new file mode 100644 index 0000000000..656f2720b6 --- /dev/null +++ b/doc/js_tutorials/js_assets/js_image_classification.html @@ -0,0 +1,263 @@ + + + + + + Image Classification Example + + + + +

[diff body elided: the HTML markup of doc/js_tutorials/js_assets/js_image_classification.html (new file, 263 lines) was stripped during extraction; the recoverable page text follows.]

Image Classification Example

This tutorial shows you how to write an image classification example with OpenCV.js. To try the example, click the modelFile button (and the configFile button if needed) to upload the inference model. You can find the model URLs and parameters in the model info section. Then change the parameters in the first code snippet according to the uploaded model. Finally, click the Try it button to see the result. You can choose any other image.

The page provides a canvasInput canvas, modelFile/configFile upload inputs, and six editable code snippets:
1. The parameters for model inference, which you can modify to investigate more models.
2. The main loop, which reads the image from the canvas and runs inference once.
3. Loading labels from a txt file and processing them into an array.
4. Getting a blob from the image as input for the net, standardized with the mean and std.
5. Fetching the model file and saving it to the Emscripten file system when the input button is clicked.
6. The post-processing: applying softmax if needed and extracting the top classes from the output vector.

Model Info: a table rendered from js_image_classification_model_info.json (below).
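For orientation, here is a minimal sketch of what the page's main inference snippet boils down to, assembled from the helper functions in js_dnn_example_helper.js above. This is not the page's verbatim code: the parameter values are examples, labelsUrl/modelPath/configPath stand in for values the page supplies, and the optional softmax step is omitted.

    // Sketch only -- illustrative parameters; edit them to match the uploaded model.
    const inputSize = [224, 224], mean = [104, 117, 123], std = 1, swapRB = false;

    main = async function() {
        const labels = await loadLables(labelsUrl);   // helper from js_dnn_example_helper.js
        const input = getBlobFromImage(inputSize, mean, std, swapRB, 'canvasInput');
        let net = cv.readNet(configPath, modelPath);  // paths resolved by loadModel();
                                                      // readNet infers the framework from extensions
        net.setInput(input);
        const start = performance.now();
        const result = net.forward();                 // 1 x nClasses score vector
        const time = performance.now() - start;
        const classes = getTopClasses(result.data32F, labels);
        console.log('Inference time: ' + time.toFixed(2) + ' ms', classes);
        input.delete(); result.delete(); net.delete();
    };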
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/doc/js_tutorials/js_assets/js_image_classification_model_info.json b/doc/js_tutorials/js_assets/js_image_classification_model_info.json new file mode 100644 index 0000000000..67553ec2d3 --- /dev/null +++ b/doc/js_tutorials/js_assets/js_image_classification_model_info.json @@ -0,0 +1,65 @@ +{ + "caffe": [ + { + "model": "alexnet", + "mean": "104, 117, 123", + "std": "1", + "swapRB": "false", + "needSoftmax": "false", + "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt", + "modelUrl": "http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel", + "configUrl": "https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_alexnet/deploy.prototxt" + }, + { + "model": "densenet", + "mean": "127.5, 127.5, 127.5", + "std": "0.007843", + "swapRB": "false", + "needSoftmax": "true", + "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt", + "modelUrl": "https://drive.google.com/open?id=0B7ubpZO7HnlCcHlfNmJkU2VPelE", + "configUrl": "https://raw.githubusercontent.com/shicai/DenseNet-Caffe/master/DenseNet_121.prototxt" + }, + { + "model": "googlenet", + "mean": "104, 117, 123", + "std": "1", + "swapRB": "false", + "needSoftmax": "false", + "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt", + "modelUrl": "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel", + "configUrl": "https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_googlenet/deploy.prototxt" + }, + { + "model": "squeezenet", + "mean": "104, 117, 123", + "std": "1", + "swapRB": "false", + "needSoftmax": "false", + "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt", + "modelUrl": "https://raw.githubusercontent.com/forresti/SqueezeNet/master/SqueezeNet_v1.0/squeezenet_v1.0.caffemodel", + "configUrl": "https://raw.githubusercontent.com/forresti/SqueezeNet/master/SqueezeNet_v1.0/deploy.prototxt" + }, + { + "model": "VGG", + "mean": "104, 117, 123", + "std": "1", + "swapRB": "false", + "needSoftmax": "false", + "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt", + "modelUrl": "http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel", + "configUrl": "https://gist.githubusercontent.com/ksimonyan/3785162f95cd2d5fee77/raw/f02f8769e64494bcd3d7e97d5d747ac275825721/VGG_ILSVRC_19_layers_deploy.prototxt" + } + ], + "tensorflow": [ + { + "model": "inception", + "mean": "123, 117, 104", + "std": "1", + "swapRB": "true", + "needSoftmax": "false", + "labelsUrl": "https://raw.githubusercontent.com/petewarden/tf_ios_makefile_example/master/data/imagenet_comp_graph_label_strings.txt", + "modelUrl": "https://raw.githubusercontent.com/petewarden/tf_ios_makefile_example/master/data/tensorflow_inception_graph.pb" + } + ] +} \ No newline at end of file diff --git a/doc/js_tutorials/js_assets/js_image_classification_with_camera.html b/doc/js_tutorials/js_assets/js_image_classification_with_camera.html new file mode 100644 index 0000000000..9a2473cf2b --- /dev/null +++ b/doc/js_tutorials/js_assets/js_image_classification_with_camera.html @@ -0,0 +1,281 @@ + + + + + + Image Classification Example with Camera + + + + +

[diff body elided: the HTML markup of doc/js_tutorials/js_assets/js_image_classification_with_camera.html (new file, 281 lines) was stripped during extraction; the recoverable page text follows.]

Image Classification Example with Camera

This tutorial shows you how to write an image classification example with camera. To try the example, click the modelFile button (and the configFile button if needed) to upload the inference model. You can find the model URLs and parameters in the model info section. Then change the parameters in the first code snippet according to the uploaded model. Finally, click the Start/Stop button to start or stop the camera capture.

The page provides a videoInput element, modelFile/configFile upload inputs, and six editable code snippets:
1. The parameters for model inference, which you can modify to investigate more models.
2. The function that captures video from the camera, and the main loop that runs inference once per frame.
3. Loading labels from a txt file and processing them into an array.
4. Getting a blob from the image as input for the net, standardized with the mean and std.
5. Fetching the model file and saving it to the Emscripten file system when the input button is clicked.
6. The post-processing: applying softmax if needed and extracting the top classes from the output vector.

Model Info: the same table as the still-image example, rendered from js_image_classification_model_info.json.
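The camera variant swaps the one-shot read for a capture loop. A sketch of that pattern, following the scheme of the @ref tutorial_js_video_display tutorial (the streaming flag and FPS value are illustrative):

    let video = document.getElementById('videoInput');   // <video> element on the page
    let cap = new cv.VideoCapture(video);
    let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
    const FPS = 30;

    function processVideo() {
        if (!streaming) { frame.delete(); return; }      // streaming: page-level start/stop flag
        const begin = Date.now();
        cap.read(frame);                                 // grab the current camera frame
        // ...run the same blob / forward / getTopClasses steps as in the still-image page...
        const delay = 1000 / FPS - (Date.now() - begin); // schedule next frame at ~constant FPS
        setTimeout(processVideo, Math.max(0, delay));
    }
    setTimeout(processVideo, 0);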
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/doc/js_tutorials/js_assets/js_object_detection.html b/doc/js_tutorials/js_assets/js_object_detection.html new file mode 100644 index 0000000000..53f1e48639 --- /dev/null +++ b/doc/js_tutorials/js_assets/js_object_detection.html @@ -0,0 +1,387 @@ + + + + + + Object Detection Example + + + + +

[diff body elided: the HTML markup of doc/js_tutorials/js_assets/js_object_detection.html (new file, 387 lines) was stripped during extraction; the recoverable page text follows.]

Object Detection Example

This tutorial shows you how to write an object detection example with OpenCV.js. To try the example, click the modelFile button (and the configFile button if needed) to upload the inference model. You can find the model URLs and parameters in the model info section. Then change the parameters in the first code snippet according to the uploaded model. Finally, click the Try it button to see the result. You can choose any other image.

The page provides a canvasInput canvas, modelFile/configFile upload inputs, and six editable code snippets:
1. The parameters for model inference, which you can modify to investigate more models.
2. The main loop, which reads the image from the canvas and runs inference once.
3. Loading labels from a txt file and processing them into an array.
4. Getting a blob from the image as input for the net, standardized with the mean and std.
5. Fetching the model file and saving it to the Emscripten file system when the input button is clicked.
6. The post-processing: extracting the boxes from the output and drawing them on the image.

Model Info: a table rendered from js_object_detection_model_info.json (below).
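The detection pages distinguish two output layouts via the outType field of the model info JSON that follows. For outType "SSD", each detection row is [batchId, classId, confidence, left, top, right, bottom] with coordinates normalized to [0, 1]; a sketch of the box-drawing step (the threshold and the background-class label offset are illustrative):

    function drawSSDOutput(output, img, labels, threshold = 0.5) {
        const data = output.data32F;                  // flattened 1 x 1 x N x 7 blob
        for (let i = 0; i < data.length; i += 7) {
            const confidence = data[i + 2];
            if (confidence < threshold) continue;
            const classId = data[i + 1];
            const left   = data[i + 3] * img.cols, top    = data[i + 4] * img.rows;
            const right  = data[i + 5] * img.cols, bottom = data[i + 6] * img.rows;
            cv.rectangle(img, new cv.Point(left, top), new cv.Point(right, bottom),
                         new cv.Scalar(0, 255, 0, 255), 2);
            // VOC label lists omit the background class, hence the -1 offset.
            cv.putText(img, labels[classId - 1] + ': ' + (confidence * 100).toFixed(1) + '%',
                       new cv.Point(left, Math.max(top - 5, 10)),
                       cv.FONT_HERSHEY_SIMPLEX, 0.5, new cv.Scalar(255, 0, 0, 255));
        }
    }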
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/doc/js_tutorials/js_assets/js_object_detection_model_info.json b/doc/js_tutorials/js_assets/js_object_detection_model_info.json new file mode 100644 index 0000000000..c0d14be714 --- /dev/null +++ b/doc/js_tutorials/js_assets/js_object_detection_model_info.json @@ -0,0 +1,39 @@ +{ + "caffe": [ + { + "model": "mobilenet_SSD", + "inputSize": "300, 300", + "mean": "127.5, 127.5, 127.5", + "std": "0.007843", + "swapRB": "false", + "outType": "SSD", + "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt", + "modelUrl": "https://raw.githubusercontent.com/chuanqi305/MobileNet-SSD/master/mobilenet_iter_73000.caffemodel", + "configUrl": "https://raw.githubusercontent.com/chuanqi305/MobileNet-SSD/master/deploy.prototxt" + }, + { + "model": "VGG_SSD", + "inputSize": "300, 300", + "mean": "104, 117, 123", + "std": "1", + "swapRB": "false", + "outType": "SSD", + "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt", + "modelUrl": "https://drive.google.com/uc?id=0BzKzrI_SkD1_WVVTSmQxU0dVRzA&export=download", + "configUrl": "https://drive.google.com/uc?id=0BzKzrI_SkD1_WVVTSmQxU0dVRzA&export=download" + } + ], + "darknet": [ + { + "model": "yolov2_tiny", + "inputSize": "416, 416", + "mean": "0, 0, 0", + "std": "0.00392", + "swapRB": "false", + "outType": "YOLO", + "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_yolov3.txt", + "modelUrl": "https://pjreddie.com/media/files/yolov2-tiny.weights", + "configUrl": "https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov2-tiny.cfg" + } + ] +} \ No newline at end of file diff --git a/doc/js_tutorials/js_assets/js_object_detection_with_camera.html b/doc/js_tutorials/js_assets/js_object_detection_with_camera.html new file mode 100644 index 0000000000..41bb609708 --- /dev/null +++ b/doc/js_tutorials/js_assets/js_object_detection_with_camera.html @@ -0,0 +1,402 @@ + + + + + + Object Detection Example with Camera + + + + +

[diff body elided: the HTML markup of doc/js_tutorials/js_assets/js_object_detection_with_camera.html (new file, 402 lines) was stripped during extraction; the recoverable page text follows.]

Object Detection Example with Camera

This tutorial shows you how to write an object detection example with camera. To try the example, click the modelFile button (and the configFile button if needed) to upload the inference model. You can find the model URLs and parameters in the model info section. Then change the parameters in the first code snippet according to the uploaded model. Finally, click the Start/Stop button to start or stop the camera capture.

The page provides a videoInput element, modelFile/configFile upload inputs, and six editable code snippets:
1. The parameters for model inference, which you can modify to investigate more models.
2. The function that captures video from the camera, and the main loop that runs inference once per frame.
3. Loading labels from a txt file and processing them into an array.
4. Getting a blob from the image as input for the net, standardized with the mean and std.
5. Fetching the model file and saving it to the Emscripten file system when the input button is clicked.
6. The post-processing: extracting the boxes from the output and drawing them on the image.

Model Info: the same table as the still-image example, rendered from js_object_detection_model_info.json.
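For the outType "YOLO" branch listed in the object detection model info above, each output row is a centre-format box plus per-class scores; a sketch (the score threshold is illustrative, and non-maximum suppression is omitted for brevity):

    function drawYOLOOutput(output, img, labels, threshold = 0.5) {
        const data = output.data32F;                  // output.rows x (5 + nClasses)
        for (let i = 0; i < output.rows; ++i) {
            const row = data.subarray(i * output.cols, (i + 1) * output.cols);
            const scores = row.subarray(5);           // row = [cx, cy, w, h, objectness, scores...]
            let classId = 0;
            for (let c = 1; c < scores.length; ++c)
                if (scores[c] > scores[classId]) classId = c;
            if (scores[classId] < threshold) continue;
            const cx = row[0] * img.cols, cy = row[1] * img.rows;
            const bw = row[2] * img.cols, bh = row[3] * img.rows;
            cv.rectangle(img, new cv.Point(cx - bw / 2, cy - bh / 2),
                         new cv.Point(cx + bw / 2, cy + bh / 2),
                         new cv.Scalar(0, 255, 0, 255), 2);
        }
    }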
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/doc/js_tutorials/js_assets/js_pose_estimation.html b/doc/js_tutorials/js_assets/js_pose_estimation.html new file mode 100644 index 0000000000..19c64663d1 --- /dev/null +++ b/doc/js_tutorials/js_assets/js_pose_estimation.html @@ -0,0 +1,327 @@ + + + + + + Pose Estimation Example + + + + +

[diff body elided: the HTML markup of doc/js_tutorials/js_assets/js_pose_estimation.html (new file, 327 lines) was stripped during extraction; the recoverable page text follows.]

Pose Estimation Example

This tutorial shows you how to write a pose estimation example with OpenCV.js. To try the example, click the modelFile button (and the configFile button if needed) to upload the inference model. You can find the model URLs and parameters in the model info section. Then change the parameters in the first code snippet according to the uploaded model. Finally, click the Try it button to see the result. You can choose any other image.

The page provides a canvasInput canvas, modelFile/configFile upload inputs, and six editable code snippets:
1. The parameters for model inference, which you can modify to investigate more models.
2. The main loop, which reads the image from the canvas and runs inference once.
3. Getting a blob from the image as input for the net, standardized with the mean and std.
4. Fetching the model file and saving it to the Emscripten file system when the input button is clicked.
5. The pairs of keypoints for the different datasets (BODY_25, COCO, MPI).
6. The post-processing: extracting the predicted points and drawing the limb lines on the image.

Model Info: a table rendered from js_pose_estimation_model_info.json (below).
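Snippet 6 above reduces the network's per-part heatmaps to keypoints before drawing the pairs. A sketch of that reduction, assuming a [1, channels, H, W] output whose first nParts channels are keypoint heatmaps (the confidence threshold is illustrative):

    function getKeyPoints(output, img, nParts, threshold = 0.1) {
        const h = output.matSize[2], w = output.matSize[3];
        const data = output.data32F;
        const points = [];
        for (let p = 0; p < nParts; ++p) {            // one heatmap per body part
            let best = -1, bestIdx = 0;
            for (let i = 0; i < h * w; ++i) {
                const v = data[p * h * w + i];
                if (v > best) { best = v; bestIdx = i; }
            }
            // Map the hottest heatmap cell back to image coordinates.
            points.push(best > threshold
                ? new cv.Point((bestIdx % w) * img.cols / w,
                               Math.floor(bestIdx / w) * img.rows / h)
                : null);
        }
        return points;                                // then draw cv.line() per keypoint pair
    }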
+ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/doc/js_tutorials/js_assets/js_pose_estimation_model_info.json b/doc/js_tutorials/js_assets/js_pose_estimation_model_info.json new file mode 100644 index 0000000000..922c813f39 --- /dev/null +++ b/doc/js_tutorials/js_assets/js_pose_estimation_model_info.json @@ -0,0 +1,34 @@ +{ + "caffe": [ + { + "model": "body_25", + "inputSize": "368, 368", + "mean": "0, 0, 0", + "std": "0.00392", + "swapRB": "false", + "dataset": "BODY_25", + "modelUrl": "http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/body_25/pose_iter_584000.caffemodel", + "configUrl": "https://raw.githubusercontent.com/CMU-Perceptual-Computing-Lab/openpose/master/models/pose/body_25/pose_deploy.prototxt" + }, + { + "model": "coco", + "inputSize": "368, 368", + "mean": "0, 0, 0", + "std": "0.00392", + "swapRB": "false", + "dataset": "COCO", + "modelUrl": "http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/coco/pose_iter_440000.caffemodel", + "configUrl": "https://raw.githubusercontent.com/CMU-Perceptual-Computing-Lab/openpose/master/models/pose/coco/pose_deploy_linevec.prototxt" + }, + { + "model": "mpi", + "inputSize": "368, 368", + "mean": "0, 0, 0", + "std": "0.00392", + "swapRB": "false", + "dataset": "MPI", + "modelUrl": "http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel", + "configUrl": "https://raw.githubusercontent.com/CMU-Perceptual-Computing-Lab/openpose/master/models/pose/mpi/pose_deploy_linevec.prototxt" + } + ] +} \ No newline at end of file diff --git a/doc/js_tutorials/js_assets/js_semantic_segmentation.html b/doc/js_tutorials/js_assets/js_semantic_segmentation.html new file mode 100644 index 0000000000..6fc27dbd19 --- /dev/null +++ b/doc/js_tutorials/js_assets/js_semantic_segmentation.html @@ -0,0 +1,243 @@ + + + + + + Semantic Segmentation Example + + + + +

[diff body elided: the HTML markup of doc/js_tutorials/js_assets/js_semantic_segmentation.html (new file, 243 lines) was stripped during extraction; the recoverable page text follows.]

Semantic Segmentation Example

This tutorial shows you how to write a semantic segmentation example with OpenCV.js. To try the example, click the modelFile button (and the configFile button if needed) to upload the inference model. You can find the model URLs and parameters in the model info section. Then change the parameters in the first code snippet according to the uploaded model. Finally, click the Try it button to see the result. You can choose any other image.

The page provides a canvasInput canvas, modelFile/configFile upload inputs, and five editable code snippets:
1. The parameters for model inference, which you can modify to investigate more models.
2. The main loop, which reads the image from the canvas and runs inference once.
3. Getting a blob from the image as input for the net, standardized with the mean and std.
4. Fetching the model file and saving it to the Emscripten file system when the input button is clicked.
5. The post-processing: generating colors for the different classes and applying argmax to pick the class of each pixel.

Model Info: a table rendered from js_semantic_segmentation_model_info.json (below).
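Snippet 5 above colors each pixel by the class with the highest score. A sketch of that argmax step, assuming a [1, nClasses, H, W] output blob and a per-class colors array generated beforehand:

    function argmaxToColors(output, colors) {
        const nClasses = output.matSize[1];
        const h = output.matSize[2], w = output.matSize[3];
        const data = output.data32F;
        let seg = new cv.Mat(h, w, cv.CV_8UC3);
        for (let y = 0; y < h; ++y) {
            for (let x = 0; x < w; ++x) {
                let bestC = 0;                         // argmax over the class axis
                for (let c = 1; c < nClasses; ++c)
                    if (data[(c * h + y) * w + x] > data[(bestC * h + y) * w + x]) bestC = c;
                seg.ucharPtr(y, x).set(colors[bestC]); // colors[c] = [r, g, b]
            }
        }
        return seg;                                    // resize and cv.imshow() afterwards
    }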
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/doc/js_tutorials/js_assets/js_semantic_segmentation_model_info.json b/doc/js_tutorials/js_assets/js_semantic_segmentation_model_info.json new file mode 100644 index 0000000000..ef0016af1d --- /dev/null +++ b/doc/js_tutorials/js_assets/js_semantic_segmentation_model_info.json @@ -0,0 +1,12 @@ +{ + "tensorflow": [ + { + "model": "deeplabv3", + "inputSize": "513, 513", + "mean": "127.5, 127.5, 127.5", + "std": "0.007843", + "swapRB": "false", + "modelUrl": "https://drive.google.com/uc?id=1v-hfGenaE9tiGOzo5qdgMNG_gqQ5-Xn4&export=download" + } + ] +} \ No newline at end of file diff --git a/doc/js_tutorials/js_assets/js_style_transfer.html b/doc/js_tutorials/js_assets/js_style_transfer.html new file mode 100644 index 0000000000..91422e1344 --- /dev/null +++ b/doc/js_tutorials/js_assets/js_style_transfer.html @@ -0,0 +1,228 @@ + + + + + + Style Transfer Example + + + + +

[diff body elided: the HTML markup of doc/js_tutorials/js_assets/js_style_transfer.html (new file, 228 lines) was stripped during extraction; the recoverable page text follows.]

Style Transfer Example

This tutorial shows you how to write a style transfer example with OpenCV.js. To try the example, click the modelFile button (and the configFile button if needed) to upload the inference model. You can find the model URLs and parameters in the model info section. Then change the parameters in the first code snippet according to the uploaded model. Finally, click the Try it button to see the result. You can choose any other image.

The page provides a canvasInput canvas, modelFile/configFile upload inputs, and five editable code snippets:
1. The parameters for model inference, which you can modify to investigate more models.
2. The main loop, which reads the image from the canvas and runs inference once.
3. Getting a blob from the image as input for the net, standardized with the mean and std.
4. Fetching the model file and saving it to the Emscripten file system when the input button is clicked.
5. The post-processing: scaling and reordering the output.

Model Info: a table rendered from js_style_transfer_model_info.json (below).
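The "scaling and reordering" in snippet 5 undoes the preprocessing: the torch models emit a [1, 3, H, W] float blob with the training mean subtracted, so the mean is added back, values are clamped to [0, 255], and the planar channels are interleaved into a displayable RGBA image. A sketch under those assumptions (the BGR plane order is the usual Caffe/torch convention, not confirmed by the page):

    function blobToImage(output, mean) {               // mean, e.g. [104, 117, 123] (BGR)
        const h = output.matSize[2], w = output.matSize[3];
        const data = output.data32F;
        let img = new cv.Mat(h, w, cv.CV_8UC4);
        for (let y = 0; y < h; ++y) {
            for (let x = 0; x < w; ++x) {
                const px = img.ucharPtr(y, x);         // RGBA pixel view
                for (let c = 0; c < 3; ++c) {          // plane c is B/G/R -> px[2 - c]
                    const v = data[(c * h + y) * w + x] + mean[c];
                    px[2 - c] = Math.max(0, Math.min(255, v));
                }
                px[3] = 255;                           // opaque alpha
            }
        }
        return img;                                    // show with cv.imshow('canvasOutput', img)
    }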
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/doc/js_tutorials/js_assets/js_style_transfer_model_info.json b/doc/js_tutorials/js_assets/js_style_transfer_model_info.json new file mode 100644 index 0000000000..9cc66018a0 --- /dev/null +++ b/doc/js_tutorials/js_assets/js_style_transfer_model_info.json @@ -0,0 +1,76 @@ +{ + "torch": [ + { + "model": "candy.t7", + "inputSize": "224, 224", + "mean": "104, 117, 123", + "std": "1", + "swapRB": "false", + "modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//instance_norm/candy.t7" + }, + { + "model": "composition_vii.t7", + "inputSize": "224, 224", + "mean": "104, 117, 123", + "std": "1", + "swapRB": "false", + "modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//eccv16/composition_vii.t7" + }, + { + "model": "feathers.t7", + "inputSize": "224, 224", + "mean": "104, 117, 123", + "std": "1", + "swapRB": "false", + "modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//instance_norm/feathers.t7" + }, + { + "model": "la_muse.t7", + "inputSize": "224, 224", + "mean": "104, 117, 123", + "std": "1", + "swapRB": "false", + "modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//instance_norm/la_muse.t7" + }, + { + "model": "mosaic.t7", + "inputSize": "224, 224", + "mean": "104, 117, 123", + "std": "1", + "swapRB": "false", + "modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//instance_norm/mosaic.t7" + }, + { + "model": "starry_night.t7", + "inputSize": "224, 224", + "mean": "104, 117, 123", + "std": "1", + "swapRB": "false", + "modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//eccv16/starry_night.t7" + }, + { + "model": "the_scream.t7", + "inputSize": "224, 224", + "mean": "104, 117, 123", + "std": "1", + "swapRB": "false", + "modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//instance_norm/the_scream.t7" + }, + { + "model": "the_wave.t7", + "inputSize": "224, 224", + "mean": "104, 117, 123", + "std": "1", + "swapRB": "false", + "modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//eccv16/the_wave.t7" + }, + { + "model": "udnie.t7", + "inputSize": "224, 224", + "mean": "104, 117, 123", + "std": "1", + "swapRB": "false", + "modelUrl": "https://cs.stanford.edu/people/jcjohns/fast-neural-style/models//instance_norm/udnie.t7" + } + ] +} \ No newline at end of file diff --git a/doc/js_tutorials/js_assets/utils.js b/doc/js_tutorials/js_assets/utils.js index 4d5deb0b51..65f6d1782d 100644 --- a/doc/js_tutorials/js_assets/utils.js +++ b/doc/js_tutorials/js_assets/utils.js @@ -7,7 +7,7 @@ function Utils(errorOutputId) { // eslint-disable-line no-unused-vars let script = document.createElement('script'); script.setAttribute('async', ''); script.setAttribute('type', 'text/javascript'); - script.addEventListener('load', () => { + script.addEventListener('load', async () => { if (cv.getBuildInformation) { console.log(cv.getBuildInformation()); @@ -16,9 +16,15 @@ function Utils(errorOutputId) { // eslint-disable-line no-unused-vars else { // WASM - cv['onRuntimeInitialized']=()=>{ + if (cv instanceof Promise) { + cv = await cv; console.log(cv.getBuildInformation()); onloadCallback(); + } else { + cv['onRuntimeInitialized']=()=>{ + console.log(cv.getBuildInformation()); + onloadCallback(); + } } } }); diff --git a/doc/js_tutorials/js_dnn/js_image_classification/js_image_classification.markdown 
b/doc/js_tutorials/js_dnn/js_image_classification/js_image_classification.markdown new file mode 100644 index 0000000000..1a94f8d14a --- /dev/null +++ b/doc/js_tutorials/js_dnn/js_image_classification/js_image_classification.markdown @@ -0,0 +1,13 @@ +Image Classification Example {#tutorial_js_image_classification} +======================================= + +Goal +---- + +- In this tutorial you will learn how to use OpenCV.js dnn module for image classification. + +\htmlonly + +\endhtmlonly \ No newline at end of file diff --git a/doc/js_tutorials/js_dnn/js_image_classification/js_image_classification_with_camera.markdown b/doc/js_tutorials/js_dnn/js_image_classification/js_image_classification_with_camera.markdown new file mode 100644 index 0000000000..bdf11161fc --- /dev/null +++ b/doc/js_tutorials/js_dnn/js_image_classification/js_image_classification_with_camera.markdown @@ -0,0 +1,15 @@ +Image Classification Example with Camera {#tutorial_js_image_classification_with_camera} +======================================= + +Goal +---- + +- In this tutorial you will learn how to use OpenCV.js dnn module for image classification example with camera. + +@note If you don't know how to capture video from camera, please review @ref tutorial_js_video_display. + +\htmlonly + +\endhtmlonly \ No newline at end of file diff --git a/doc/js_tutorials/js_dnn/js_object_detection/js_object_detection.markdown b/doc/js_tutorials/js_dnn/js_object_detection/js_object_detection.markdown new file mode 100644 index 0000000000..980b45c236 --- /dev/null +++ b/doc/js_tutorials/js_dnn/js_object_detection/js_object_detection.markdown @@ -0,0 +1,13 @@ +Object Detection Example {#tutorial_js_object_detection} +======================================= + +Goal +---- + +- In this tutorial you will learn how to use OpenCV.js dnn module for object detection. + +\htmlonly + +\endhtmlonly \ No newline at end of file diff --git a/doc/js_tutorials/js_dnn/js_object_detection/js_object_detection_with_camera.markdown b/doc/js_tutorials/js_dnn/js_object_detection/js_object_detection_with_camera.markdown new file mode 100644 index 0000000000..e6e8f6f957 --- /dev/null +++ b/doc/js_tutorials/js_dnn/js_object_detection/js_object_detection_with_camera.markdown @@ -0,0 +1,13 @@ +Object Detection Example with Camera{#tutorial_js_object_detection_with_camera} +======================================= + +Goal +---- + +- In this tutorial you will learn how to use OpenCV.js dnn module for object detection with camera. + +\htmlonly + +\endhtmlonly \ No newline at end of file diff --git a/doc/js_tutorials/js_dnn/js_pose_estimation/js_pose_estimation.markdown b/doc/js_tutorials/js_dnn/js_pose_estimation/js_pose_estimation.markdown new file mode 100644 index 0000000000..b090ff2cfb --- /dev/null +++ b/doc/js_tutorials/js_dnn/js_pose_estimation/js_pose_estimation.markdown @@ -0,0 +1,13 @@ +Pose Estimation Example {#tutorial_js_pose_estimation} +======================================= + +Goal +---- + +- In this tutorial you will learn how to use OpenCV.js dnn module for pose estimation. 
+ +\htmlonly + +\endhtmlonly \ No newline at end of file diff --git a/doc/js_tutorials/js_dnn/js_semantic_segmentation/js_semantic_segmentation.markdown b/doc/js_tutorials/js_dnn/js_semantic_segmentation/js_semantic_segmentation.markdown new file mode 100644 index 0000000000..50177fb549 --- /dev/null +++ b/doc/js_tutorials/js_dnn/js_semantic_segmentation/js_semantic_segmentation.markdown @@ -0,0 +1,13 @@ +Semantic Segmentation Example {#tutorial_js_semantic_segmentation} +======================================= + +Goal +---- + +- In this tutorial you will learn how to use OpenCV.js dnn module for semantic segmentation. + +\htmlonly + +\endhtmlonly \ No newline at end of file diff --git a/doc/js_tutorials/js_dnn/js_style_transfer/js_style_transfer.markdown b/doc/js_tutorials/js_dnn/js_style_transfer/js_style_transfer.markdown new file mode 100644 index 0000000000..7c1799ac6a --- /dev/null +++ b/doc/js_tutorials/js_dnn/js_style_transfer/js_style_transfer.markdown @@ -0,0 +1,13 @@ +Style Transfer Example {#tutorial_js_style_transfer} +======================================= + +Goal +---- + +- In this tutorial you will learn how to use OpenCV.js dnn module for style transfer. + +\htmlonly + +\endhtmlonly \ No newline at end of file diff --git a/doc/js_tutorials/js_dnn/js_table_of_contents_dnn.markdown b/doc/js_tutorials/js_dnn/js_table_of_contents_dnn.markdown new file mode 100644 index 0000000000..e008dc81d1 --- /dev/null +++ b/doc/js_tutorials/js_dnn/js_table_of_contents_dnn.markdown @@ -0,0 +1,30 @@ +Deep Neural Networks (dnn module) {#tutorial_js_table_of_contents_dnn} +============ + +- @subpage tutorial_js_image_classification + + Image classification example + +- @subpage tutorial_js_image_classification_with_camera + + Image classification example with camera + +- @subpage tutorial_js_object_detection + + Object detection example + +- @subpage tutorial_js_object_detection_with_camera + + Object detection example with camera + +- @subpage tutorial_js_semantic_segmentation + + Semantic segmentation example + +- @subpage tutorial_js_style_transfer + + Style transfer example + +- @subpage tutorial_js_pose_estimation + + Pose estimation example diff --git a/doc/js_tutorials/js_tutorials.markdown b/doc/js_tutorials/js_tutorials.markdown index c8a8f92a31..73e69daa98 100644 --- a/doc/js_tutorials/js_tutorials.markdown +++ b/doc/js_tutorials/js_tutorials.markdown @@ -26,3 +26,7 @@ OpenCV.js Tutorials {#tutorial_js_root} In this section you will object detection techniques like face detection etc. + +- @subpage tutorial_js_table_of_contents_dnn + + These tutorials show how to use dnn module in JavaScript From 24fac5f56d61e0caa757a99c0bbfbc9239d6ce7d Mon Sep 17 00:00:00 2001 From: Alexander Smorkalov Date: Mon, 30 Nov 2020 11:54:51 +0300 Subject: [PATCH 05/14] Added test for VideoCapture CAP_PROP_FRAME_MSEC option. - Suppressed FFMPEG + h264, h265 as it does not pass tests with CI configuration. - Suppressed MediaFoundation backend as it always returns zero for now. 
--- modules/videoio/src/cap_mjpeg_decoder.cpp | 2 ++ modules/videoio/test/test_video_io.cpp | 30 +++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/modules/videoio/src/cap_mjpeg_decoder.cpp b/modules/videoio/src/cap_mjpeg_decoder.cpp index 116f118d28..a3c87812ac 100644 --- a/modules/videoio/src/cap_mjpeg_decoder.cpp +++ b/modules/videoio/src/cap_mjpeg_decoder.cpp @@ -116,6 +116,8 @@ double MotionJpegCapture::getProperty(int property) const { case CAP_PROP_POS_FRAMES: return (double)getFramePos(); + case CAP_PROP_POS_MSEC: + return (double)getFramePos() * (1000. / m_fps); case CAP_PROP_POS_AVI_RATIO: return double(getFramePos())/m_mjpeg_frames.size(); case CAP_PROP_FRAME_WIDTH: diff --git a/modules/videoio/test/test_video_io.cpp b/modules/videoio/test/test_video_io.cpp index 97b43ab68c..3f5617d8ce 100644 --- a/modules/videoio/test/test_video_io.cpp +++ b/modules/videoio/test/test_video_io.cpp @@ -231,6 +231,34 @@ public: else std::cout << "Frames counter is not available. Actual frames: " << count_actual << ". SKIP check." << std::endl; } + + void doTimestampTest() + { + if (!isBackendAvailable(apiPref, cv::videoio_registry::getStreamBackends())) + throw SkipTestException(cv::String("Backend is not available/disabled: ") + cv::videoio_registry::getBackendName(apiPref)); + + if ((apiPref == CAP_MSMF) || ((apiPref == CAP_FFMPEG) && ((ext == "h264") || (ext == "h265")))) + throw SkipTestException(cv::String("Backend ") + cv::videoio_registry::getBackendName(apiPref) + + cv::String(" does not support CAP_PROP_POS_MSEC option")); + + VideoCapture cap; + EXPECT_NO_THROW(cap.open(video_file, apiPref)); + if (!cap.isOpened()) + throw SkipTestException(cv::String("Backend ") + cv::videoio_registry::getBackendName(apiPref) + + cv::String(" can't open the video: ") + video_file); + + Mat img; + for(int i = 0; i < 10; i++) + { + double timestamp = 0; + ASSERT_NO_THROW(cap >> img); + EXPECT_NO_THROW(timestamp = cap.get(CAP_PROP_POS_MSEC)); + const double frame_period = 1000.f/bunny_param.getFps(); + // NOTE: eps == frame_period, because videoCapture returns frame begining timestamp or frame end + // timestamp depending on codec and back-end. So the first frame has timestamp 0 or frame_period. 
+ EXPECT_NEAR(timestamp, i*frame_period, frame_period); + } + } }; //================================================================================================== @@ -367,6 +395,8 @@ TEST_P(Videoio_Bunny, read_position) { doTest(); } TEST_P(Videoio_Bunny, frame_count) { doFrameCountTest(); } +TEST_P(Videoio_Bunny, frame_timestamp) { doTimestampTest(); } + INSTANTIATE_TEST_CASE_P(videoio, Videoio_Bunny, testing::Combine( testing::ValuesIn(bunny_params), From 91ce6ef190bd90d96fce50e3a26e870f50613d1e Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Tue, 1 Dec 2020 14:01:42 +0000 Subject: [PATCH 06/14] core(ipp): disable SSE4.2 code path in countNonZero() --- modules/core/src/count_non_zero.dispatch.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/modules/core/src/count_non_zero.dispatch.cpp b/modules/core/src/count_non_zero.dispatch.cpp index 96b80c0d8c..cd3ed84350 100644 --- a/modules/core/src/count_non_zero.dispatch.cpp +++ b/modules/core/src/count_non_zero.dispatch.cpp @@ -62,11 +62,9 @@ static bool ipp_countNonZero( Mat &src, int &res ) { CV_INSTRUMENT_REGION_IPP(); -#if defined __APPLE__ || (defined _MSC_VER && defined _M_IX86) // see https://github.com/opencv/opencv/issues/17453 - if (src.dims <= 2 && src.step > 520000) + if (src.dims <= 2 && src.step > 520000 && cv::ipp::getIppTopFeatures() == ippCPUID_SSE42) return false; -#endif #if IPP_VERSION_X100 < 201801 // Poor performance of SSE42 From d35e2f533905a65cf1b5c3ff7b5cfc11b210aaf9 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Wed, 2 Dec 2020 00:21:19 +0000 Subject: [PATCH 07/14] core(ipp): workaround getIppTopFeatures() value mismatch --- modules/core/src/system.cpp | 15 +++++++++------ modules/ts/src/ts.cpp | 4 +++- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/modules/core/src/system.cpp b/modules/core/src/system.cpp index fcb9ea45ef..ad688a6c68 100644 --- a/modules/core/src/system.cpp +++ b/modules/core/src/system.cpp @@ -2429,6 +2429,13 @@ public: ippTopFeatures = ippCPUID_SSE42; pIppLibInfo = ippiGetLibVersion(); + + // workaround: https://github.com/opencv/opencv/issues/12959 + std::string ippName(pIppLibInfo->Name ? pIppLibInfo->Name : ""); + if (ippName.find("SSE4.2") != std::string::npos) + { + ippTopFeatures = ippCPUID_SSE42; + } } public: @@ -2468,16 +2475,12 @@ int getIppFeatures() #endif } -unsigned long long getIppTopFeatures(); - +#ifdef HAVE_IPP unsigned long long getIppTopFeatures() { -#ifdef HAVE_IPP return getIPPSingleton().ippTopFeatures; -#else - return 0; -#endif } +#endif void setIppStatus(int status, const char * const _funcname, const char * const _filename, int _line) { diff --git a/modules/ts/src/ts.cpp b/modules/ts/src/ts.cpp index bad799dc4d..13f5eff251 100644 --- a/modules/ts/src/ts.cpp +++ b/modules/ts/src/ts.cpp @@ -1122,7 +1122,9 @@ void SystemInfoCollector::OnTestProgramStart(const testing::UnitTest&) } recordPropertyVerbose("cv_cpu_features", "CPU features", cv::getCPUFeaturesLine()); #ifdef HAVE_IPP - recordPropertyVerbose("cv_ipp_version", "Intel(R) IPP version", cv::ipp::useIPP() ? cv::ipp::getIppVersion() : "disabled"); + recordPropertyVerbose("cv_ipp_version", "Intel(R) IPP version", cv::ipp::useIPP() ? 
cv::ipp::getIppVersion() : "disabled"); + if (cv::ipp::useIPP()) + recordPropertyVerbose("cv_ipp_features", "Intel(R) IPP features code", cv::format("0x%llx", cv::ipp::getIppTopFeatures())); #endif #ifdef HAVE_OPENCL cv::dumpOpenCLInformation(); From c42d47d94ad5fb1343ed70ce1c6a73dbe7073900 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Wed, 2 Dec 2020 12:34:24 +0000 Subject: [PATCH 08/14] cmake: clean cached INTERNAL variable used for 3rdparty deps --- cmake/OpenCVFindLibsGrfmt.cmake | 8 +++++++- cmake/OpenCVUtils.cmake | 18 ++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/cmake/OpenCVFindLibsGrfmt.cmake b/cmake/OpenCVFindLibsGrfmt.cmake index 4ad44fe833..f99bb33c80 100644 --- a/cmake/OpenCVFindLibsGrfmt.cmake +++ b/cmake/OpenCVFindLibsGrfmt.cmake @@ -6,6 +6,7 @@ if(BUILD_ZLIB) ocv_clear_vars(ZLIB_FOUND) else() + ocv_clear_internal_cache_vars(ZLIB_LIBRARY ZLIB_INCLUDE_DIR) find_package(ZLIB "${MIN_VER_ZLIB}") if(ZLIB_FOUND AND ANDROID) if(ZLIB_LIBRARIES MATCHES "/usr/(lib|lib32|lib64)/libz.so$") @@ -31,11 +32,12 @@ if(WITH_JPEG) if(BUILD_JPEG) ocv_clear_vars(JPEG_FOUND) else() + ocv_clear_internal_cache_vars(JPEG_LIBRARY JPEG_INCLUDE_DIR) include(FindJPEG) endif() if(NOT JPEG_FOUND) - ocv_clear_vars(JPEG_LIBRARY JPEG_LIBRARIES JPEG_INCLUDE_DIR) + ocv_clear_vars(JPEG_LIBRARY JPEG_INCLUDE_DIR) if(NOT BUILD_JPEG_TURBO_DISABLE) set(JPEG_LIBRARY libjpeg-turbo CACHE INTERNAL "") @@ -76,6 +78,7 @@ if(WITH_TIFF) if(BUILD_TIFF) ocv_clear_vars(TIFF_FOUND) else() + ocv_clear_internal_cache_vars(TIFF_LIBRARY TIFF_INCLUDE_DIR) include(FindTIFF) if(TIFF_FOUND) ocv_parse_header("${TIFF_INCLUDE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION) @@ -119,6 +122,7 @@ if(WITH_WEBP) if(BUILD_WEBP) ocv_clear_vars(WEBP_FOUND WEBP_LIBRARY WEBP_LIBRARIES WEBP_INCLUDE_DIR) else() + ocv_clear_internal_cache_vars(WEBP_LIBRARY WEBP_INCLUDE_DIR) include(cmake/OpenCVFindWebP.cmake) if(WEBP_FOUND) set(HAVE_WEBP 1) @@ -184,6 +188,7 @@ if(WITH_PNG) if(BUILD_PNG) ocv_clear_vars(PNG_FOUND) else() + ocv_clear_internal_cache_vars(PNG_LIBRARY PNG_INCLUDE_DIR) include(FindPNG) if(PNG_FOUND) include(CheckIncludeFile) @@ -215,6 +220,7 @@ endif() if(WITH_OPENEXR) ocv_clear_vars(HAVE_OPENEXR) if(NOT BUILD_OPENEXR) + ocv_clear_internal_cache_vars(OPENEXR_INCLUDE_PATHS OPENEXR_LIBRARIES OPENEXR_ILMIMF_LIBRARY OPENEXR_VERSION) include("${OpenCV_SOURCE_DIR}/cmake/OpenCVFindOpenEXR.cmake") endif() diff --git a/cmake/OpenCVUtils.cmake b/cmake/OpenCVUtils.cmake index 693a840ffe..6ae2cbcf8b 100644 --- a/cmake/OpenCVUtils.cmake +++ b/cmake/OpenCVUtils.cmake @@ -400,6 +400,24 @@ macro(ocv_clear_vars) endforeach() endmacro() + +# Clears passed variables with INTERNAL type from CMake cache +macro(ocv_clear_internal_cache_vars) + foreach(_var ${ARGN}) + get_property(_propertySet CACHE ${_var} PROPERTY TYPE SET) + if(_propertySet) + get_property(_type CACHE ${_var} PROPERTY TYPE) + if(_type STREQUAL "INTERNAL") + message("Cleaning INTERNAL cached variable: ${_var}") + unset(${_var} CACHE) + endif() + endif() + endforeach() + unset(_propertySet) + unset(_type) +endmacro() + + set(OCV_COMPILER_FAIL_REGEX "argument .* is not valid" # GCC 9+ (including support of unicode quotes) "command[- ]line option .* is valid for .* but not for C\\+\\+" # GNU From 6f8120cb3a2b9613bb4811d37ae4efe54265611e Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Wed, 2 Dec 2020 15:02:43 +0000 Subject: [PATCH 09/14] core(UMat): drop unavailable methods --- 
modules/core/include/opencv2/core/mat.hpp | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/modules/core/include/opencv2/core/mat.hpp b/modules/core/include/opencv2/core/mat.hpp index 05edeac523..0922db9084 100644 --- a/modules/core/include/opencv2/core/mat.hpp +++ b/modules/core/include/opencv2/core/mat.hpp @@ -2432,20 +2432,11 @@ public: UMat(const UMat& m, const Rect& roi); UMat(const UMat& m, const Range* ranges); UMat(const UMat& m, const std::vector& ranges); + + // FIXIT copyData=false is not implemented, drop this in favor of cv::Mat (OpenCV 5.0) //! builds matrix from std::vector with or without copying the data template explicit UMat(const std::vector<_Tp>& vec, bool copyData=false); - //! builds matrix from cv::Vec; the data is copied by default - template explicit UMat(const Vec<_Tp, n>& vec, bool copyData=true); - //! builds matrix from cv::Matx; the data is copied by default - template explicit UMat(const Matx<_Tp, m, n>& mtx, bool copyData=true); - //! builds matrix from a 2D point - template explicit UMat(const Point_<_Tp>& pt, bool copyData=true); - //! builds matrix from a 3D point - template explicit UMat(const Point3_<_Tp>& pt, bool copyData=true); - //! builds matrix from comma initializer - template explicit UMat(const MatCommaInitializer_<_Tp>& commaInitializer); - //! destructor - calls release() ~UMat(); //! assignment operators From 2fa624aef0bc681c37e8bb267401c54a9e4c1df9 Mon Sep 17 00:00:00 2001 From: Jaime Rivera Date: Sun, 29 Nov 2020 21:17:24 -0800 Subject: [PATCH 10/14] Add Timestamps to MSMF Video Capture by index Enable frame timestamp tests for MSMF Add functional test for camera live timestamps Remove trailing whitespace Add timestamp test to all functional tests. Protect div by 0 Add Timestamps to MSMF Video Capture by index --- modules/videoio/src/cap_msmf.cpp | 5 ++-- modules/videoio/test/test_camera.cpp | 34 ++++++++++++++++++++++++-- modules/videoio/test/test_video_io.cpp | 2 +- 3 files changed, 36 insertions(+), 5 deletions(-) diff --git a/modules/videoio/src/cap_msmf.cpp b/modules/videoio/src/cap_msmf.cpp index 15b1d2ade7..7caa7c1ea0 100644 --- a/modules/videoio/src/cap_msmf.cpp +++ b/modules/videoio/src/cap_msmf.cpp @@ -346,8 +346,6 @@ public: STDMETHODIMP OnReadSample(HRESULT hrStatus, DWORD dwStreamIndex, DWORD dwStreamFlags, LONGLONG llTimestamp, IMFSample *pSample) CV_OVERRIDE { - CV_UNUSED(llTimestamp); - HRESULT hr = 0; cv::AutoLock lock(m_mutex); @@ -360,6 +358,7 @@ public: { CV_LOG_DEBUG(NULL, "videoio(MSMF): drop frame (not processed)"); } + m_lastSampleTimestamp = llTimestamp; m_lastSample = pSample; } } @@ -439,6 +438,7 @@ public: IMFSourceReader *m_reader; DWORD m_dwStreamIndex; + LONGLONG m_lastSampleTimestamp; _ComPtr m_lastSample; }; @@ -912,6 +912,7 @@ bool CvCapture_MSMF::grabFrame() CV_LOG_WARNING(NULL, "videoio(MSMF): EOS signal. 
Capture stream is lost"); return false; } + sampleTime = reader->m_lastSampleTimestamp; return true; } else if (isOpen) diff --git a/modules/videoio/test/test_camera.cpp b/modules/videoio/test/test_camera.cpp index d816f637a7..e82285ad5e 100644 --- a/modules/videoio/test/test_camera.cpp +++ b/modules/videoio/test/test_camera.cpp @@ -11,21 +11,51 @@ namespace opencv_test { namespace { -static void test_readFrames(/*const*/ VideoCapture& capture, const int N = 100, Mat* lastFrame = NULL) +static void test_readFrames(/*const*/ VideoCapture& capture, const int N = 100, Mat* lastFrame = NULL, bool testTimestamps = true) { Mat frame; int64 time0 = cv::getTickCount(); + int64 sysTimePrev = time0; + const double cvTickFreq = cv::getTickFrequency(); + + double camTimePrev = 0.0; + const double fps = capture.get(cv::CAP_PROP_FPS); + const double framePeriod = fps == 0.0 ? 1. : 1.0 / fps; + + const bool validTickAndFps = cvTickFreq != 0 && fps != 0.; + testTimestamps &= validTickAndFps; + for (int i = 0; i < N; i++) { SCOPED_TRACE(cv::format("frame=%d", i)); capture >> frame; + const int64 sysTimeCurr = cv::getTickCount(); + const double camTimeCurr = capture.get(cv::CAP_PROP_POS_MSEC); ASSERT_FALSE(frame.empty()); + // Do we have a previous frame? + if (i > 0 && testTimestamps) + { + const double sysTimeElapsedSecs = (sysTimeCurr - sysTimePrev) / cvTickFreq; + const double camTimeElapsedSecs = (camTimeCurr - camTimePrev) / 1000.; + + // Check that the time between two camera frames and two system time calls + // are within 1.5 frame periods of one another. + // + // 1.5x is chosen to accomodate for a dropped frame, and an additional 50% + // to account for drift in the scale of the camera and system time domains. + EXPECT_NEAR(sysTimeElapsedSecs, camTimeElapsedSecs, framePeriod * 1.5); + } + EXPECT_GT(cvtest::norm(frame, NORM_INF), 0) << "Complete black image has been received"; + + sysTimePrev = sysTimeCurr; + camTimePrev = camTimeCurr; } + int64 time1 = cv::getTickCount(); - printf("Processed %d frames on %.2f FPS\n", N, (N * cv::getTickFrequency()) / (time1 - time0 + 1)); + printf("Processed %d frames on %.2f FPS\n", N, (N * cvTickFreq) / (time1 - time0 + 1)); if (lastFrame) *lastFrame = frame.clone(); } diff --git a/modules/videoio/test/test_video_io.cpp b/modules/videoio/test/test_video_io.cpp index 3f5617d8ce..19fc32b53e 100644 --- a/modules/videoio/test/test_video_io.cpp +++ b/modules/videoio/test/test_video_io.cpp @@ -237,7 +237,7 @@ public: if (!isBackendAvailable(apiPref, cv::videoio_registry::getStreamBackends())) throw SkipTestException(cv::String("Backend is not available/disabled: ") + cv::videoio_registry::getBackendName(apiPref)); - if ((apiPref == CAP_MSMF) || ((apiPref == CAP_FFMPEG) && ((ext == "h264") || (ext == "h265")))) + if (((apiPref == CAP_FFMPEG) && ((ext == "h264") || (ext == "h265")))) throw SkipTestException(cv::String("Backend ") + cv::videoio_registry::getBackendName(apiPref) + cv::String(" does not support CAP_PROP_POS_MSEC option")); From 43e58de9183ad2bf6084cca969090ac44f705df7 Mon Sep 17 00:00:00 2001 From: Yiming Li Date: Thu, 3 Dec 2020 15:35:52 +0800 Subject: [PATCH 11/14] fix: typo --- doc/tutorials/videoio/video-write/video_write.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/tutorials/videoio/video-write/video_write.markdown b/doc/tutorials/videoio/video-write/video_write.markdown index b81107559e..29b6cf6f4e 100644 --- a/doc/tutorials/videoio/video-write/video_write.markdown +++ 
From 43e58de9183ad2bf6084cca969090ac44f705df7 Mon Sep 17 00:00:00 2001
From: Yiming Li
Date: Thu, 3 Dec 2020 15:35:52 +0800
Subject: [PATCH 11/14] fix: typo

---
 doc/tutorials/videoio/video-write/video_write.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/tutorials/videoio/video-write/video_write.markdown b/doc/tutorials/videoio/video-write/video_write.markdown
index b81107559e..29b6cf6f4e 100644
--- a/doc/tutorials/videoio/video-write/video_write.markdown
+++ b/doc/tutorials/videoio/video-write/video_write.markdown
@@ -109,7 +109,7 @@ const string NAME = source.substr(0, pAt) + argv[2][0] + ".avi";   // Form the n
     @code{.cpp}
     CV_FOURCC('P','I','M','1') // this is an MPEG1 codec from the characters to integer
     @endcode
-    If you pass for this argument minus one than a window will pop up at runtime that contains all
+    If you pass for this argument minus one then a window will pop up at runtime that contains all
     the codecs installed on your system and asks you to select the one to use:

     ![](images/videoCompressSelect.png)

From 7f3ba5963d2d21d024c68f7decb21564365b027f Mon Sep 17 00:00:00 2001
From: Randall Britten
Date: Wed, 2 Dec 2020 14:47:37 +1300
Subject: [PATCH 12/14] Fixed minor typo "poins" in documentation page

---
 modules/calib3d/include/opencv2/calib3d.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp
index 812c6be108..04b5e58e23 100644
--- a/modules/calib3d/include/opencv2/calib3d.hpp
+++ b/modules/calib3d/include/opencv2/calib3d.hpp
@@ -91,7 +91,7 @@ respectively) by the same factor.

 The joint rotation-translation matrix \f$[R|t]\f$ is the matrix product of a projective
 transformation and a homogeneous transformation. The 3-by-4 projective transformation maps 3D points
-represented in camera coordinates to 2D poins in the image plane and represented in normalized
+represented in camera coordinates to 2D points in the image plane and represented in normalized
 camera coordinates \f$x' = X_c / Z_c\f$ and \f$y' = Y_c / Z_c\f$:

 \f[Z_c \begin{bmatrix}

From b31ce408ae088e13cfbf7b3306d21a6d14c01205 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Thu, 3 Dec 2020 11:59:06 +0000
Subject: [PATCH 13/14] cmake: fix processing order of _bindings_generator

- ensure that wrapped modules are already processed

---
 cmake/OpenCVModule.cmake               | 21 ++++++++++-----------
 modules/python/bindings/CMakeLists.txt |  2 +-
 2 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/cmake/OpenCVModule.cmake b/cmake/OpenCVModule.cmake
index 10ee406032..38a1bb7a7f 100644
--- a/cmake/OpenCVModule.cmake
+++ b/cmake/OpenCVModule.cmake
@@ -98,15 +98,6 @@ macro(ocv_add_dependencies full_modname)
   endforeach()
   unset(__depsvar)

-  # hack for python
-  set(__python_idx)
-  list(FIND OPENCV_MODULE_${full_modname}_WRAPPERS "python" __python_idx)
-  if (NOT __python_idx EQUAL -1)
-    list(REMOVE_ITEM OPENCV_MODULE_${full_modname}_WRAPPERS "python")
-    list(APPEND OPENCV_MODULE_${full_modname}_WRAPPERS "python_bindings_generator" "python2" "python3")
-  endif()
-  unset(__python_idx)
-
   ocv_list_unique(OPENCV_MODULE_${full_modname}_REQ_DEPS)
   ocv_list_unique(OPENCV_MODULE_${full_modname}_OPT_DEPS)
   ocv_list_unique(OPENCV_MODULE_${full_modname}_PRIVATE_REQ_DEPS)
@@ -209,9 +200,17 @@ macro(ocv_add_module _name)
     set(OPENCV_MODULES_DISABLED_USER ${OPENCV_MODULES_DISABLED_USER} "${the_module}" CACHE INTERNAL "List of OpenCV modules explicitly disabled by user")
   endif()

-  # add reverse wrapper dependencies
+  # add reverse wrapper dependencies (BINDINGS)
   foreach (wrapper ${OPENCV_MODULE_${the_module}_WRAPPERS})
-    ocv_add_dependencies(opencv_${wrapper} OPTIONAL ${the_module})
+    if(wrapper STREQUAL "python")  # hack for python (BINDINGS)
+      ocv_add_dependencies(opencv_python2 OPTIONAL ${the_module})
+      ocv_add_dependencies(opencv_python3 OPTIONAL ${the_module})
+    else()
+      ocv_add_dependencies(opencv_${wrapper} OPTIONAL ${the_module})
+    endif()
+    if(DEFINED OPENCV_MODULE_opencv_${wrapper}_bindings_generator_CLASS)
+      ocv_add_dependencies(opencv_${wrapper}_bindings_generator OPTIONAL ${the_module})
+    endif()
   endforeach()

   # stop processing of current file

diff --git a/modules/python/bindings/CMakeLists.txt b/modules/python/bindings/CMakeLists.txt
index 4ad3d0c8d9..0505f1f03f 100644
--- a/modules/python/bindings/CMakeLists.txt
+++ b/modules/python/bindings/CMakeLists.txt
@@ -11,7 +11,7 @@ set(PYTHON_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../")
 # get list of modules to wrap
 set(OPENCV_PYTHON_MODULES)
 foreach(m ${OPENCV_MODULES_BUILD})
-  if (";${OPENCV_MODULE_${m}_WRAPPERS};" MATCHES ";${MODULE_NAME};" AND HAVE_${m})
+  if (";${OPENCV_MODULE_${m}_WRAPPERS};" MATCHES ";python;" AND HAVE_${m})
     list(APPEND OPENCV_PYTHON_MODULES ${m})
     #message(STATUS "\t${m}")
   endif()
 endforeach()

From 7e5c4fe1cdb597272e757cde435f2b56d39d1d1d Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Mon, 30 Nov 2020 03:41:21 +0000
Subject: [PATCH 14/14] cmake(js): update js targets

- unconditional js bindings source code generation
- use common name for tests: opencv_test_js

---
 cmake/platforms/OpenCV-Emscripten.cmake    |  1 +
 modules/js/CMakeLists.txt                  | 82 +++++++---------------
 modules/js/common.cmake                    | 13 ++++
 modules/js/generator/CMakeLists.txt        | 74 +++++++++++++++++++
 modules/js/{src => generator}/embindgen.py | 10 +--
 modules/js/{src => generator}/templates.py |  0
 modules/js/src/core_bindings.cpp           |  2 +-
 platforms/js/build_js.py                   |  2 -
 8 files changed, 122 insertions(+), 62 deletions(-)
 create mode 100644 cmake/platforms/OpenCV-Emscripten.cmake
 create mode 100644 modules/js/common.cmake
 create mode 100644 modules/js/generator/CMakeLists.txt
 rename modules/js/{src => generator}/embindgen.py (99%)
 rename modules/js/{src => generator}/templates.py (100%)

diff --git a/cmake/platforms/OpenCV-Emscripten.cmake b/cmake/platforms/OpenCV-Emscripten.cmake
new file mode 100644
index 0000000000..ec15fba799
--- /dev/null
+++ b/cmake/platforms/OpenCV-Emscripten.cmake
@@ -0,0 +1 @@
+set(OPENCV_SKIP_LINK_AS_NEEDED 1)

diff --git a/modules/js/CMakeLists.txt b/modules/js/CMakeLists.txt
index f3a625b37e..d82e4a26f6 100644
--- a/modules/js/CMakeLists.txt
+++ b/modules/js/CMakeLists.txt
@@ -1,13 +1,19 @@
 # ----------------------------------------------------------------------------
 #  CMake file for js support
 # ----------------------------------------------------------------------------
-set(the_description "The js bindings")
-
-if(NOT BUILD_opencv_js)  # should be enabled explicitly (by build_js.py script)
-  ocv_module_disable(js)
+if(OPENCV_INITIAL_PASS)
+  # generator for JavaScript source code and documentation signatures
+  add_subdirectory(generator)
 endif()

+if(NOT BUILD_opencv_js)  # should be enabled explicitly (by build_js.py script)
+  return()
+endif()
+
+set(the_description "The JavaScript(JS) bindings")
+
 set(OPENCV_JS "opencv.js")
+set(JS_HELPER "${CMAKE_CURRENT_SOURCE_DIR}/src/helpers.js")

 find_path(EMSCRIPTEN_INCLUDE_DIR
           emscripten/bind.h
@@ -28,59 +34,18 @@ if(NOT EMSCRIPTEN_INCLUDE_DIR OR NOT PYTHON_DEFAULT_AVAILABLE)
   ocv_module_disable(js)
 endif()

-ocv_add_module(js BINDINGS)
+ocv_add_module(js BINDINGS PRIVATE_REQUIRED opencv_js_bindings_generator)

 ocv_module_include_directories(${EMSCRIPTEN_INCLUDE_DIR})

-# get list of modules to wrap
-# message(STATUS "Wrapped in js:")
-set(OPENCV_JS_MODULES)
-foreach(m ${OPENCV_MODULES_BUILD})
-  if(";${OPENCV_MODULE_${m}_WRAPPERS};" MATCHES ";js;" AND HAVE_${m})
-    list(APPEND OPENCV_JS_MODULES ${m})
-    # message(STATUS "\t${m}")
-  endif()
-endforeach()
-set(opencv_hdrs "")
-foreach(m ${OPENCV_JS_MODULES})
-  list(APPEND opencv_hdrs ${OPENCV_MODULE_${m}_HEADERS})
-endforeach(m)
-
-# header blacklist
-ocv_list_filterout(opencv_hdrs "modules/.*.h$")
-ocv_list_filterout(opencv_hdrs "modules/core/.*/cuda")
-ocv_list_filterout(opencv_hdrs "modules/core/.*/opencl")
-ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/opengl.hpp")
-ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/ocl.hpp")
-ocv_list_filterout(opencv_hdrs "modules/cuda.*")
-ocv_list_filterout(opencv_hdrs "modules/cudev")
-ocv_list_filterout(opencv_hdrs "modules/core/.*/hal/")
-ocv_list_filterout(opencv_hdrs "modules/.*/detection_based_tracker.hpp") # Conditional compilation
-ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/utils/.*")
-
-file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/headers.txt" "${opencv_hdrs}")
-
-set(bindings_cpp "${CMAKE_CURRENT_BINARY_DIR}/bindings.cpp")
-
-set(scripts_hdr_parser "${CMAKE_CURRENT_SOURCE_DIR}/../python/src2/hdr_parser.py")
-
-set(JS_HELPER "${CMAKE_CURRENT_SOURCE_DIR}/src/helpers.js")
-
-add_custom_command(
-    OUTPUT ${bindings_cpp}
-    COMMAND ${PYTHON_DEFAULT_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src/embindgen.py" ${scripts_hdr_parser} ${bindings_cpp} "${CMAKE_CURRENT_BINARY_DIR}/headers.txt" "${CMAKE_CURRENT_SOURCE_DIR}/src/core_bindings.cpp"
-    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/core_bindings.cpp
-    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/embindgen.py
-    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/templates.py
-    DEPENDS ${scripts_hdr_parser}
-    #(not needed - generated by CMake) DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/headers.txt
-    DEPENDS ${opencv_hdrs}
-    DEPENDS ${JS_HELPER})
-
 add_definitions("-std=c++11")

-link_libraries(${OPENCV_MODULE_${the_module}_DEPS})
+set(deps ${OPENCV_MODULE_${the_module}_DEPS})
+list(REMOVE_ITEM deps opencv_js_bindings_generator)  # don't add dummy module
+link_libraries(${deps})
+
+set(bindings_cpp "${OPENCV_JS_BINDINGS_DIR}/gen/bindings.cpp")
+set_source_files_properties(${bindings_cpp} PROPERTIES GENERATED TRUE)

 OCV_OPTION(BUILD_WASM_INTRIN_TESTS "Build WASM intrin tests" OFF )
 if(BUILD_WASM_INTRIN_TESTS)
@@ -94,12 +59,17 @@ else()
   ocv_add_executable(${the_module} ${bindings_cpp})
 endif()

+add_dependencies(${the_module} gen_opencv_js_source)
+
 set_target_properties(${the_module} PROPERTIES COMPILE_FLAGS "-Wno-missing-prototypes")

+#set(EMSCRIPTEN_LINK_FLAGS "${EMSCRIPTEN_LINK_FLAGS} -s NODERAWFS=0")
 set(EMSCRIPTEN_LINK_FLAGS "${EMSCRIPTEN_LINK_FLAGS} --memory-init-file 0 -s TOTAL_MEMORY=128MB -s WASM_MEM_MAX=1GB -s ALLOW_MEMORY_GROWTH=1")
 set(EMSCRIPTEN_LINK_FLAGS "${EMSCRIPTEN_LINK_FLAGS} -s MODULARIZE=1 -s SINGLE_FILE=1")
 set(EMSCRIPTEN_LINK_FLAGS "${EMSCRIPTEN_LINK_FLAGS} -s EXPORT_NAME=\"'cv'\" -s DEMANGLE_SUPPORT=1")
 set(EMSCRIPTEN_LINK_FLAGS "${EMSCRIPTEN_LINK_FLAGS} -s FORCE_FILESYSTEM=1 --use-preload-plugins --bind --post-js ${JS_HELPER} -Wno-missing-prototypes")
+#set(EMSCRIPTEN_LINK_FLAGS "${EMSCRIPTEN_LINK_FLAGS} --use-preload-plugins --bind --post-js ${JS_HELPER} -Wno-missing-prototypes")
+#set(EMSCRIPTEN_LINK_FLAGS "${EMSCRIPTEN_LINK_FLAGS} -s FORCE_FILESYSTEM=1 --bind --post-js ${JS_HELPER} -Wno-missing-prototypes")
 set_target_properties(${the_module} PROPERTIES LINK_FLAGS "${EMSCRIPTEN_LINK_FLAGS}")

 # add UMD wrapper
@@ -150,7 +120,7 @@ add_custom_command(OUTPUT "${opencv_test_js_bin_dir}/${test_data}"
 )
 list(APPEND opencv_test_js_file_deps "${test_data_path}" "${opencv_test_js_bin_dir}/${test_data}")

-add_custom_target(${PROJECT_NAME}_test ALL
+add_custom_target(${PROJECT_NAME}_test
                   DEPENDS ${OCV_JS_PATH} ${opencv_test_js_file_deps})

 # perf
@@ -173,7 +143,7 @@ foreach(f ${perf_files})
   list(APPEND opencv_perf_js_file_deps "${perf_dir}/${f}" "${opencv_perf_js_bin_dir}/${f}")
 endforeach()

-add_custom_target(${PROJECT_NAME}_perf ALL
+add_custom_target(${PROJECT_NAME}_perf
                   DEPENDS ${OCV_JS_PATH} ${opencv_perf_js_file_deps})

 #loader
@@ -193,4 +163,6 @@ add_custom_command(
 list(APPEND opencv_loader_js_file_deps "${loader_dir}/loader.js" "${opencv_loader_js_bin_dir}/loader.js")

 add_custom_target(${PROJECT_NAME}_loader ALL
-  DEPENDS ${OCV_JS_PATH} ${opencv_loader_js_file_deps})
\ No newline at end of file
+  DEPENDS ${OCV_JS_PATH} ${opencv_loader_js_file_deps})
+
+add_custom_target(opencv_test_js ALL DEPENDS opencv_js_test opencv_js_perf opencv_js_loader)

diff --git a/modules/js/common.cmake b/modules/js/common.cmake
new file mode 100644
index 0000000000..192bcca4ea
--- /dev/null
+++ b/modules/js/common.cmake
@@ -0,0 +1,13 @@
+# get list of modules to wrap
+if(HAVE_opencv_js)
+  message(STATUS "Wrapped in JavaScript(js):")
+endif()
+set(OPENCV_JS_MODULES "")
+foreach(m ${OPENCV_MODULES_BUILD})
+  if(";${OPENCV_MODULE_${m}_WRAPPERS};" MATCHES ";js;" AND HAVE_${m})
+    list(APPEND OPENCV_JS_MODULES ${m})
+    if(HAVE_opencv_js)
+      message(STATUS "    ${m}")
+    endif()
+  endif()
+endforeach()

diff --git a/modules/js/generator/CMakeLists.txt b/modules/js/generator/CMakeLists.txt
new file mode 100644
index 0000000000..75c8a03545
--- /dev/null
+++ b/modules/js/generator/CMakeLists.txt
@@ -0,0 +1,74 @@
+set(MODULE_NAME "js_bindings_generator")
+set(OPENCV_MODULE_IS_PART_OF_WORLD FALSE)
+ocv_add_module(${MODULE_NAME} INTERNAL)
+
+set(OPENCV_JS_BINDINGS_DIR "${CMAKE_CURRENT_BINARY_DIR}" CACHE INTERNAL "")
+file(REMOVE_RECURSE "${OPENCV_JS_BINDINGS_DIR}/gen")
+file(MAKE_DIRECTORY "${OPENCV_JS_BINDINGS_DIR}/gen")
+file(REMOVE "${OPENCV_DEPHELPER}/gen_opencv_js_source")  # force re-run after CMake
+
+# This file is included from a subdirectory
+set(JS_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/..")
+include(${JS_SOURCE_DIR}/common.cmake)  # fill OPENCV_JS_MODULES
+
+set(opencv_hdrs "")
+foreach(m ${OPENCV_JS_MODULES})
+  list(APPEND opencv_hdrs ${OPENCV_MODULE_${m}_HEADERS})
+endforeach(m)
+
+# header blacklist
+ocv_list_filterout(opencv_hdrs "modules/.*.h$")
+ocv_list_filterout(opencv_hdrs "modules/core/.*/cuda")
+ocv_list_filterout(opencv_hdrs "modules/core/.*/opencl")
+ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/opengl.hpp")
+ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/ocl.hpp")
+ocv_list_filterout(opencv_hdrs "modules/cuda.*")
+ocv_list_filterout(opencv_hdrs "modules/cudev")
+ocv_list_filterout(opencv_hdrs "modules/core/.*/hal/")
+ocv_list_filterout(opencv_hdrs "modules/.*/detection_based_tracker.hpp") # Conditional compilation
+ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/utils/.*")
+
+ocv_update_file("${CMAKE_CURRENT_BINARY_DIR}/headers.txt" "${opencv_hdrs}")
+
+set(bindings_cpp "${OPENCV_JS_BINDINGS_DIR}/gen/bindings.cpp")
+
+set(scripts_hdr_parser "${JS_SOURCE_DIR}/../python/src2/hdr_parser.py")
+
+if(DEFINED ENV{OPENCV_JS_WHITELIST})
+  set(OPENCV_JS_WHITELIST_FILE "$ENV{OPENCV_JS_WHITELIST}")
+else()
+  set(OPENCV_JS_WHITELIST_FILE "${OpenCV_SOURCE_DIR}/platforms/js/opencv_js.config.py")
+endif()
+
+add_custom_command(
+    OUTPUT ${bindings_cpp} "${OPENCV_DEPHELPER}/gen_opencv_js_source"
+    COMMAND
+        ${PYTHON_DEFAULT_EXECUTABLE}
+        "${CMAKE_CURRENT_SOURCE_DIR}/embindgen.py"
+        "${scripts_hdr_parser}"
+        "${bindings_cpp}"
+        "${CMAKE_CURRENT_BINARY_DIR}/headers.txt"
+        "${JS_SOURCE_DIR}/src/core_bindings.cpp"
+        "${OPENCV_JS_WHITELIST_FILE}"
+    COMMAND
+        ${CMAKE_COMMAND} -E touch "${OPENCV_DEPHELPER}/gen_opencv_js_source"
+    WORKING_DIRECTORY
+        "${CMAKE_CURRENT_BINARY_DIR}/gen"
+    DEPENDS
+        ${JS_SOURCE_DIR}/src/core_bindings.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/embindgen.py
+        ${CMAKE_CURRENT_SOURCE_DIR}/templates.py
+        ${scripts_hdr_parser}
+        #(not needed - generated by CMake) ${CMAKE_CURRENT_BINARY_DIR}/headers.txt
+        ${opencv_hdrs}
+    COMMENT "Generate source files for JavaScript bindings"
+)
+
+add_custom_target(gen_opencv_js_source
+    # excluded from all: ALL
+    DEPENDS ${bindings_cpp} "${OPENCV_DEPHELPER}/gen_opencv_js_source"
+    SOURCES
+        ${JS_SOURCE_DIR}/src/core_bindings.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/embindgen.py
+        ${CMAKE_CURRENT_SOURCE_DIR}/templates.py
+)

diff --git a/modules/js/src/embindgen.py b/modules/js/generator/embindgen.py
similarity index 99%
rename from modules/js/src/embindgen.py
rename to modules/js/generator/embindgen.py
index 0ec4488946..6e2bac71a1 100644
--- a/modules/js/src/embindgen.py
+++ b/modules/js/generator/embindgen.py
@@ -104,8 +104,6 @@ def makeWhiteList(module_list):
     return wl

 white_list = None
-exec(open(os.environ["OPENCV_JS_WHITELIST"]).read())
-assert(white_list)

 # Features to be exported
 export_enums = False
@@ -891,10 +889,10 @@ class JSWrapperGenerator(object):

 if __name__ == "__main__":
-    if len(sys.argv) < 4:
+    if len(sys.argv) < 5:
         print("Usage:\n", \
             os.path.basename(sys.argv[0]), \
-            " ")
+            " ")
         print("Current args are: ", ", ".join(["'"+a+"'" for a in sys.argv]))
         exit(0)
@@ -908,5 +906,9 @@ if __name__ == "__main__":
     bindingsCpp = sys.argv[2]
     headers = open(sys.argv[3], 'r').read().split(';')
     coreBindings = sys.argv[4]
+    whiteListFile = sys.argv[5]
+    exec(open(whiteListFile).read())
+    assert(white_list)
+
     generator = JSWrapperGenerator()
     generator.gen(bindingsCpp, headers, coreBindings)

diff --git a/modules/js/src/templates.py b/modules/js/generator/templates.py
similarity index 100%
rename from modules/js/src/templates.py
rename to modules/js/generator/templates.py

diff --git a/modules/js/src/core_bindings.cpp b/modules/js/src/core_bindings.cpp
index ceeb641c7c..a43fb726de 100644
--- a/modules/js/src/core_bindings.cpp
+++ b/modules/js/src/core_bindings.cpp
@@ -87,7 +87,7 @@ namespace hal {
 using namespace emscripten;
 using namespace cv;
 #ifdef HAVE_OPENCV_DNN
-using namespace dnn;
+using namespace cv::dnn;
 #endif

 namespace binding_utils

diff --git a/platforms/js/build_js.py b/platforms/js/build_js.py
index 38e988a3bd..cd22db0f02 100644
--- a/platforms/js/build_js.py
+++ b/platforms/js/build_js.py
@@ -129,11 +129,9 @@ class Builder:
                "-DBUILD_opencv_superres=OFF",
                "-DBUILD_opencv_stitching=OFF",
                "-DBUILD_opencv_java=OFF",
-               "-DBUILD_opencv_java_bindings_generator=OFF",
                "-DBUILD_opencv_js=ON",
                "-DBUILD_opencv_python2=OFF",
                "-DBUILD_opencv_python3=OFF",
-               "-DBUILD_opencv_python_bindings_generator=OFF",
                "-DBUILD_EXAMPLES=OFF",
                "-DBUILD_PACKAGE=OFF",
                "-DBUILD_TESTS=OFF",
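With this patch, embindgen.py no longer reads an OPENCV_JS_WHITELIST environment variable; it exec()s the whitelist file passed as its fifth argument (opencv_js.config.py by default). A minimal sketch of what such a config file can look like — the module and function names below are illustrative placeholders, not the real whitelist shipped in platforms/js/opencv_js.config.py:

    # Hypothetical minimal whitelist in the opencv_js.config.py shape.
    # embindgen.py exec()s this file after defining makeWhiteList(), so that
    # helper is available here without an import.
    core = {
        '': ['absdiff', 'add', 'norm'],  # free functions from the cv:: namespace
        'Algorithm': [],                 # export a class with no extra members
    }
    imgproc = {'': ['cvtColor', 'resize']}

    white_list = makeWhiteList([core, imgproc])

Pointing the OPENCV_JS_WHITELIST environment variable at a file of this shape at CMake configure time selects it instead of the default config.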