Skip to content

Commit 863d6ad

Browse files
committed
Merge remote-tracking branch 'upstream/3.4' into merge-3.4
2 parents fa50918 + 6ae9809 commit 863d6ad

File tree

10 files changed

+71
-16
lines changed

10 files changed

+71
-16
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,4 +57,4 @@ In order to keep a clean overview containing all contributed modules the followi
5757

5858
1. Update the README.md file under the modules folder. Here you add your module with a single-line description.
5959

60-
2. Add a README.md inside your own module folder. This README explains which functionality (seperate functions) is available, links to the corresponding samples and explains in somewhat more detail what the module is expected to do. If any extra requirements are needed to build the module without problems, add them here also.
60+
2. Add a README.md inside your own module folder. This README explains which functionality (separate functions) is available, links to the corresponding samples and explains in somewhat more detail what the module is expected to do. If any extra requirements are needed to build the module without problems, add them here also.

modules/cnn_3dobj/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,7 @@ $ ./example_cnn_3dobj_classify -mean_file=../data/images_mean/triplet_mean.binar
7979
```
8080
===========================================================
8181
##Demo3: Model performance test
82-
####This demo will run a performance test of a trained CNN model on several images. If the the model fails on telling different samples from seperate classes apart, or is confused on samples with similar pose but from different classes, this will give some information for model analysis.
82+
####This demo will run a performance test of a trained CNN model on several images. If the model fails to tell different samples from separate classes apart, or is confused on samples with similar pose but from different classes, this will give some information for model analysis.
8383
```
8484
$ ./example_cnn_3dobj_model_analysis
8585
```

modules/cnn_3dobj/include/opencv2/cnn_3dobj.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@ the use of this software, even if advised of the possibility of such damage.
7373
7474
As CNN based learning algorithm shows better performance on the classification issues,
7575
the rich labeled data could be more useful in the training stage. 3D object classification and pose estimation
76-
is a jointed mission aimming at seperate different posed apart in the descriptor form.
76+
is a joint mission aiming at telling different poses apart in the descriptor form.
7777
7878
In the training stage, we prepare 2D training images generated from our module with their
7979
class label and pose label. We fully exploit the information lies in their labels

modules/ovis/include/opencv2/ovis.hpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,8 @@ namespace ovis {
1818

1919
enum SceneSettings
2020
{
21-
/// the window will use a seperate scene. The scene will be shared otherwise.
22-
SCENE_SEPERATE = 1,
21+
/// the window will use a separate scene. The scene will be shared otherwise.
22+
SCENE_SEPARATE = 1,
2323
/// allow the user to control the camera.
2424
SCENE_INTERACTIVE = 2,
2525
/// draw coordinate system crosses for debugging

modules/ovis/samples/ovis_demo.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ int main()
4343
owin->playEntityAnimation("sinbad", "IdleTop");
4444

4545
//interaction scene
46-
Ptr<ovis::WindowScene> iwin = ovis::createWindow(String("AR"), imsize, ovis::SCENE_SEPERATE | ovis::SCENE_INTERACTIVE);
46+
Ptr<ovis::WindowScene> iwin = ovis::createWindow(String("AR"), imsize, ovis::SCENE_SEPARATE | ovis::SCENE_INTERACTIVE);
4747
iwin->createEntity("sinbad", "Sinbad.mesh", Vec3i(0, -5, 0), Vec3f(CV_PI, 0.0, 0.0));
4848
iwin->createLightEntity("sun", Vec3i(0, 0, -100));
4949
iwin->setCameraIntrinsics(K, imsize);

modules/ovis/samples/ovis_demo.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
owin.playEntityAnimation("sinbad", "IdleTop")
2424

2525
# interaction scene
26-
iwin = cv.ovis.createWindow("AR", imsize, cv.ovis.SCENE_SEPERATE | cv.ovis.SCENE_INTERACTIVE)
26+
iwin = cv.ovis.createWindow("AR", imsize, cv.ovis.SCENE_SEPARATE | cv.ovis.SCENE_INTERACTIVE)
2727
iwin.createEntity("sinbad", "Sinbad.mesh", tvec=(0, -5, 0), rot=(np.pi, 0, 0))
2828
iwin.createLightEntity("sun", (0, 0, -100))
2929
iwin.setCameraIntrinsics(K, imsize)

modules/ovis/src/ovis.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -302,10 +302,10 @@ class WindowSceneImpl : public WindowScene
302302
{
303303
if (!app->sceneMgr)
304304
{
305-
flags |= SCENE_SEPERATE;
305+
flags |= SCENE_SEPARATE;
306306
}
307307

308-
if (flags & SCENE_SEPERATE)
308+
if (flags & SCENE_SEPARATE)
309309
{
310310
sceneMgr = root->createSceneManager("DefaultSceneManager", title);
311311
RTShader::ShaderGenerator& shadergen = RTShader::ShaderGenerator::getSingleton();
@@ -362,7 +362,7 @@ class WindowSceneImpl : public WindowScene
362362

363363
~WindowSceneImpl()
364364
{
365-
if (flags & SCENE_SEPERATE)
365+
if (flags & SCENE_SEPARATE)
366366
{
367367
TextureManager& texMgr = TextureManager::getSingleton();
368368

@@ -378,7 +378,7 @@ class WindowSceneImpl : public WindowScene
378378
}
379379
}
380380

381-
if(_app->sceneMgr == sceneMgr && (flags & SCENE_SEPERATE))
381+
if(_app->sceneMgr == sceneMgr && (flags & SCENE_SEPARATE))
382382
{
383383
// this is the root window owning the context
384384
CV_Assert(_app->numWindows() == 1 && "the first OVIS window must be deleted last");

modules/saliency/samples/computeSaliency.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,7 @@ int main( int argc, char** argv )
157157
int ndet = int(saliencyMap.size());
158158
std::cout << "Objectness done " << ndet << std::endl;
159159
// The result are sorted by objectness. We only use the first maxd boxes here.
160-
int maxd = 7, step = 255 / maxd, jitter=9; // jitter to seperate single rects
160+
int maxd = 7, step = 255 / maxd, jitter=9; // jitter to separate single rects
161161
Mat draw = image.clone();
162162
for (int i = 0; i < std::min(maxd, ndet); i++) {
163163
Vec4i bb = saliencyMap[i];

modules/ximgproc/samples/edgeboxes_demo.py

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -27,10 +27,17 @@
2727
edge_boxes = cv.ximgproc.createEdgeBoxes()
2828
edge_boxes.setMaxBoxes(30)
2929
boxes = edge_boxes.getBoundingBoxes(edges, orimap)
30-
31-
for b in boxes:
32-
x, y, w, h = b
33-
cv.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 1, cv.LINE_AA)
30+
boxes, scores = edge_boxes.getBoundingBoxes(edges, orimap)
31+
32+
if len(boxes) > 0:
33+
boxes_scores = zip(boxes, scores)
34+
for b_s in boxes_scores:
35+
box = b_s[0]
36+
x, y, w, h = box
37+
cv.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 1, cv.LINE_AA)
38+
score = b_s[1][0]
39+
cv.putText(im, "{:.2f}".format(score), (x, y), cv.FONT_HERSHEY_PLAIN, 0.8, (255, 255, 255), 1, cv.LINE_AA)
40+
print("Box at (x,y)=({:d},{:d}); score={:f}".format(x, y, score))
3441

3542
cv.imshow("edges", edges)
3643
cv.imshow("edgeboxes", im)
Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
1+
// This file is part of OpenCV project.
2+
// It is subject to the license terms in the LICENSE file found in the top-level directory
3+
// of this distribution and at http://opencv.org/license.html.
4+
#include "test_precomp.hpp"
5+
6+
namespace opencv_test { namespace {
7+
8+
TEST(ximpgroc_Edgeboxes, DISABLED_regression)
9+
{
10+
//Testing Edgeboxes implementation by asking for one proposal
11+
//on a simple test image from the PASCAL VOC 2012 dataset.
12+
std::vector<Rect> boxes;
13+
std::vector<float> scores;
14+
float expectedScore = 0.48742563f;
15+
Rect expectedProposal(158, 69, 125, 154);
16+
17+
//Using sample model file, compute orientations map for use with edge detection.
18+
cv::String testImagePath = cvtest::TS::ptr()->get_data_path() + "cv/ximgproc/" + "pascal_voc_bird.png";
19+
Mat testImg = imread(testImagePath);
20+
ASSERT_FALSE(testImg.empty()) << "Could not load input image " << testImagePath;
21+
cvtColor(testImg, testImg, COLOR_BGR2RGB);
22+
testImg.convertTo(testImg, CV_32F, 1.0 / 255.0f);
23+
24+
//Use the model for structured edge detection that is already provided in opencv_extra.
25+
cv::String model_path = cvtest::TS::ptr()->get_data_path() + "cv/ximgproc/" + "model.yml.gz";
26+
Ptr<StructuredEdgeDetection> sed = createStructuredEdgeDetection(model_path);
27+
Mat edgeImage, edgeOrientations;
28+
sed->detectEdges(testImg, edgeImage);
29+
sed->computeOrientation(edgeImage, edgeOrientations);
30+
31+
//Obtain one proposal and its score from Edgeboxes.
32+
Ptr<EdgeBoxes> edgeboxes = createEdgeBoxes();
33+
edgeboxes->setMaxBoxes(1);
34+
edgeboxes->getBoundingBoxes(edgeImage, edgeOrientations, boxes, scores);
35+
36+
//We asked for one proposal and thus one score, we better get one back only.
37+
ASSERT_TRUE(boxes.size() == 1);
38+
ASSERT_TRUE(scores.size() == 1);
39+
40+
//Check the proposal and its score.
41+
EXPECT_NEAR(scores[0], expectedScore, 1e-8);
42+
EXPECT_EQ(expectedProposal.x, boxes[0].x);
43+
EXPECT_EQ(expectedProposal.y, boxes[0].y);
44+
EXPECT_EQ(expectedProposal.height, boxes[0].height);
45+
EXPECT_EQ(expectedProposal.width, boxes[0].width);
46+
}
47+
48+
}} // namespace

0 commit comments

Comments
 (0)