Commit 66d7956

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

2 parents: 3df83dc + 1f519ee


43 files changed: +428 -1716 lines

CMakeLists.txt (+2 -1)

@@ -946,7 +946,8 @@ if(CV_TRACE)
   include(cmake/OpenCVDetectTrace.cmake)
 endif()
 
-ocv_cmake_hook(POST_DETECT_DEPENDECIES)
+ocv_cmake_hook(POST_DETECT_DEPENDECIES)  # typo, deprecated (2019-06)
+ocv_cmake_hook(POST_DETECT_DEPENDENCIES)
 
 # ----------------------------------------------------------------------------
 # Solution folders:

modules/calib3d/src/undistort.cpp (+6)

@@ -482,6 +482,12 @@ static void cvUndistortPointsInternal( const CvMat* _src, CvMat* _dst, const CvM
                 break;
             double r2 = x*x + y*y;
             double icdist = (1 + ((k[7]*r2 + k[6])*r2 + k[5])*r2)/(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
+            if (icdist < 0)  // test: undistortPoints.regression_14583
+            {
+                x = (u - cx)*ifx;
+                y = (v - cy)*ify;
+                break;
+            }
             double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x)+ k[8]*r2+k[9]*r2*r2;
             double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y+ k[10]*r2+k[11]*r2*r2;
             x = (x0 - deltaX)*icdist;
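
The rationale for the guard (an inference from the diff, not from the commit message): icdist is the correction factor of the rational distortion model applied at each fixed-point iteration, and a negative value is non-physical since it would flip the point through the principal point, signaling that the iteration is diverging for this input; the patch resets x, y to the initial normalized coordinates (u, v are the input pixel coordinates, cx, cy, ifx, ify the intrinsics) and stops iterating. A minimal standalone reproduction, reusing the camera matrix and distortion coefficients from the regression test below (everything else is a sketch, not part of the commit):

#include <opencv2/calib3d.hpp>
#include <iostream>

int main()
{
    // Intrinsics and coefficients copied from undistortPoints.regression_14583.
    float K[] = {437.8995f, 0.f, 342.9241f,
                 0.f, 438.8216f, 273.7163f,
                 0.f, 0.f, 1.f};
    cv::Mat cameraMatrix(3, 3, CV_32F, K);

    float d[] = {-0.34329f, 0.11431f, 0.f, 0.f, -0.017375f};
    cv::Mat distCoeffs(1, 5, CV_32F, d);

    // A corner pixel, far from the principal point, where the model misbehaves.
    cv::Mat src(1, 1, CV_32FC2, cv::Scalar(720.f, 0.f)), dst;
    cv::undistortPoints(src, dst, cameraMatrix, distCoeffs, cv::noArray(), cameraMatrix);
    std::cout << dst.at<cv::Vec2f>(0) << std::endl;
    return 0;
}

With the fix, the undistorted x coordinate stays within half an image width of the input, which is exactly what the EXPECT_NEAR tolerance of col / 2 in the test asserts.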

modules/calib3d/test/test_undistort_points.cpp (+26)

@@ -119,4 +119,30 @@ TEST(Calib3d_Undistort, stop_criteria)
     ASSERT_LE(obtainedError, maxError);
 }
 
+TEST(undistortPoints, regression_14583)
+{
+    const int col = 720;
+    // const int row = 540;
+    float camera_matrix_value[] = {
+        437.8995f, 0.0f,      342.9241f,
+        0.0f,      438.8216f, 273.7163f,
+        0.0f,      0.0f,      1.0f
+    };
+    cv::Mat camera_interior(3, 3, CV_32F, camera_matrix_value);
+
+    float camera_distort_value[] = {-0.34329f, 0.11431f, 0.0f, 0.0f, -0.017375f};
+    cv::Mat camera_distort(1, 5, CV_32F, camera_distort_value);
+
+    float distort_points_value[] = {col, 0.};
+    cv::Mat distort_pt(1, 1, CV_32FC2, distort_points_value);
+
+    cv::Mat undistort_pt;
+    cv::undistortPoints(distort_pt, undistort_pt, camera_interior,
+                        camera_distort, cv::Mat(), camera_interior);
+
+    EXPECT_NEAR(distort_pt.at<Vec2f>(0)[0], undistort_pt.at<Vec2f>(0)[0], col / 2)
+        << "distort point: " << distort_pt << std::endl
+        << "undistort point: " << undistort_pt;
+}
+
 }} // namespace

modules/core/include/opencv2/core/hal/intrin_avx512.hpp (+82 -82)

(Large diff, not rendered.)

modules/dnn/include/opencv2/dnn/all_layers.hpp (+1 -4)

@@ -492,10 +492,7 @@ CV__DNN_INLINE_NS_BEGIN
     class CV_EXPORTS CropLayer : public Layer
     {
     public:
-        int startAxis;
-        std::vector<int> offset;
-
-        static Ptr<CropLayer> create(const LayerParams &params);
+        static Ptr<Layer> create(const LayerParams &params);
     };
 
     class CV_EXPORTS EltwiseLayer : public Layer
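
The public startAxis and offset fields are gone and create() now returns a generic Ptr<Layer>, so callers configure the crop through LayerParams rather than by poking fields of the concrete class. A sketch of the resulting call pattern; the "axis" and "offset" parameter names follow the Caffe Crop convention and are an assumption, not something this diff confirms:

#include <opencv2/dnn.hpp>

int main()
{
    cv::dnn::LayerParams params;
    params.name = "crop";
    params.type = "Crop";
    params.set("axis", 2);    // assumed: first axis to crop
    params.set("offset", 0);  // assumed: crop offset along that axis
    cv::Ptr<cv::dnn::Layer> crop = cv::dnn::CropLayer::create(params);
    return crop.empty() ? 1 : 0;
}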

modules/dnn/src/dnn.cpp (-97)

@@ -735,20 +735,9 @@ struct DataLayer : public Layer
         }
         biases->set(biasesVec);
 
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
         InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
         addConstantData("weights", weights, ieLayer);
         addConstantData("biases", biases, ieLayer);
-#else
-        InferenceEngine::LayerParams lp;
-        lp.name = name;
-        lp.type = "ScaleShift";
-        lp.precision = InferenceEngine::Precision::FP32;
-        std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
-
-        ieLayer->_weights = weights;
-        ieLayer->_biases = biases;
-#endif
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #endif  // HAVE_INF_ENGINE
         return Ptr<BackendNode>();

@@ -1488,11 +1477,7 @@ struct Net::Impl
                 if (layerNet != ieInpNode->net)
                 {
                     // layerNet is empty or nodes are from different graphs.
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
                     ieInpNode->net->addOutput(ieInpNode->layer.getName());
-#else
-                    ieInpNode->net->addOutput(ieInpNode->layer->name);
-#endif
                 }
             }
         }

@@ -1642,25 +1627,6 @@ struct Net::Impl
                 }
             }
 
-#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
-            // The same blobs wrappers cannot be shared between two Inference Engine
-            // networks because of explicit references between layers and blobs.
-            // So we need to rewrap all the external blobs.
-            for (int i = 0; i < ld.inputBlobsId.size(); ++i)
-            {
-                LayerPin inPin = ld.inputBlobsId[i];
-                auto it = netBlobsWrappers.find(inPin);
-                if (it == netBlobsWrappers.end())
-                {
-                    ld.inputBlobsWrappers[i] = InfEngineBackendWrapper::create(ld.inputBlobsWrappers[i]);
-                    netBlobsWrappers[inPin] = ld.inputBlobsWrappers[i];
-                }
-                else
-                    ld.inputBlobsWrappers[i] = it->second;
-            }
-            netBlobsWrappers[LayerPin(ld.id, 0)] = ld.outputBlobsWrappers[0];
-#endif  // IE < R5
-
             Ptr<BackendNode> node;
             if (!net.empty())
             {

@@ -1691,7 +1657,6 @@ struct Net::Impl
             ieNode->net = net;
 
             // Convert weights in FP16 for specific targets.
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
             if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
                  preferableTarget == DNN_TARGET_MYRIAD ||
                  preferableTarget == DNN_TARGET_FPGA) && !fused)

@@ -1733,47 +1698,6 @@ struct Net::Impl
             net->addBlobs(ld.inputBlobsWrappers);
             net->addBlobs(ld.outputBlobsWrappers);
             addInfEngineNetOutputs(ld);
-
-#else  // IE >= R5
-
-            auto weightableLayer = std::dynamic_pointer_cast<InferenceEngine::WeightableLayer>(ieNode->layer);
-            if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
-                 preferableTarget == DNN_TARGET_MYRIAD ||
-                 preferableTarget == DNN_TARGET_FPGA) && !fused)
-            {
-                ieNode->layer->precision = InferenceEngine::Precision::FP16;
-                if (weightableLayer)
-                {
-                    if (weightableLayer->_weights)
-                        weightableLayer->_weights = convertFp16(weightableLayer->_weights);
-                    if (weightableLayer->_biases)
-                        weightableLayer->_biases = convertFp16(weightableLayer->_biases);
-                }
-                else
-                {
-                    for (const auto& weights : {"weights", "biases"})
-                    {
-                        auto it = ieNode->layer->blobs.find(weights);
-                        if (it != ieNode->layer->blobs.end())
-                            it->second = convertFp16(it->second);
-                    }
-                }
-            }
-            if (weightableLayer)
-            {
-                if (weightableLayer->_weights)
-                    weightableLayer->blobs["weights"] = weightableLayer->_weights;
-                if (weightableLayer->_biases)
-                    weightableLayer->blobs["biases"] = weightableLayer->_biases;
-            }
-            ieNode->connect(ld.inputBlobsWrappers, ld.outputBlobsWrappers);
-            net->addBlobs(ld.inputBlobsWrappers);
-            net->addBlobs(ld.outputBlobsWrappers);
-
-            if (!fused)
-                net->addLayer(ieNode->layer);
-            addInfEngineNetOutputs(ld);
-#endif  // IE >= R5
         }
 
         // Initialize all networks.

@@ -1795,23 +1719,6 @@ struct Net::Impl
 
             if (!ieNode->net->isInitialized())
             {
-#if INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2018R4)
-                // For networks which is built in runtime we need to specify a
-                // version of it's hyperparameters.
-                std::string versionTrigger = "<net name=\"TestInput\" version=\"3\" batch=\"1\">"
-                                             "<layers>"
-                                             "<layer name=\"data\" type=\"Input\" precision=\"FP32\" id=\"0\">"
-                                             "<output>"
-                                             "<port id=\"0\">"
-                                             "<dim>1</dim>"
-                                             "</port>"
-                                             "</output>"
-                                             "</layer>"
-                                             "</layers>"
-                                             "</net>";
-                InferenceEngine::CNNNetReader reader;
-                reader.ReadNetwork(versionTrigger.data(), versionTrigger.size());
-#endif
                 ieNode->net->init(preferableTarget);
                 ld.skip = false;
             }

@@ -2693,11 +2600,7 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
     Net cvNet;
     cvNet.setInputsNames(inputsNames);
 
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
     Ptr<InfEngineBackendNode> backendNode(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
-#else
-    Ptr<InfEngineBackendNode> backendNode(new InfEngineBackendNode(0));
-#endif
    backendNode->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
     for (auto& it : ieNet.getOutputsInfo())
     {
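
All of these hunks delete branches that were only reachable below Inference Engine 2018R5, which this merge makes the minimum supported release. For readers unfamiliar with the guard macros, a self-contained illustration of the convention; the macro names mirror the diff, but the numeric packing is an illustrative assumption, not copied from op_inf_engine.hpp:

#include <cstdio>

// Assumed release packing: YYYYRR0000 (illustrative values only).
#define INF_ENGINE_RELEASE_2018R4 2018040000
#define INF_ENGINE_RELEASE_2018R5 2018050000
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2018R5  // the floor after this merge

#define INF_ENGINE_VER_MAJOR_GE(ver) (INF_ENGINE_RELEASE >= (ver))
#define INF_ENGINE_VER_MAJOR_LT(ver) (INF_ENGINE_RELEASE <  (ver))
#define INF_ENGINE_VER_MAJOR_EQ(ver) (INF_ENGINE_RELEASE / 10000 == (ver) / 10000)

int main()
{
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
    std::puts("Builder API path: the only branch this commit keeps");
#else
    std::puts("legacy LayerParams path: dead code once R5 is the minimum");
#endif
    return 0;
}

With INF_ENGINE_RELEASE pinned at or above 2018R5, every INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) guard is always true and every LT/EQ guard against older releases is always false, which is why the branches could be removed wholesale.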

modules/dnn/src/layers/batch_norm_layer.cpp (+2 -17)

@@ -351,31 +351,16 @@ class BatchNormLayerImpl CV_FINAL : public BatchNormLayer
     }
 #endif  // HAVE_HALIDE
 
+#ifdef HAVE_INF_ENGINE
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
-#ifdef HAVE_INF_ENGINE
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
         InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
         const size_t numChannels = weights_.total();
         addConstantData("weights", wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
         addConstantData("biases", wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#else
-        InferenceEngine::LayerParams lp;
-        lp.name = name;
-        lp.type = "ScaleShift";
-        lp.precision = InferenceEngine::Precision::FP32;
-        std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
-
-        const size_t numChannels = weights_.total();
-        ieLayer->_weights = wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C);
-        ieLayer->_biases = wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C);
-
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#endif
-#endif  // HAVE_INF_ENGINE
-        return Ptr<BackendNode>();
     }
+#endif  // HAVE_INF_ENGINE
 
     virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                            const std::vector<MatShape> &outputs) const CV_OVERRIDE
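
The same restructuring recurs in blank_layer.cpp and concat_layer.cpp below: instead of compiling initInfEngine unconditionally and falling through to an empty Ptr<BackendNode>() when Inference Engine support is absent, the whole override is now guarded by #ifdef HAVE_INF_ENGINE. A minimal self-contained illustration of the shape of the change, with generic names rather than OpenCV's:

#include <memory>

struct BackendNode { virtual ~BackendNode() {} };

struct Layer
{
    virtual ~Layer() {}
    // Base class: no backend node unless a subclass provides one.
    virtual std::shared_ptr<BackendNode> initInfEngine()
    {
        return std::shared_ptr<BackendNode>();
    }
};

struct MyLayer : Layer
{
#ifdef HAVE_INF_ENGINE
    // After the change, the override only exists in IE-enabled builds, so the
    // unreachable "return empty node" tail inside the method disappears.
    std::shared_ptr<BackendNode> initInfEngine() override
    {
        return std::make_shared<BackendNode>();
    }
#endif  // HAVE_INF_ENGINE
};

int main()
{
    MyLayer layer;
    return layer.initInfEngine() ? 0 : 1;  // 0 only when built with -DHAVE_INF_ENGINE
}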

modules/dnn/src/layers/blank_layer.cpp (+4 -17)

@@ -107,12 +107,12 @@ class BlankLayerImpl CV_FINAL : public BlankLayer
                 inputs[i].copyTo(outputs[i]);
     }
 
+#ifdef HAVE_INF_ENGINE
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
-#ifdef HAVE_INF_ENGINE
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
         CV_Assert(!input->dims.empty());
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+
         InferenceEngine::Builder::Layer ieLayer(name);
         ieLayer.setName(name);
         if (preferableTarget == DNN_TARGET_MYRIAD)

@@ -122,29 +122,16 @@ class BlankLayerImpl CV_FINAL : public BlankLayer
         else
         {
             ieLayer.setType("Split");
-            ieLayer.getParameters()["axis"] = (size_t)0;
+            ieLayer.getParameters()["axis"] = input->dims.size() - 1;
             ieLayer.getParameters()["out_sizes"] = input->dims[0];
         }
         std::vector<size_t> shape(input->dims);
         std::reverse(shape.begin(), shape.end());
         ieLayer.setInputPorts({InferenceEngine::Port(shape)});
         ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#else
-        InferenceEngine::LayerParams lp;
-        lp.name = name;
-        lp.type = "Split";
-        lp.precision = InferenceEngine::Precision::FP32;
-        std::shared_ptr<InferenceEngine::SplitLayer> ieLayer(new InferenceEngine::SplitLayer(lp));
-#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
-        ieLayer->params["axis"] = format("%d", (int)input->dims.size() - 1);
-        ieLayer->params["out_sizes"] = format("%d", (int)input->dims[0]);
-#endif
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#endif
-#endif  // HAVE_INF_ENGINE
-        return Ptr<BackendNode>();
     }
+#endif  // HAVE_INF_ENGINE
 };
 
 Ptr<Layer> BlankLayer::create(const LayerParams& params)

modules/dnn/src/layers/concat_layer.cpp (+2 -14)

@@ -310,29 +310,17 @@ class ConcatLayerImpl CV_FINAL : public ConcatLayer
         return Ptr<BackendNode>();
     }
 
+#ifdef HAVE_INF_ENGINE
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
    {
-#ifdef HAVE_INF_ENGINE
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
 
         InferenceEngine::Builder::ConcatLayer ieLayer(name);
         ieLayer.setAxis(clamp(axis, input->dims.size()));
         ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#else
-        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        InferenceEngine::LayerParams lp;
-        lp.name = name;
-        lp.type = "Concat";
-        lp.precision = InferenceEngine::Precision::FP32;
-        std::shared_ptr<InferenceEngine::ConcatLayer> ieLayer(new InferenceEngine::ConcatLayer(lp));
-        ieLayer->_axis = clamp(axis, input->dims.size());
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#endif
-#endif  // HAVE_INF_ENGINE
-        return Ptr<BackendNode>();
     }
+#endif  // HAVE_INF_ENGINE
 };
 
 Ptr<ConcatLayer> ConcatLayer::create(const LayerParams& params)
