8 changes: 8 additions & 0 deletions modules/core/include/opencv2/core/bindings_utils.hpp
@@ -8,6 +8,8 @@
 #include <opencv2/core/async.hpp>
 #include <opencv2/core/detail/async_promise.hpp>
 
+#include <stdexcept>
+
 namespace cv { namespace utils {
 //! @addtogroup core_utils
 //! @{
@@ -113,6 +115,12 @@ String dumpRange(const Range& argument)
     }
 }
 
+CV_WRAP static inline
+void testRaiseGeneralException()
+{
+    throw std::runtime_error("exception text");
+}
+
 CV_WRAP static inline
 AsyncArray testAsyncArray(InputArray argument)
 {
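The new CV_WRAP helper above exists only so the generated bindings can check how a plain std::runtime_error propagates through the wrapper layer. A minimal C++ sketch of the behaviour being exercised (the surrounding program is illustrative, not part of this PR):

    #include <iostream>
    #include <opencv2/core/bindings_utils.hpp>

    int main()
    {
        try
        {
            cv::utils::testRaiseGeneralException();  // always throws std::runtime_error
        }
        catch (const std::exception& e)
        {
            std::cout << "caught: " << e.what() << std::endl;  // prints "caught: exception text"
        }
        return 0;
    }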
26 changes: 19 additions & 7 deletions modules/dnn/include/opencv2/dnn/shape_utils.hpp
@@ -205,21 +205,33 @@ static inline std::ostream& operator<<(std::ostream &out, const MatShape& shape)
     return out;
 }
 
-inline int clamp(int ax, int dims)
+/// @brief Converts axis from `[-dims; dims)` (similar to Python's slice notation) to `[0; dims)` range.
+static inline
+int normalize_axis(int axis, int dims)
 {
-    return ax < 0 ? ax + dims : ax;
+    CV_Check(axis, axis >= -dims && axis < dims, "");
+    axis = (axis < 0) ? (dims + axis) : axis;
+    CV_DbgCheck(axis, axis >= 0 && axis < dims, "");
+    return axis;
 }
 
-inline int clamp(int ax, const MatShape& shape)
+static inline
+int normalize_axis(int axis, const MatShape& shape)
 {
-    return clamp(ax, (int)shape.size());
+    return normalize_axis(axis, (int)shape.size());
 }
 
-inline Range clamp(const Range& r, int axisSize)
+static inline
+Range normalize_axis_range(const Range& r, int axisSize)
 {
-    Range clamped(std::max(r.start, 0),
+    if (r == Range::all())
+        return Range(0, axisSize);
+    CV_CheckGE(r.start, 0, "");
+    Range clamped(r.start,
                   r.end > 0 ? std::min(r.end, axisSize) : axisSize + r.end + 1);
-    CV_Assert_N(clamped.start < clamped.end, clamped.end <= axisSize);
+    CV_DbgCheckGE(clamped.start, 0, "");
+    CV_CheckLT(clamped.start, clamped.end, "");
+    CV_CheckLE(clamped.end, axisSize, "");
     return clamped;
 }
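The renamed helpers make the wrapping convention explicit: a negative axis counts from the end, Python-style, and out-of-range values now fail a CV_Check instead of slipping through. A minimal usage sketch, with values that follow directly from the definitions above:

    #include <opencv2/dnn/shape_utils.hpp>

    void axis_examples()
    {
        // dims = 4, so valid raw axes are -4 .. 3.
        int a0 = cv::dnn::normalize_axis( 1, 4);  // -> 1
        int a1 = cv::dnn::normalize_axis(-1, 4);  // -> 3 (last axis)
        int a2 = cv::dnn::normalize_axis(-4, 4);  // -> 0
        // normalize_axis(4, 4) or normalize_axis(-5, 4) would trigger the CV_Check.

        // Range::all() expands to the whole [0, axisSize) span; a non-positive end wraps.
        cv::Range r0 = cv::dnn::normalize_axis_range(cv::Range::all(), 5);  // -> [0, 5)
        cv::Range r1 = cv::dnn::normalize_axis_range(cv::Range(1, -1), 5);  // -> [1, 5): 5 + (-1) + 1
        cv::Range r2 = cv::dnn::normalize_axis_range(cv::Range(0, 99), 5);  // -> [0, 5): end clamped
        (void)a0; (void)a1; (void)a2; (void)r0; (void)r1; (void)r2;
    }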
2 changes: 1 addition & 1 deletion modules/dnn/include/opencv2/dnn/version.hpp
@@ -6,7 +6,7 @@
 #define OPENCV_DNN_VERSION_HPP
 
 /// Use with major OpenCV version only.
-#define OPENCV_DNN_API_VERSION 20201117
+#define OPENCV_DNN_API_VERSION 20210205
 
 #if !defined CV_DOXYGEN && !defined CV_STATIC_ANALYSIS && !defined CV_DNN_DONT_ADD_INLINE_NS
 #define CV__DNN_INLINE_NS __CV_CAT(dnn4_v, OPENCV_DNN_API_VERSION)
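Bumping OPENCV_DNN_API_VERSION does more than update a number: the value feeds the versioned inline namespace, so binaries built against the new helpers stay distinguishable from earlier releases. Roughly, a sketch of the expansion (assuming the usual CV__DNN_INLINE_NS begin/end wrappers in the dnn headers):

    // With OPENCV_DNN_API_VERSION == 20210205, CV__DNN_INLINE_NS expands to dnn4_v20210205,
    // so the dnn symbols of this release effectively live in:
    namespace cv { namespace dnn { inline namespace dnn4_v20210205 {
        // Net, Layer, normalize_axis, ... are declared here by the headers.
    }}}  // namespace cv::dnn::dnn4_v20210205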
2 changes: 1 addition & 1 deletion modules/dnn/src/dnn.cpp
@@ -2972,7 +2972,7 @@ struct Net::Impl : public detail::NetImplBase
             // the concatenation optimization is applied with batch_size > 1.
             // so, for now, we only apply this optimization in the most popular
             // case batch_size == 1.
-            int axis = clamp(concatLayer->axis, output.dims);
+            int axis = normalize_axis(concatLayer->axis, output.dims);
             if( output.total(0, axis) == 1 )
             {
                 size_t i, ninputs = ld.inputBlobsId.size();
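The fusion guard reads more easily once the axis is normalized: output.total(0, axis) is the product of the dimensions in front of the concatenation axis, so the in-place concat optimization only fires when that product is 1. A small sketch with illustrative shapes (not code from this PR):

    #include <opencv2/core.hpp>
    #include <opencv2/dnn/shape_utils.hpp>

    void concat_fusion_guard_example()
    {
        // Concat output of shape N x C x H x W with the usual channel axis (axis = 1).
        int sz[] = {1, 64, 56, 56};
        cv::Mat output(4, sz, CV_32F, cv::Scalar(0));

        int axis = cv::dnn::normalize_axis(1, output.dims);
        // total(0, axis) multiplies the dims before the concat axis: here N = 1, so the
        // optimization is allowed; with N = 8 it is skipped, matching the batch_size comment.
        bool canFuse = (output.total(0, axis) == 1);
        (void)canFuse;
    }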
14 changes: 7 additions & 7 deletions modules/dnn/src/layers/concat_layer.cpp
@@ -79,7 +79,7 @@ class ConcatLayerImpl CV_FINAL : public ConcatLayer
     {
         CV_Assert(inputs.size() > 0);
         outputs.resize(1, inputs[0]);
-        int cAxis = clamp(axis, inputs[0]);
+        int cAxis = normalize_axis(axis, inputs[0]);
 
         int axisSum = 0;
         for (size_t i = 0; i < inputs.size(); i++)
@@ -201,7 +201,7 @@ class ConcatLayerImpl CV_FINAL : public ConcatLayer
         inps.getUMatVector(inputs);
         outs.getUMatVector(outputs);
 
-        int cAxis = clamp(axis, inputs[0].dims);
+        int cAxis = normalize_axis(axis, inputs[0].dims);
         if (padding)
             return false;
 
@@ -255,7 +255,7 @@ class ConcatLayerImpl CV_FINAL : public ConcatLayer
         inputs_arr.getMatVector(inputs);
         outputs_arr.getMatVector(outputs);
 
-        int cAxis = clamp(axis, inputs[0].dims);
+        int cAxis = normalize_axis(axis, inputs[0].dims);
         Mat& outMat = outputs[0];
 
         if (padding)
@@ -296,7 +296,7 @@ class ConcatLayerImpl CV_FINAL : public ConcatLayer
         auto context = reinterpret_cast<csl::CSLContext*>(context_);
 
         auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
-        auto concat_axis = clamp(axis, input_wrapper->getRank());
+        auto concat_axis = normalize_axis(axis, input_wrapper->getRank());
         return make_cuda_node<cuda4dnn::ConcatOp>(preferableTarget, std::move(context->stream), concat_axis, padding);
     }
 #endif
@@ -305,7 +305,7 @@ class ConcatLayerImpl CV_FINAL : public ConcatLayer
     {
 #ifdef HAVE_VULKAN
         vkcom::Tensor in = VkComTensor(input[0]);
-        int cAxis = clamp(axis, in.dimNum());
+        int cAxis = normalize_axis(axis, in.dimNum());
         std::shared_ptr<vkcom::OpBase> op(new vkcom::OpConcat(cAxis));
         return Ptr<BackendNode>(new VkComBackendNode(input, op));
 #endif // HAVE_VULKAN
@@ -341,7 +341,7 @@ class ConcatLayerImpl CV_FINAL : public ConcatLayer
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
 
         InferenceEngine::Builder::ConcatLayer ieLayer(name);
-        ieLayer.setAxis(clamp(axis, input->getDims().size()));
+        ieLayer.setAxis(normalize_axis(axis, input->getDims().size()));
         ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
@@ -354,7 +354,7 @@ class ConcatLayerImpl CV_FINAL : public ConcatLayer
     {
         InferenceEngine::DataPtr data = ngraphDataNode(inputs[0]);
         const int numDims = data->getDims().size();
-        const int cAxis = clamp(axis, numDims);
+        const int cAxis = normalize_axis(axis, numDims);
         std::vector<size_t> maxDims(numDims, 0);
 
         CV_Assert(inputs.size() == nodes.size());
12 changes: 6 additions & 6 deletions modules/dnn/src/layers/flatten_layer.cpp
@@ -89,8 +89,8 @@ class FlattenLayerImpl CV_FINAL : public FlattenLayer
         }
 
         int numAxes = inputs[0].size();
-        int startAxis = clamp(_startAxis, numAxes);
-        int endAxis = clamp(_endAxis, numAxes);
+        int startAxis = normalize_axis(_startAxis, numAxes);
+        int endAxis = normalize_axis(_endAxis, numAxes);
 
         CV_Assert(startAxis >= 0);
         CV_Assert(endAxis >= startAxis && endAxis < (int)numAxes);
@@ -120,8 +120,8 @@ class FlattenLayerImpl CV_FINAL : public FlattenLayer
         inputs_arr.getMatVector(inputs);
 
         int numAxes = inputs[0].dims;
-        _startAxis = clamp(_startAxis, numAxes);
-        _endAxis = clamp(_endAxis, numAxes);
+        _startAxis = normalize_axis(_startAxis, numAxes);
+        _endAxis = normalize_axis(_endAxis, numAxes);
     }
 
 #ifdef HAVE_OPENCL
@@ -195,8 +195,8 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
         std::vector<size_t> dims = ieInpNode->get_shape();
 
         int numAxes = dims.size();
-        int startAxis = clamp(_startAxis, numAxes);
-        int endAxis = clamp(_endAxis, numAxes);
+        int startAxis = normalize_axis(_startAxis, numAxes);
+        int endAxis = normalize_axis(_endAxis, numAxes);
 
         CV_Assert(startAxis >= 0);
         CV_Assert(endAxis >= startAxis && endAxis < numAxes);
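Flatten normalizes both _startAxis and _endAxis before collapsing the range between them, so negative attributes from ONNX/Caffe models behave like Python indices. A sketch of the shape arithmetic only (not of the layer API itself):

    #include <opencv2/dnn.hpp>
    #include <opencv2/dnn/shape_utils.hpp>

    void flatten_shape_example()
    {
        // Attributes startAxis = 1, endAxis = -1 on a 2 x 3 x 4 x 5 input.
        cv::dnn::MatShape inp = {2, 3, 4, 5};
        int numAxes   = (int)inp.size();
        int startAxis = cv::dnn::normalize_axis( 1, numAxes);  // -> 1
        int endAxis   = cv::dnn::normalize_axis(-1, numAxes);  // -> 3
        // Axes startAxis..endAxis (inclusive) collapse into one: 3 * 4 * 5 = 60,
        // so the flattened output shape is 2 x 60.
        (void)startAxis; (void)endAxis;
    }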
8 changes: 4 additions & 4 deletions modules/dnn/src/layers/fully_connected_layer.cpp
@@ -132,7 +132,7 @@ class FullyConnectedLayerImpl CV_FINAL : public InnerProductLayer
             CV_CheckEQ(blobs[0].dims, 2, "");
             numOutput = blobs[0].size[0];
             CV_Assert(!bias || (size_t)numOutput == blobs[1].total());
-            cAxis = clamp(axis, inputs[0]);
+            cAxis = normalize_axis(axis, inputs[0]);
         }
 
         MatShape outShape(cAxis + 1);
@@ -356,7 +356,7 @@ class FullyConnectedLayerImpl CV_FINAL : public InnerProductLayer
             return true;
         }
 
-        int axisCan = clamp(axis, inputs[0].dims);
+        int axisCan = normalize_axis(axis, inputs[0].dims);
         int numOutput = blobs[0].size[0];
         int innerSize = blobs[0].size[1];
         int outerSize = total(shape(inputs[0]), 0, axisCan);
@@ -477,7 +477,7 @@ class FullyConnectedLayerImpl CV_FINAL : public InnerProductLayer
 
         if (!blobs.empty())
         {
-            int axisCan = clamp(axis, input[0].dims);
+            int axisCan = normalize_axis(axis, input[0].dims);
             int outerSize = input[0].total(0, axisCan);
 
             for (size_t i = 0; i < input.size(); i++)
@@ -525,7 +525,7 @@ class FullyConnectedLayerImpl CV_FINAL : public InnerProductLayer
 
         auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
 
-        auto flatten_start_axis = clamp(axis, input_wrapper->getRank());
+        auto flatten_start_axis = normalize_axis(axis, input_wrapper->getRank());
 
         auto biasMat_ = bias ? biasMat : Mat();
         return make_cuda_node<cuda4dnn::InnerProductOp>(preferableTarget, std::move(context->stream), std::move(context->cublas_handle), flatten_start_axis, weightsMat, biasMat_);
12 changes: 6 additions & 6 deletions modules/dnn/src/layers/normalize_bbox_layer.cpp
@@ -126,8 +126,8 @@ class NormalizeBBoxLayerImpl CV_FINAL : public NormalizeBBoxLayer
 
         const UMat& inp0 = inputs[0];
         UMat& buffer = internals[0];
-        startAxis = clamp(startAxis, inp0.dims);
-        endAxis = clamp(endAxis, inp0.dims);
+        startAxis = normalize_axis(startAxis, inp0.dims);
+        endAxis = normalize_axis(endAxis, inp0.dims);
 
         size_t num = total(shape(inp0.size), 0, startAxis);
         size_t numPlanes = total(shape(inp0.size), startAxis, endAxis + 1);
@@ -211,8 +211,8 @@ class NormalizeBBoxLayerImpl CV_FINAL : public NormalizeBBoxLayer
 
         const Mat& inp0 = inputs[0];
         Mat& buffer = internals[0];
-        startAxis = clamp(startAxis, inp0.dims);
-        endAxis = clamp(endAxis, inp0.dims);
+        startAxis = normalize_axis(startAxis, inp0.dims);
+        endAxis = normalize_axis(endAxis, inp0.dims);
 
         const float* inpData = inp0.ptr<float>();
         float* outData = outputs[0].ptr<float>();
@@ -378,8 +378,8 @@ class NormalizeBBoxLayerImpl CV_FINAL : public NormalizeBBoxLayer
 
         NormalizeConfiguration<float> config;
         config.input_shape.assign(std::begin(input_shape), std::end(input_shape));
-        config.axis_start = clamp(startAxis, input_shape.size());
-        config.axis_end = clamp(endAxis, input_shape.size()) + 1; /* +1 because NormalizeOp follows [start, end) convention */
+        config.axis_start = normalize_axis(startAxis, input_shape.size());
+        config.axis_end = normalize_axis(endAxis, input_shape.size()) + 1; /* +1 because NormalizeOp follows [start, end) convention */
         config.norm = pnorm;
         config.eps = epsilon;
 
9 changes: 1 addition & 8 deletions modules/dnn/src/layers/reshape_layer.cpp
@@ -66,14 +66,7 @@ static void computeShapeByReshapeMask(const MatShape &srcShape,
     int srcShapeSize = (int)srcShape.size();
     int maskShapeSize = (int)maskShape.size();
 
-    if (srcRange == Range::all())
-        srcRange = Range(0, srcShapeSize);
-    else
-    {
-        int sz = srcRange.size();
-        srcRange.start = clamp(srcRange.start, srcShapeSize);
-        srcRange.end = srcRange.end == INT_MAX ? srcShapeSize : srcRange.start + sz;
-    }
+    srcRange = normalize_axis_range(srcRange, srcShapeSize);
 
     bool explicitMask = !maskShape.empty(); // All mask values are positive.
     for (int i = 0, n = maskShape.size(); i < n && explicitMask; ++i)
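computeShapeByReshapeMask previously unpacked srcRange by hand, with a special case for end == INT_MAX; normalize_axis_range now covers Range::all(), an oversized end, and a non-positive end in one call. A few concrete values under the new helper, derived from its definition above (the wrapper function is illustrative):

    #include <climits>
    #include <opencv2/dnn/shape_utils.hpp>

    void reshape_range_example()
    {
        const int srcShapeSize = 4;
        cv::Range a = cv::dnn::normalize_axis_range(cv::Range::all(),      srcShapeSize);  // -> [0, 4)
        cv::Range b = cv::dnn::normalize_axis_range(cv::Range(0, INT_MAX), srcShapeSize);  // -> [0, 4): end clamped
        cv::Range c = cv::dnn::normalize_axis_range(cv::Range(1, -1),      srcShapeSize);  // -> [1, 4): 4 + (-1) + 1
        (void)a; (void)b; (void)c;
    }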
2 changes: 1 addition & 1 deletion modules/dnn/src/layers/scale_layer.cpp
@@ -305,7 +305,7 @@ class ScaleLayerImpl CV_FINAL : public ScaleLayer
             numChannels = blobs[0].total();
 
         std::vector<size_t> shape(ieInpNode0->get_shape().size(), 1);
-        int cAxis = clamp(axis, shape.size());
+        int cAxis = normalize_axis(axis, shape.size());
         shape[cAxis] = numChannels;
 
         auto node = ieInpNode0;
8 changes: 4 additions & 4 deletions modules/dnn/src/layers/slice_layer.cpp
@@ -153,7 +153,7 @@ class SliceLayerImpl : public SliceLayer
             for (int j = 0; j < sliceRanges[i].size(); ++j)
             {
                 if (shapesInitialized || inpShape[j] > 0)
-                    outputs[i][j] = clamp(sliceRanges[i][j], inpShape[j]).size();
+                    outputs[i][j] = normalize_axis_range(sliceRanges[i][j], inpShape[j]).size();
             }
         }
     }
@@ -216,7 +216,7 @@ class SliceLayerImpl : public SliceLayer
             // Clamp.
             for (int j = 0; j < finalSliceRanges[i].size(); ++j)
            {
-                finalSliceRanges[i][j] = clamp(finalSliceRanges[i][j], inpShape[j]);
+                finalSliceRanges[i][j] = normalize_axis_range(finalSliceRanges[i][j], inpShape[j]);
             }
         }
 
@@ -634,7 +634,7 @@ class CropLayerImpl CV_FINAL : public SliceLayerImpl
         CV_Assert(inputs.size() == 2);
 
         MatShape dstShape = inputs[0];
-        int start = clamp(axis, dstShape);
+        int start = normalize_axis(axis, dstShape);
         for (int i = start; i < dstShape.size(); i++)
         {
             dstShape[i] = inputs[1][i];
@@ -653,7 +653,7 @@ class CropLayerImpl CV_FINAL : public SliceLayerImpl
         const Mat &inpSzBlob = inputs[1];
 
         int dims = inpBlob.dims;
-        int start_axis = clamp(axis, dims);
+        int start_axis = normalize_axis(axis, dims);
 
         std::vector<int> offset_final(dims, 0);
         if (offset.size() == 1)
14 changes: 7 additions & 7 deletions modules/dnn/src/layers/softmax_layer.cpp
@@ -89,7 +89,7 @@ class SoftMaxLayerImpl CV_FINAL : public SoftmaxLayer
     {
         bool inplace = Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
         MatShape shape = inputs[0];
-        int cAxis = clamp(axisRaw, shape.size());
+        int cAxis = normalize_axis(axisRaw, shape.size());
         shape[cAxis] = 1;
         internals.assign(1, shape);
         return inplace;
@@ -124,7 +124,7 @@ class SoftMaxLayerImpl CV_FINAL : public SoftmaxLayer
 
         UMat& src = inputs[0];
         UMat& dstMat = outputs[0];
-        int axis = clamp(axisRaw, src.dims);
+        int axis = normalize_axis(axisRaw, src.dims);
 
         if (softmaxOp.empty())
         {
@@ -216,7 +216,7 @@ class SoftMaxLayerImpl CV_FINAL : public SoftmaxLayer
         const Mat &src = inputs[0];
         Mat &dst = outputs[0];
 
-        int axis = clamp(axisRaw, src.dims);
+        int axis = normalize_axis(axisRaw, src.dims);
         size_t outerSize = src.total(0, axis), channels = src.size[axis],
                innerSize = src.total(axis + 1);
 
@@ -306,7 +306,7 @@ class SoftMaxLayerImpl CV_FINAL : public SoftmaxLayer
         auto context = reinterpret_cast<csl::CSLContext*>(context_);
 
         auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
-        auto channel_axis = clamp(axisRaw, input_wrapper->getRank());
+        auto channel_axis = normalize_axis(axisRaw, input_wrapper->getRank());
         return make_cuda_node<cuda4dnn::SoftmaxOp>(preferableTarget, std::move(context->cudnn_handle), channel_axis, logSoftMax);
     }
 #endif
@@ -315,7 +315,7 @@ class SoftMaxLayerImpl CV_FINAL : public SoftmaxLayer
     {
 #ifdef HAVE_VULKAN
         vkcom::Tensor in = VkComTensor(inputs[0]);
-        int cAxis = clamp(axisRaw, in.dimNum());
+        int cAxis = normalize_axis(axisRaw, in.dimNum());
         std::shared_ptr<vkcom::OpBase> op(new vkcom::OpSoftmax(cAxis, logSoftMax));
         return Ptr<BackendNode>(new VkComBackendNode(inputs, op));
 #endif // HAVE_VULKAN
@@ -354,7 +354,7 @@ class SoftMaxLayerImpl CV_FINAL : public SoftmaxLayer
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
 
         InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
-        ieLayer.setAxis(clamp(axisRaw, input->getDims().size()));
+        ieLayer.setAxis(normalize_axis(axisRaw, input->getDims().size()));
 
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
@@ -365,7 +365,7 @@ class SoftMaxLayerImpl CV_FINAL : public SoftmaxLayer
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        int axis = clamp(axisRaw, ieInpNode->get_shape().size());
+        int axis = normalize_axis(axisRaw, ieInpNode->get_shape().size());
         auto softmax = std::make_shared<ngraph::op::v1::Softmax>(ieInpNode, axis);
         if (logSoftMax)
             return Ptr<BackendNode>(new InfEngineNgraphNode(std::make_shared<ngraph::op::v0::Log>(softmax)));
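The CPU softmax path splits the blob around the normalized axis into outerSize * channels * innerSize elements, which is why the axis has to be brought into [0, dims) first. A worked sketch of that decomposition (shapes and the helper function are illustrative):

    #include <opencv2/core.hpp>
    #include <opencv2/dnn/shape_utils.hpp>

    void softmax_split_example()
    {
        // 2 x 10 x 4 x 4 blob, softmax requested over axisRaw = -3 (the channel axis).
        int sz[] = {2, 10, 4, 4};
        cv::Mat src(4, sz, CV_32F, cv::Scalar(0));

        int axis         = cv::dnn::normalize_axis(-3, src.dims);  // -> 1
        size_t outerSize = src.total(0, axis);                     // -> 2   (N)
        size_t channels  = src.size[axis];                         // -> 10  (softmax dimension)
        size_t innerSize = src.total(axis + 1);                    // -> 16  (H * W)
        // Softmax runs over the 10 channel values for each of the outerSize * innerSize positions.
        CV_Assert(outerSize * channels * innerSize == src.total());
    }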