Make use of the explicit output shape for the deconvolution layer.
This is relevant when the net is loaded from an ONNX model containing ConvTranspose layers, which may carry the output_shape attribute; cf. https://github.com/onnx/onnx/blob/master/docs/Operators.md#ConvTranspose
parent cedd78d526
commit 3bcff7f941
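As a usage illustration (a minimal sketch, not part of the commit; the model file name and the input dimensions are hypothetical), a network whose ConvTranspose nodes carry an explicit output_shape attribute can be loaded through the ONNX importer and run like this:

// Minimal sketch: load an ONNX model whose ConvTranspose nodes carry an
// explicit output_shape attribute and run one forward pass.
// "deconv_output_shape.onnx" and the 1x3x32x32 input are hypothetical.
#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    cv::dnn::Net net = cv::dnn::readNetFromONNX("deconv_output_shape.onnx");

    // NCHW input blob filled with zeros, just enough to exercise the layer.
    const int inputShape[] = {1, 3, 32, 32};
    cv::Mat blob = cv::Mat::zeros(4, inputShape, CV_32F);
    net.setInput(blob);

    cv::Mat out = net.forward();
    std::cout << "output HxW: " << out.size[2] << "x" << out.size[3] << std::endl;
    return 0;
}

With the change in the diff below, the reported spatial size should match the output_shape attribute instead of being derived only from stride, kernel and padding.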
@@ -591,6 +591,37 @@ void ONNXImporter::populateNet(Net dstNet)
             }
             layerParams.set("num_output", layerParams.blobs[0].size[1] * layerParams.get<int>("group", 1));
             layerParams.set("bias_term", node_proto.input_size() == 3);
+
+            if (layerParams.has("output_shape"))
+            {
+                const DictValue& outShape = layerParams.get("output_shape");
+
+                if (outShape.size() != 4)
+                    CV_Error(Error::StsNotImplemented, "Output shape must have 4 elements.");
+
+                const int strideY = layerParams.get<int>("stride_h", 1);
+                const int strideX = layerParams.get<int>("stride_w", 1);
+                const int outH = outShape.getIntValue(2);
+                const int outW = outShape.getIntValue(3);
+
+                if (layerParams.get<String>("pad_mode") == "SAME")
+                {
+                    layerParams.set("adj_w", (outW - 1) % strideX);
+                    layerParams.set("adj_h", (outH - 1) % strideY);
+                }
+                else if (layerParams.get<String>("pad_mode") == "VALID")
+                {
+                    if (!layerParams.has("kernel_h") || !layerParams.has("kernel_w"))
+                        CV_Error(Error::StsNotImplemented,
+                                 "Required attributes 'kernel_h' and 'kernel_w' are not present.");
+
+                    int kernelH = layerParams.get<int>("kernel_h");
+                    int kernelW = layerParams.get<int>("kernel_w");
+
+                    layerParams.set("adj_w", (outW - kernelW) % strideX);
+                    layerParams.set("adj_h", (outH - kernelH) % strideY);
+                }
+            }
         }
         else if (layer_type == "Transpose")
         {
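The adjustment arithmetic follows from the ConvTranspose output-size relation in the ONNX operator description (dilation 1, zero padding): out = stride * (in - 1) + kernel + adj. The standalone sketch below (illustrative only; the helper name and the concrete numbers are made up, and it assumes the requested size is actually reachable from the given input size) checks that the VALID-branch expression adj = (out - kernel) % stride reproduces a requested output size:

// Standalone sketch of the arithmetic used in the VALID branch above.
// ConvTranspose output size with dilation 1, zero padding and output
// adjustment `adj`:  out = stride * (in - 1) + kernel + adj
#include <cassert>

static int deconvOutSize(int in, int kernel, int stride, int adj)
{
    return stride * (in - 1) + kernel + adj;
}

int main()
{
    const int in = 10, kernel = 4, stride = 3;
    const int requestedOut = 32;  // e.g. taken from output_shape
    // Same expression as the adj_h / adj_w computation in the importer;
    // valid as long as requestedOut lies within one stride of
    // stride * (in - 1) + kernel.
    const int adj = (requestedOut - kernel) % stride;

    assert(deconvOutSize(in, kernel, stride, adj) == requestedOut);
    return 0;
}

The SAME branch uses the analogous remainder, (out - 1) % stride, for the same purpose.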