from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from caffe2.python import core
11 def _ConvBase(model, is_nd, blob_in, blob_out, dim_in, dim_out, kernel,
12 weight_init=None, bias_init=None, group=1, transform_inputs=None,
13 use_cudnn=False, order="NCHW", cudnn_exhaustive_search=False,
14 ws_nbytes_limit=None, **kwargs):
17 if not isinstance(kernel, list):
22 kernels = [kernel] * 2
25 kwargs[
'engine'] =
'CUDNN' 26 kwargs[
'exhaustive_search'] = cudnn_exhaustive_search
28 kwargs[
'ws_nbytes_limit'] = ws_nbytes_limit
31 False if (
"no_bias" in kwargs
and kwargs[
"no_bias"])
else True 32 weight_init = weight_init
if weight_init
else (
'XavierFill', {})
33 bias_init = bias_init
if bias_init
else (
'ConstantFill', {})
34 blob_out = blob_out
or model.net.NextName()
35 weight_shape = [dim_out]
37 weight_shape.append(int(dim_in / group))
38 weight_shape.extend(kernels)
40 weight_shape.extend(kernels)
41 weight_shape.append(int(dim_in / group))
44 weight = model.param_init_net.__getattr__(weight_init[0])(
51 bias = model.param_init_net.__getattr__(bias_init[0])(
59 blob_out +
'_w', model.param_init_net)
62 blob_out +
'_b', model.param_init_net)
64 model.params.extend([weight, bias])
66 model.params.extend([weight])
68 model.weights.append(weight)
71 model.biases.append(bias)
74 inputs = [blob_in, weight, bias]
76 inputs = [blob_in, weight]
78 if transform_inputs
is not None:
79 transform_inputs(model, blob_out, inputs)
84 if 'no_bias' in kwargs:
87 kwargs[
'group'] = group
89 return model.net.Conv(
96 return model.net.Conv(
def ConvNd(model, blob_in, blob_out, dim_in, dim_out, kernel,
           weight_init=None, bias_init=None, group=1, transform_inputs=None,
           order="NCHW", **kwargs):
    """N-dimensional convolution for inputs with NCHW storage order.

    Thin wrapper around _ConvBase with is_nd=True; ``kernel`` may be a list
    of per-dimension kernel sizes.
    """
    assert order == "NCHW", "ConvNd only supported for NCHW storage."
    return _ConvBase(model, True, blob_in, blob_out, dim_in, dim_out, kernel,
                     weight_init, bias_init, group, transform_inputs,
                     order=order, **kwargs)
def Conv(model, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=None,
         bias_init=None, group=1, transform_inputs=None, **kwargs):
    """2-dimensional convolution.

    Thin wrapper around _ConvBase with is_nd=False; ``kernel`` is a single
    int applied to both spatial dimensions.
    """
    return _ConvBase(model, False, blob_in, blob_out, dim_in, dim_out, kernel,
                     weight_init, bias_init, group, transform_inputs, **kwargs)
def ConvTranspose(
    model, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=None,
    bias_init=None, use_cudnn=False, order="NCHW",
    cudnn_exhaustive_search=False, ws_nbytes_limit=None, **kwargs
):
    """2-d transposed convolution ("deconvolution").

    Creates (or references) weight/bias blobs, registers them on the model,
    and emits a ConvTranspose operator.  Unlike _ConvBase this always uses a
    bias input.
    """
    weight_init = weight_init or ('XavierFill', {})
    bias_init = bias_init or ('ConstantFill', {})
    blob_out = blob_out or model.net.NextName()

    # Transposed conv weights are laid out input-channels-first.
    weight_shape = (
        [dim_in, dim_out, kernel, kernel]
        if order == "NCHW" else [dim_in, kernel, kernel, dim_out]
    )

    if model.init_params:
        weight = model.param_init_net.__getattr__(weight_init[0])(
            [],
            blob_out + '_w',
            shape=weight_shape,
            **weight_init[1]
        )
        bias = model.param_init_net.__getattr__(bias_init[0])(
            [],
            blob_out + '_b',
            shape=[dim_out, ],
            **bias_init[1]
        )
    else:
        # Parameters already exist elsewhere: reference them by scoped name.
        weight = core.ScopedBlobReference(
            blob_out + '_w', model.param_init_net)
        bias = core.ScopedBlobReference(
            blob_out + '_b', model.param_init_net)

    model.params.extend([weight, bias])
    model.weights.append(weight)
    model.biases.append(bias)

    if use_cudnn:
        kwargs['engine'] = 'CUDNN'
        kwargs['exhaustive_search'] = cudnn_exhaustive_search
        if ws_nbytes_limit:
            kwargs['ws_nbytes_limit'] = ws_nbytes_limit

    return model.net.ConvTranspose(
        [blob_in, weight, bias],
        blob_out,
        kernel=kernel,
        order=order,
        **kwargs
    )
184 """Group Convolution. 186 This is essentially the same as Conv with a group argument passed in. 187 We specialize this for backward interface compatibility. 189 return Conv(blob_in, blob_out, dim_in, dim_out, kernel,
190 weight_init=weight_init, bias_init=bias_init,
191 group=group, **kwargs)
def GroupConv_Deprecated(
    model,
    blob_in,
    blob_out,
    dim_in,
    dim_out,
    kernel,
    weight_init=None,
    bias_init=None,
    group=1,
    use_cudnn=False,
    order="NCHW",
    cudnn_exhaustive_search=False,
    ws_nbytes_limit=None,
    **kwargs
):
    """GroupConvolution's deprecated interface.

    This is used to simulate a group convolution via split and concat. You
    should always use the new group convolution in your new code.
    """
    weight_init = weight_init if weight_init else ('XavierFill', {})
    bias_init = bias_init if bias_init else ('ConstantFill', {})
    use_bias = False if ("no_bias" in kwargs and kwargs["no_bias"]) else True
    if use_cudnn:
        kwargs['engine'] = 'CUDNN'
        kwargs['exhaustive_search'] = cudnn_exhaustive_search
        if ws_nbytes_limit:
            kwargs['ws_nbytes_limit'] = ws_nbytes_limit
    if dim_in % group:
        raise ValueError("dim_in should be divisible by group.")
    if dim_out % group:
        raise ValueError("dim_out should be divisible by group.")

    # Split the input channel-wise into `group` equal slices.
    splitted_blobs = model.net.DepthSplit(
        blob_in,
        ['_' + blob_out + '_gconv_split_' + str(i) for i in range(group)],
        dimensions=[int(dim_in / group) for i in range(group)],
        order=order
    )
    weight_shape = (
        [dim_out / group, dim_in / group, kernel, kernel]
        if order == "NCHW" else
        [dim_out / group, kernel, kernel, dim_in / group]
    )
    # Make sure that the shapes are of int format. Especially for py3 where
    # int division gives float output.
    weight_shape = [int(v) for v in weight_shape]

    conv_blobs = []
    for i in range(group):
        if model.init_params:
            weight = model.param_init_net.__getattr__(weight_init[0])(
                [],
                blob_out + '_gconv_%d_w' % i,
                shape=weight_shape,
                **weight_init[1]
            )
            if use_bias:
                bias = model.param_init_net.__getattr__(bias_init[0])(
                    [],
                    blob_out + '_gconv_%d_b' % i,
                    shape=[int(dim_out / group)],
                    **bias_init[1]
                )
        else:
            # Parameters already exist: reference them by scoped name.
            weight = core.ScopedBlobReference(
                blob_out + '_gconv_%d_w' % i, model.param_init_net)
            if use_bias:
                bias = core.ScopedBlobReference(
                    blob_out + '_gconv_%d_b' % i, model.param_init_net)

        if use_bias:
            model.params.extend([weight, bias])
        else:
            model.params.extend([weight])
        model.weights.append(weight)
        if use_bias:
            model.biases.append(bias)

        inputs = [weight, bias] if use_bias else [weight]
        # The op infers bias usage from its input count; drop the helper flag.
        if 'no_bias' in kwargs:
            del kwargs['no_bias']
        conv_blobs.append(
            splitted_blobs[i].Conv(
                inputs,
                blob_out + '_gconv_%d' % i,
                kernel=kernel,
                order=order,
                **kwargs
            )
        )

    # Stitch the per-group outputs back together along the channel axis.
    concat, concat_dims = model.net.Concat(
        conv_blobs,
        [blob_out,
         "_" + blob_out + "_concat_dims"],
        order=order
    )
    return concat
def ConvTranspose(model, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=None, bias_init=None, use_cudnn=False, order="NCHW", cudnn_exhaustive_search=False, ws_nbytes_limit=None, kwargs)
def ConvNd(model, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=None, bias_init=None, group=1, transform_inputs=None, order="NCHW", kwargs)
def ScopedBlobReference(name, args, kwargs)
def GroupConv_Deprecated(model, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=None, bias_init=None, group=1, use_cudnn=False, order="NCHW", cudnn_exhaustive_search=False, ws_nbytes_limit=None, kwargs)
def Conv(model, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=None, bias_init=None, group=1, transform_inputs=None, kwargs)
def GroupConv(model, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=None, bias_init=None, group=1, kwargs)