3 from __future__
import absolute_import
4 from __future__
import division
5 from __future__
import print_function
6 from __future__
import unicode_literals
8 from caffe2.python
import core
12 model, op_call, blob_in, blob_out, dim_in, dim_out, weight_init=None,
13 bias_init=None, **kwargs
16 weight_init = weight_init
or (
'XavierFill', {})
17 bias_init = bias_init
or (
'ConstantFill', {})
18 blob_out = blob_out
or model.net.NextName()
20 weight = model.param_init_net.__getattr__(weight_init[0])(
23 shape=[dim_out, dim_in],
26 bias = model.param_init_net.__getattr__(bias_init[0])(
34 blob_out +
'_w', model.param_init_net)
36 blob_out +
'_b', model.param_init_net)
38 if 'freeze_bias' in kwargs:
39 model.params.extend([weight])
41 model.params.extend([weight, bias])
43 model.weights.append(weight)
44 model.biases.append(bias)
45 return op_call([blob_in, weight, bias], blob_out, **kwargs)
def FC(model, *args, **kwargs):
    """Add a fully-connected layer to the model's net.

    Thin wrapper that delegates all argument handling (weight/bias
    creation and registration) to ``_FC_or_packed_FC`` with the plain
    ``FC`` operator.
    """
    return _FC_or_packed_FC(model, model.net.FC, *args, **kwargs)
def PackedFC(model, *args, **kwargs):
    """Add a PackedFC layer to the model's net.

    Identical to ``FC`` except that it emits the ``PackedFC`` operator;
    all parameter handling is delegated to ``_FC_or_packed_FC``.
    """
    return _FC_or_packed_FC(model, model.net.PackedFC, *args, **kwargs)
def FC_Decomp(
    model, blob_in, blob_out, dim_in, dim_out,
    rank_approx=5, weight_init=None,
    bias_init=None, **kwargs
):
    """FC_Decomp version: low-rank decomposed fully-connected layer.

    The weight matrix is factored as ``u @ v.T`` with inner dimension
    ``rank_approx``. Here we assume that the rank of original input is
    bigger than 5.

    Args:
        model: model helper exposing ``net``, ``param_init_net`` and ``params``.
        blob_in: input blob.
        blob_out: output blob name; auto-generated when falsy.
        dim_in: input feature dimension.
        dim_out: output feature dimension.
        rank_approx: rank of the low-rank approximation (default 5).
        weight_init: ``(filler_op_name, filler_kwargs)``; defaults to
            ``('XavierFill', {})``.
        bias_init: ``(filler_op_name, filler_kwargs)``; defaults to
            ``('ConstantFill', {})``.
        **kwargs: forwarded to the ``FC_Decomp`` operator.

    Returns:
        The output blob produced by ``model.net.FC_Decomp``.
    """
    weight_init = weight_init if weight_init else ('XavierFill', {})
    bias_init = bias_init if bias_init else ('ConstantFill', {})
    blob_out = blob_out or model.net.NextName()
    # NOTE(review): '_u'/'_v' blob suffixes reconstructed — confirm against
    # the FC_Decomp operator schema.
    u = getattr(model.param_init_net, weight_init[0])(
        [],
        blob_out + '_u',
        shape=[dim_out, rank_approx],
        **weight_init[1]
    )
    v = getattr(model.param_init_net, weight_init[0])(
        [],
        blob_out + '_v',
        shape=[dim_in, rank_approx],
        **weight_init[1]
    )
    bias = getattr(model.param_init_net, bias_init[0])(
        [],
        blob_out + '_b',
        shape=[dim_out, ],
        **bias_init[1]
    )
    model.params.extend([u, v, bias])
    return model.net.FC_Decomp([blob_in, u, v, bias], blob_out, **kwargs)
def FC_Prune(
    model, blob_in, blob_out, dim_in, dim_out,
    weight_init=None, bias_init=None, mask_init=None,
    threshold=0.00001, need_compress_rate=False,
    comp_lb=0.05,
    **kwargs
):
    """FC_Prune version: fully-connected layer with weight pruning.

    Runnable so far. Great!:)

    Besides the usual weight and bias, this layer creates a binary pruning
    mask, an aggregated-gradient blob (``ag_dw``), a mask sequence blob, a
    scalar pruning threshold and a scalar compression lower bound, all fed
    to the ``FC_Prune`` operator.

    Args:
        model: model helper exposing ``net``, ``param_init_net``, ``params``
            and ``init_params``.
        blob_in: input blob.
        blob_out: output blob name; auto-generated when falsy.
        dim_in: input feature dimension.
        dim_out: output feature dimension.
        weight_init: ``(filler_op_name, filler_kwargs)``; defaults to
            ``('XavierFill', {})``.
        bias_init: ``(filler_op_name, filler_kwargs)``; defaults to
            ``('ConstantFill', {})``.
        mask_init: ``(filler_op_name, filler_kwargs)``; defaults to
            ``('ConstantFill', {})``.
        threshold: pruning threshold stored in the ``_thres`` blob.
        need_compress_rate: when True, the op also outputs the achieved
            compression rate blob.
        comp_lb: compression lower bound stored in the ``_lb`` blob.
        **kwargs: forwarded to the ``FC_Prune`` operator.

    Returns:
        The output blob (and the compression-rate blob when
        ``need_compress_rate`` is True).
    """
    weight_init = weight_init if weight_init else ('XavierFill', {})
    bias_init = bias_init if bias_init else ('ConstantFill', {})
    mask_init = mask_init if mask_init else ('ConstantFill', {})
    blob_out = blob_out or model.net.NextName()
    compress_rate = blob_out + '_compress_rate'
    if model.init_params:
        compress_lb = model.param_init_net.ConstantFill(
            [],
            blob_out + '_lb',
            shape=[1],
            value=comp_lb
        )
        # getattr() is the idiomatic form of the original
        # param_init_net.__getattr__(name)(...) spelling.
        weight = getattr(model.param_init_net, weight_init[0])(
            [],
            blob_out + '_w',
            shape=[dim_out, dim_in],
            **weight_init[1]
        )
        # NOTE(review): mask fill value reconstructed as 1.0 (all weights
        # initially kept) — confirm.
        mask = model.param_init_net.ConstantFill(
            [],
            blob_out + '_m',
            shape=[dim_out, dim_in],
            value=1.0
        )
        ag_dw = getattr(model.param_init_net, mask_init[0])(
            [],
            blob_out + '_ag_dw',
            shape=[dim_out, dim_in],
            **mask_init[1]
        )
        bias = getattr(model.param_init_net, bias_init[0])(
            [],
            blob_out + '_b',
            shape=[dim_out, ],
            **bias_init[1]
        )
        mask_seq = getattr(model.param_init_net, mask_init[0])(
            [],
            blob_out + '_mask_seq',
            shape=[dim_out, dim_in],
            **mask_init[1]
        )
        thres = model.param_init_net.ConstantFill(
            [],
            blob_out + '_thres',
            shape=[1],
            value=threshold
        )
    else:
        # Parameters are assumed to already exist; only reference them.
        compress_lb = core.ScopedBlobReference(
            blob_out + '_lb', model.param_init_net)
        weight = core.ScopedBlobReference(
            blob_out + '_w', model.param_init_net)
        bias = core.ScopedBlobReference(
            blob_out + '_b', model.param_init_net)
        mask = core.ScopedBlobReference(
            blob_out + '_m', model.param_init_net)
        ag_dw = core.ScopedBlobReference(
            blob_out + '_ag_dw', model.param_init_net)
        mask_seq = core.ScopedBlobReference(
            blob_out + '_mask_seq', model.param_init_net)
        thres = core.ScopedBlobReference(
            blob_out + '_thres', model.param_init_net)

    # Only weight and bias are trainable; the mask/threshold blobs are
    # bookkeeping inputs of the FC_Prune op.
    model.params.extend([weight, bias])
    if need_compress_rate:
        return model.net.FC_Prune(
            [blob_in, weight, mask, bias, ag_dw, mask_seq,
             thres, compress_lb],
            [blob_out, compress_rate], **kwargs)
    else:
        return model.net.FC_Prune(
            [blob_in, weight, mask, bias, ag_dw, mask_seq,
             thres, compress_lb],
            blob_out, **kwargs)
def FC_Sparse(
    model, blob_in, blob_out, w_csr, iw, jw, bias,
    **kwargs
):
    """FC_Sparse: Only takes in allocated weights.

    Unlike the other FC helpers, this one does not create any parameter
    blobs: the CSR-encoded weight (``w_csr`` values, ``iw``/``jw`` index
    blobs) and the bias must already be allocated by the caller.

    Args:
        model: model helper exposing ``net`` and ``params``.
        blob_in: input blob.
        blob_out: output blob name.
        w_csr: CSR weight-values blob.
        iw: CSR row-pointer blob.
        jw: CSR column-index blob.
        bias: bias blob.
        **kwargs: forwarded to the ``FC_Sparse`` operator.

    Returns:
        The output blob produced by ``model.net.FC_Sparse``.
    """
    if not (w_csr and iw and jw and bias):
        # NOTE(review): best-effort warning only — execution continues with
        # the falsy inputs; confirm this matches the original intent.
        print("Invalid arguments to FC_Sparse")
    model.params.extend([w_csr, iw, jw, bias])
    return model.net.FC_Sparse([blob_in, w_csr, iw, jw, bias],
                               blob_out, **kwargs)