Caffe2 - Python API
A deep learning, cross-platform ML framework
fc.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from caffe2.python import core


def _FC_or_packed_FC(
    model, op_call, blob_in, blob_out, dim_in, dim_out, weight_init=None,
    bias_init=None, **kwargs
):
    """FC"""
    weight_init = weight_init or ('XavierFill', {})
    bias_init = bias_init or ('ConstantFill', {})
    blob_out = blob_out or model.net.NextName()
    if model.init_params:
        weight = model.param_init_net.__getattr__(weight_init[0])(
            [],
            blob_out + '_w',
            shape=[dim_out, dim_in],
            **weight_init[1]
        )
        bias = model.param_init_net.__getattr__(bias_init[0])(
            [],
            blob_out + '_b',
            shape=[dim_out, ],
            **bias_init[1]
        )
    else:
        weight = core.ScopedBlobReference(
            blob_out + '_w', model.param_init_net)
        bias = core.ScopedBlobReference(
            blob_out + '_b', model.param_init_net)

    if 'freeze_bias' in kwargs:
        model.params.extend([weight])
    else:
        model.params.extend([weight, bias])

    model.weights.append(weight)
    model.biases.append(bias)
    return op_call([blob_in, weight, bias], blob_out, **kwargs)
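
When model.init_params is True, this helper adds an initializer for <blob_out>_w (XavierFill by default) and <blob_out>_b (ConstantFill by default) to param_init_net and a single op to the main net; otherwise it only creates scoped references to blobs that must already exist. A quick way to check what gets generated, sketched under the assumption that this listing is caffe2/python/helpers/fc.py and that model_helper.ModelHelper supplies the net, param_init_net, init_params, params, weights, and biases attributes the helper touches (blob names and sizes are illustrative):

from caffe2.python import model_helper
from caffe2.python.helpers import fc as fc_helpers

model = model_helper.ModelHelper(name="inspect_fc")
fc_helpers.FC(model, 'x', 'y', dim_in=4, dim_out=3)

# Expect a XavierFill for 'y_w' and a ConstantFill for 'y_b' in the init net,
# and a single FC op in the main net.
print([op.type for op in model.param_init_net.Proto().op])
print([op.type for op in model.net.Proto().op])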


def FC(model, *args, **kwargs):
    return _FC_or_packed_FC(model, model.net.FC, *args, **kwargs)


def PackedFC(model, *args, **kwargs):
    return _FC_or_packed_FC(model, model.net.PackedFC, *args, **kwargs)
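
A minimal end-to-end usage sketch for FC under the same assumptions (ModelHelper-based model, helpers import path, illustrative blob names and shapes). PackedFC follows the same calling convention but requires a build that provides the PackedFC operator:

import numpy as np
from caffe2.python import model_helper, workspace
from caffe2.python.helpers import fc as fc_helpers

# One fully connected layer: 16 inputs -> 8 outputs.
model = model_helper.ModelHelper(name="fc_example")
fc_helpers.FC(model, 'data', 'fc1', dim_in=16, dim_out=8)

# Feed a batch, run the parameter init net once, then the main net.
workspace.FeedBlob('data', np.random.rand(4, 16).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
print(workspace.FetchBlob('fc1').shape)  # expected: (4, 8)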


def FC_Decomp(
    model, blob_in, blob_out, dim_in, dim_out,
    rank_approx=5, weight_init=None,
    bias_init=None, **kwargs
):
    """FC_Decomp version
    Here we assume that the rank of the original input is bigger than 5.
    """
    weight_init = weight_init if weight_init else ('XavierFill', {})
    bias_init = bias_init if bias_init else ('ConstantFill', {})
    blob_out = blob_out or model.net.NextName()
    u = model.param_init_net.__getattr__(weight_init[0])(
        [],
        blob_out + '_u',
        shape=[dim_out, rank_approx],
        **weight_init[1]
    )
    v = model.param_init_net.__getattr__(weight_init[0])(
        [],
        blob_out + '_v',
        shape=[dim_in, rank_approx],
        **weight_init[1]
    )
    bias = model.param_init_net.__getattr__(bias_init[0])(
        [],
        blob_out + '_b',
        shape=[dim_out, ],
        **bias_init[1]
    )
    model.params.extend([u, v, bias])
    return model.net.FC_Decomp([blob_in, u, v, bias], blob_out, **kwargs)
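
FC_Decomp stores the weight as two low-rank factors, u of shape [dim_out, rank_approx] and v of shape [dim_in, rank_approx], instead of a dense [dim_out, dim_in] matrix. The NumPy sketch below shows the truncated-SVD factorization this kind of decomposition relies on; the function name and shapes are illustrative, not part of the Caffe2 API:

import numpy as np

def low_rank_factors(w, rank_approx=5):
    # Best rank-r approximation of w (dim_out x dim_in) in the Frobenius norm:
    # w ~= u . v^T with u: (dim_out, rank_approx) and v: (dim_in, rank_approx),
    # matching the '_u' and '_v' blob shapes created above.
    U, s, Vt = np.linalg.svd(w, full_matrices=False)
    u = U[:, :rank_approx] * s[:rank_approx]
    v = Vt[:rank_approx, :].T
    return u, v

w = np.random.rand(32, 64).astype(np.float32)
u, v = low_rank_factors(w, rank_approx=5)
print(np.linalg.norm(w - u.dot(v.T)) / np.linalg.norm(w))  # relative approximation error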


def FC_Prune(
    model, blob_in, blob_out, dim_in, dim_out,
    weight_init=None, bias_init=None, mask_init=None,
    threshold=0.00001, need_compress_rate=False,
    comp_lb=0.05,
    **kwargs
):
    """FC_Prune version
    Runnable so far. Great!:)
    """
    weight_init = weight_init if weight_init else ('XavierFill', {})
    bias_init = bias_init if bias_init else ('ConstantFill', {})
    mask_init = mask_init if mask_init else ('ConstantFill', {})
    blob_out = blob_out or model.net.NextName()
    compress_rate = blob_out + '_compress_rate'
    if model.init_params:
        compress_lb = model.param_init_net.ConstantFill(
            [],
            blob_out + '_lb',
            shape=[1],
            value=comp_lb
        )
        weight = model.param_init_net.__getattr__(weight_init[0])(
            [],
            blob_out + '_w',
            shape=[dim_out, dim_in],
            **weight_init[1]
        )
        mask = model.param_init_net.ConstantFill(
            [],
            blob_out + '_m',
            shape=[dim_out, dim_in],
            value=1.0
        )
        ag_dw = model.param_init_net.__getattr__(mask_init[0])(
            [],
            blob_out + '_ag_dw',
            shape=[dim_out, dim_in],
            **mask_init[1]
        )
        bias = model.param_init_net.__getattr__(bias_init[0])(
            [],
            blob_out + '_b',
            shape=[dim_out, ],
            **bias_init[1]
        )
        mask_seq = model.param_init_net.__getattr__(mask_init[0])(
            [],
            blob_out + '_mask_seq',
            shape=[dim_out, dim_in],
            **mask_init[1]
        )
        thres = model.param_init_net.ConstantFill(
            [],
            blob_out + '_thres',
            shape=[1],
            value=threshold
        )
    else:
        compress_lb = core.ScopedBlobReference(
            blob_out + '_lb', model.param_init_net)
        weight = core.ScopedBlobReference(
            blob_out + '_w', model.param_init_net)
        bias = core.ScopedBlobReference(
            blob_out + '_b', model.param_init_net)
        mask = core.ScopedBlobReference(
            blob_out + '_m', model.param_init_net)
        ag_dw = core.ScopedBlobReference(
            blob_out + '_ag_dw', model.param_init_net)
        mask_seq = core.ScopedBlobReference(
            blob_out + '_mask_seq', model.param_init_net)
        thres = core.ScopedBlobReference(
            blob_out + '_thres', model.param_init_net)

    model.params.extend([weight, bias])
    if need_compress_rate:
        return model.net.FC_Prune([blob_in, weight, mask, bias, ag_dw, mask_seq,
                                   thres, compress_lb],
                                  [blob_out, compress_rate], **kwargs)
    else:
        return model.net.FC_Prune([blob_in, weight, mask,
                                   bias, ag_dw, mask_seq,
                                   thres, compress_lb],
                                  blob_out, **kwargs)
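
The FC_Prune operator consumes the dense weight together with a same-shaped mask, accumulated gradients (ag_dw), a mask sequence, a threshold, and a compression lower bound, and can optionally emit a compress-rate output; the exact semantics live in the C++ operator. The NumPy sketch below only illustrates the magnitude-based masking idea behind this style of pruning, with an illustrative helper name and without the ag_dw/mask_seq bookkeeping:

import numpy as np

def magnitude_prune(w, mask, threshold=0.00001):
    # Zero the mask wherever |w| falls below the threshold, apply it to the
    # weights, and report the fraction of weights still active.
    mask = mask * (np.abs(w) >= threshold).astype(w.dtype)
    w = w * mask
    compress_rate = float(mask.mean())
    return w, mask, compress_rate

w = np.random.randn(8, 16).astype(np.float32)
mask = np.ones_like(w)
w, mask, rate = magnitude_prune(w, mask, threshold=0.5)
print(rate)  # fraction of surviving weights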


def FC_Sparse(
    model, blob_in, blob_out, w_csr, iw, jw, bias,
    **kwargs
):
    """FC_Sparse: Only takes in allocated weights"""
    if not (w_csr and iw and jw and bias):
        print("Warning...")
    model.params.extend([w_csr, iw, jw, bias])
    return model.net.FC_Sparse([blob_in, w_csr, iw, jw, bias],
                               blob_out, **kwargs)