Caffe2 - Python API
A deep learning, cross-platform ML framework
caffe_translator.py
import argparse
import copy
import logging
import numpy as np  # noqa

from caffe2.proto import caffe2_pb2, caffe2_legacy_pb2
from caffe.proto import caffe_pb2
from caffe2.python import core, utils, workspace
from google.protobuf import text_format

logging.basicConfig()
log = logging.getLogger("caffe_translator")
log.setLevel(logging.INFO)


def _StateMeetsRule(state, rule):
    """A function that reproduces Caffe's StateMeetsRule functionality."""
    if rule.HasField('phase') and rule.phase != state.phase:
        return False
    if rule.HasField('min_level') and state.level < rule.min_level:
        return False
    if rule.HasField('max_level') and state.level > rule.max_level:
        return False
    curr_stages = set(list(state.stage))
    # All stages in rule.stage should be present, otherwise it's not a match.
    if len(rule.stage) and any([s not in curr_stages for s in rule.stage]):
        return False
    # None of the stages in rule.not_stage should be present, otherwise it's
    # not a match.
    if len(rule.not_stage) and any([s in curr_stages for s in rule.not_stage]):
        return False
    # If no mismatch happened, it is a match.
    return True


def _ShouldInclude(net_state, layer):
    """A function that reproduces Caffe's inclusion and exclusion rule."""
    ret = (len(layer.include) == 0)
    # Check exclude rules: if any exclusion is met, we shouldn't include.
    ret &= not any([_StateMeetsRule(net_state, rule) for rule in layer.exclude])
    if len(layer.include):
        # Check include rules: if any inclusion is met, we should include.
        ret |= any([_StateMeetsRule(net_state, rule) for rule in layer.include])
    return ret
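
# Example (illustrative sketch, not part of the original file): a layer whose
# `include` list carries a TEST-phase rule is kept only when the net state is
# in the TEST phase.
#
#   state = caffe_pb2.NetState(phase=caffe_pb2.TEST)
#   rule = caffe_pb2.NetStateRule(phase=caffe_pb2.TEST)
#   layer = caffe_pb2.LayerParameter(name="prob", include=[rule])
#   assert _ShouldInclude(state, layer)      # matches the include rule
#   state.phase = caffe_pb2.TRAIN
#   assert not _ShouldInclude(state, layer)  # excluded in TRAIN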


class TranslatorRegistry(object):
    registry_ = {}

    @classmethod
    def Register(cls, op_name):
        """A decorator for registering layer translators."""

        def Wrapper(func):
            cls.registry_[op_name] = func
            return func

        return Wrapper

    @classmethod
    def TranslateLayer(cls, layer, pretrained_blobs, is_test):
        try:
            caffe_ops, params = cls.registry_[layer.type](
                layer, pretrained_blobs, is_test)
        except KeyError:
            raise KeyError('No translator registered for layer: %s yet.' %
                           str(layer))
        if caffe_ops is None:
            caffe_ops = []
        if type(caffe_ops) is not list:
            caffe_ops = [caffe_ops]
        return caffe_ops, params

    @classmethod
    def TranslateModel(
        cls,
        caffe_net,
        pretrained_net,
        is_test=False,
        net_state=None,
    ):
        net_state = caffe_pb2.NetState() if net_state is None else net_state
        net = caffe2_pb2.NetDef()
        net.name = caffe_net.name
        net_params = caffe2_pb2.TensorProtos()
        if len(caffe_net.layers) > 0:
            raise ValueError(
                'I think something is wrong. This translation script '
                'only accepts new style layers that are stored in the '
                'layer field.'
            )
        for layer in caffe_net.layer:
            if not _ShouldInclude(net_state, layer):
                log.info('Current net state does not need layer {}'
                         .format(layer.name))
                continue
            log.info('Translate layer {}'.format(layer.name))
            # Get the pretrained layer matching this layer's name.
            pretrained_layers = (
                [l for l in pretrained_net.layer
                 if l.name == layer.name] +
                [l for l in pretrained_net.layers
                 if l.name == layer.name]
            )
            if len(pretrained_layers) > 1:
                raise ValueError(
                    'huh? more than one pretrained layer of one name?')
            elif len(pretrained_layers) == 1:
                pretrained_blobs = [
                    utils.CaffeBlobToNumpyArray(blob)
                    for blob in pretrained_layers[0].blobs
                ]
            else:
                # No pretrained layer for the given layer name. We'll just
                # pass no parameter blobs.
                pretrained_blobs = []
            operators, params = cls.TranslateLayer(
                layer, pretrained_blobs, is_test)
            net.op.extend(operators)
            net_params.protos.extend(params)
        return net, net_params
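

# Example (illustrative sketch, not part of the original file): third-party
# code can extend the registry with a translator for a layer type not
# covered below. The layer and operator names here are hypothetical.
#
#   @TranslatorRegistry.Register("MyCustomLayer")
#   def TranslateMyCustomLayer(layer, pretrained_blobs, is_test):
#       caffe_op = BaseTranslate(layer, "MyCaffe2Op")
#       return caffe_op, []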


def TranslateModel(*args, **kwargs):
    return TranslatorRegistry.TranslateModel(*args, **kwargs)
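
# Example (illustrative sketch, not part of the original file): translating a
# deploy prototxt plus a .caffemodel that have been parsed into NetParameter
# messages, mirroring the __main__ block at the bottom of this file. The file
# names are hypothetical.
#
#   caffenet = caffe_pb2.NetParameter()
#   text_format.Merge(open('deploy.prototxt').read(), caffenet)
#   pretrained = caffe_pb2.NetParameter()
#   pretrained.ParseFromString(open('weights.caffemodel', 'rb').read())
#   net, net_params = TranslateModel(caffenet, pretrained, is_test=True)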


def ConvertTensorProtosToInitNet(net_params, input_name):
    """Takes the net_params returned from TranslateModel, and wraps it as an
    init net that contains GivenTensorFill ops.

    This is a very simple feature that only works with float tensors, and is
    only intended to be used in an environment where you want a single
    initialization file - for more complex cases, use a db to store the
    parameters.
    """
    init_net = caffe2_pb2.NetDef()
    for tensor in net_params.protos:
        if len(tensor.float_data) == 0:
            raise RuntimeError(
                "Only float tensors are supported in this util.")
        op = core.CreateOperator(
            "GivenTensorFill", [], [tensor.name],
            arg=[
                utils.MakeArgument("shape", list(tensor.dims)),
                utils.MakeArgument("values", tensor.float_data)])
        init_net.op.extend([op])
    init_net.op.extend([core.CreateOperator(
        "ConstantFill", [], [input_name], shape=[1])])
    return init_net
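
# Example (illustrative sketch, not part of the original file): materializing
# the translated parameters in a Caffe2 workspace by running the init net
# once. The 'data' input name is hypothetical.
#
#   init_net = ConvertTensorProtosToInitNet(net_params, 'data')
#   workspace.RunNetOnce(init_net)
#   print(workspace.Blobs())  # parameter blobs plus the 'data' placeholder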


def BaseTranslate(layer, caffe2_type):
    """A simple translate interface that maps the layer input and output."""
    caffe2_op = caffe2_pb2.OperatorDef()
    caffe2_op.type = caffe2_type
    caffe2_op.input.extend(layer.bottom)
    caffe2_op.output.extend(layer.top)
    return caffe2_op


def AddArgument(op, key, value):
    """Makes an argument based on the value type."""
    op.arg.extend([utils.MakeArgument(key, value)])


# Common translators for layers.


@TranslatorRegistry.Register("Input")
def TranslateInput(layer, pretrained_blobs, is_test):
    return [], []


@TranslatorRegistry.Register("Data")
def TranslateData(layer, pretrained_blobs, is_test):
    return [], []


# A function used in convolution, pooling and deconvolution to deal with
# conv- and pool-specific parameters.
def _TranslateStridePadKernelHelper(param, caffe_op):
    try:
        if (len(param.stride) > 1 or len(param.kernel_size) > 1 or
                len(param.pad) > 1):
            raise NotImplementedError(
                "Translator currently does not support non-conventional "
                "pad/kernel/stride settings."
            )
        stride = param.stride[0] if len(param.stride) else 1
        pad = param.pad[0] if len(param.pad) else 0
        kernel = param.kernel_size[0] if len(param.kernel_size) else 0
    except TypeError:
        # This catches the case of a PoolingParameter, which has
        # non-repeated pad, stride and kernel fields.
        stride = param.stride
        pad = param.pad
        kernel = param.kernel_size
    # Get stride
    if param.HasField("stride_h") or param.HasField("stride_w"):
        AddArgument(caffe_op, "stride_h", param.stride_h)
        AddArgument(caffe_op, "stride_w", param.stride_w)
    else:
        AddArgument(caffe_op, "stride", stride)
    # Get pad
    if param.HasField("pad_h") or param.HasField("pad_w"):
        if param.pad_h == param.pad_w:
            AddArgument(caffe_op, "pad", param.pad_h)
        else:
            AddArgument(caffe_op, "pad_t", param.pad_h)
            AddArgument(caffe_op, "pad_b", param.pad_h)
            AddArgument(caffe_op, "pad_l", param.pad_w)
            AddArgument(caffe_op, "pad_r", param.pad_w)
    else:
        AddArgument(caffe_op, "pad", pad)
    # Get kernel
    if param.HasField("kernel_h") or param.HasField("kernel_w"):
        AddArgument(caffe_op, "kernel_h", param.kernel_h)
        AddArgument(caffe_op, "kernel_w", param.kernel_w)
    else:
        AddArgument(caffe_op, "kernel", kernel)
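
# Example (illustrative sketch, not part of the original file): a
# ConvolutionParameter with repeated fields stride=[2], pad=[1] and
# kernel_size=[3] translates into scalar "stride", "pad" and "kernel"
# arguments on the target operator.
#
#   param = caffe_pb2.ConvolutionParameter(
#       stride=[2], pad=[1], kernel_size=[3])
#   op = caffe2_pb2.OperatorDef(type="Conv")
#   _TranslateStridePadKernelHelper(param, op)
#   # op.arg now holds stride=2, pad=1, kernel=3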


@TranslatorRegistry.Register("Convolution")
def TranslateConv(layer, pretrained_blobs, is_test):
    param = layer.convolution_param
    caffe_op = BaseTranslate(layer, "Conv")
    output = caffe_op.output[0]
    caffe_op.input.append(output + '_w')
    _TranslateStridePadKernelHelper(param, caffe_op)
    # weight
    params = [
        utils.NumpyArrayToCaffe2Tensor(pretrained_blobs[0], output + '_w')]
    # bias
    if len(pretrained_blobs) == 2:
        caffe_op.input.append(output + '_b')
        params.append(
            utils.NumpyArrayToCaffe2Tensor(
                pretrained_blobs[1].flatten(), output + '_b'))
    # Group convolution option
    if param.group != 1:
        AddArgument(caffe_op, "group", param.group)
    # Get dilation - not tested. If you have a model and this checks out,
    # please provide a test and uncomment this.
    if len(param.dilation) > 0:
        if len(param.dilation) == 1:
            AddArgument(caffe_op, "dilation", param.dilation[0])
        elif len(param.dilation) == 2:
            AddArgument(caffe_op, "dilation_h", param.dilation[0])
            AddArgument(caffe_op, "dilation_w", param.dilation[1])
    return caffe_op, params


@TranslatorRegistry.Register("Deconvolution")
def TranslateDeconv(layer, pretrained_blobs, is_test):
    param = layer.convolution_param
    if param.group > 1:
        raise NotImplementedError(
            "Translator currently does not support group deconvolution."
        )
    caffe_op = BaseTranslate(layer, "ConvTranspose")
    output = caffe_op.output[0]
    _TranslateStridePadKernelHelper(param, caffe_op)
    caffe_op.input.extend([output + '_w', output + '_b'])
    AddArgument(caffe_op, "order", "NCHW")
    weight = utils.NumpyArrayToCaffe2Tensor(pretrained_blobs[0], output + '_w')
    bias = utils.NumpyArrayToCaffe2Tensor(
        pretrained_blobs[1].flatten(), output + '_b'
    )
    return caffe_op, [weight, bias]


@TranslatorRegistry.Register("ReLU")
def TranslateRelu(layer, pretrained_blobs, is_test):
    return BaseTranslate(layer, "Relu"), []


@TranslatorRegistry.Register("Pooling")
def TranslatePool(layer, pretrained_blobs, is_test):
    param = layer.pooling_param
    if param.pool == caffe_pb2.PoolingParameter.MAX:
        caffe_op = BaseTranslate(layer, "MaxPool")
    elif param.pool == caffe_pb2.PoolingParameter.AVE:
        caffe_op = BaseTranslate(layer, "AveragePool")
    else:
        raise NotImplementedError(
            "Translator currently only supports MAX and AVE pooling.")
    _TranslateStridePadKernelHelper(param, caffe_op)
    AddArgument(caffe_op, "order", "NCHW")
    try:
        # In the Facebook port of Caffe, a torch_pooling field was added to
        # map the pooling computation of Torch. Essentially, it uses
        #   floor((height + 2 * padding - kernel) / stride) + 1
        # instead of
        #   ceil((height + 2 * padding - kernel) / stride) + 1
        # which is Caffe's version.
        # Torch pooling is actually the same as Caffe2 pooling, so we don't
        # need to do anything.
        is_torch_pooling = param.torch_pooling
    except AttributeError:
        is_torch_pooling = False
    if not is_torch_pooling:
        AddArgument(caffe_op, "legacy_pad",
                    caffe2_legacy_pb2.CAFFE_LEGACY_POOLING)
    if param.global_pooling:
        AddArgument(caffe_op, "global_pooling", 1)
    return caffe_op, []
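
# Worked example (not part of the original file): the floor/ceil rounding
# above only differs when (height + 2 * pad - kernel) is not divisible by
# stride.
#
#   import math
#   h, p, k, s = 8, 0, 3, 2
#   math.ceil((h + 2 * p - k) / float(s)) + 1   # -> 4.0 (Caffe)
#   math.floor((h + 2 * p - k) / float(s)) + 1  # -> 3.0 (Torch / Caffe2)
#
# This mismatch is why legacy_pad is set for models trained with Caffe's
# rounding.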


@TranslatorRegistry.Register("LRN")
def TranslateLRN(layer, pretrained_blobs, is_test):
    caffe_op = BaseTranslate(layer, "LRN")
    caffe_op.output.extend(['_' + caffe_op.output[0] + '_scale'])
    param = layer.lrn_param
    if param.norm_region != caffe_pb2.LRNParameter.ACROSS_CHANNELS:
        raise ValueError(
            "Does not support norm region other than across channels.")
    AddArgument(caffe_op, "size", int(param.local_size))
    AddArgument(caffe_op, "alpha", float(param.alpha))
    AddArgument(caffe_op, "beta", float(param.beta))
    AddArgument(caffe_op, "bias", float(param.k))
    AddArgument(caffe_op, "order", "NCHW")
    return caffe_op, []


@TranslatorRegistry.Register("InnerProduct")
def TranslateInnerProduct(layer, pretrained_blobs, is_test):
    param = layer.inner_product_param
    try:
        if param.axis != 1 or param.transpose:
            raise ValueError(
                "We don't have a test case for non-default axis and "
                "transpose cases yet, so we are disabling them for now. "
                "If you have a model with this, please do send us your "
                "model so we can update this support, and you are more "
                "than welcome to send a PR for it.")
    except AttributeError:
        # We might be using a historic Caffe protobuf that does not have the
        # axis and transpose arguments, so we will silently pass.
        pass
    caffe_op = BaseTranslate(layer, "FC")
    output = caffe_op.output[0]
    caffe_op.input.extend([output + '_w', output + '_b'])
    # To handle the old-style 4-dimensional blob (1, 1, dim_output, dim_input)
    # case, we always explicitly reshape the pretrained blob.
    if pretrained_blobs[0].ndim not in [2, 4]:
        raise ValueError("Unexpected weight ndim.")
    if (pretrained_blobs[0].ndim == 4 and
            list(pretrained_blobs[0].shape[:2]) != [1, 1]):
        raise ValueError(
            "If the pretrained blob has 4 dims (old-style Caffe), the first "
            "two should be of value 1, but I got " +
            str(pretrained_blobs[0].shape))
    weight = utils.NumpyArrayToCaffe2Tensor(
        pretrained_blobs[0].reshape(-1, pretrained_blobs[0].shape[-1]),
        output + '_w'
    )
    bias = utils.NumpyArrayToCaffe2Tensor(
        pretrained_blobs[1].flatten(), output + '_b'
    )
    return caffe_op, [weight, bias]


@TranslatorRegistry.Register("Dropout")
def TranslateDropout(layer, pretrained_blobs, is_test):
    caffe_op = BaseTranslate(layer, "Dropout")
    caffe_op.output.extend(['_' + caffe_op.output[0] + '_mask'])
    param = layer.dropout_param
    AddArgument(caffe_op, "ratio", param.dropout_ratio)
    if is_test:
        AddArgument(caffe_op, "is_test", 1)
    return caffe_op, []


@TranslatorRegistry.Register("Softmax")
def TranslateSoftmax(layer, pretrained_blobs, is_test):
    caffe_op = BaseTranslate(layer, "Softmax")
    return caffe_op, []


@TranslatorRegistry.Register("SoftmaxWithLoss")
def TranslateSoftmaxWithLoss(layer, pretrained_blobs, is_test):
    softmax_op = core.CreateOperator(
        "Softmax", [layer.bottom[0]],
        layer.bottom[0] + "_translator_autogen_softmax")
    xent_op = core.CreateOperator(
        "LabelCrossEntropy",
        [softmax_op.output[0], layer.bottom[1]],
        layer.bottom[0] + "_translator_autogen_xent")
    loss_op = core.CreateOperator(
        "AveragedLoss",
        xent_op.output[0],
        layer.top[0])
    return [softmax_op, xent_op, loss_op], []


@TranslatorRegistry.Register("Accuracy")
def TranslateAccuracy(layer, pretrained_blobs, is_test):
    caffe_op = BaseTranslate(layer, "Accuracy")
    if layer.accuracy_param.top_k != 1:
        AddArgument(caffe_op, "top_k", layer.accuracy_param.top_k)
    return caffe_op, []


@TranslatorRegistry.Register("Concat")
def TranslateConcat(layer, pretrained_blobs, is_test):
    caffe_op = BaseTranslate(layer, "Concat")
    caffe_op.output.extend(['_' + caffe_op.output[0] + '_dims'])
    AddArgument(caffe_op, "order", "NCHW")
    return caffe_op, []


@TranslatorRegistry.Register("TanH")
def TranslateTanH(layer, pretrained_blobs, is_test):
    caffe_op = BaseTranslate(layer, "Tanh")
    return caffe_op, []


@TranslatorRegistry.Register("InstanceNorm")
def TranslateInstanceNorm(layer, pretrained_blobs, is_test):
    caffe_op = BaseTranslate(layer, "InstanceNorm")
    output = caffe_op.output[0]
    weight = utils.NumpyArrayToCaffe2Tensor(
        pretrained_blobs[0].flatten(), output + '_w')
    bias = utils.NumpyArrayToCaffe2Tensor(
        pretrained_blobs[1].flatten(), output + '_b')
    caffe_op.input.extend([output + '_w', output + '_b'])
    AddArgument(caffe_op, "order", "NCHW")
    return caffe_op, [weight, bias]


@TranslatorRegistry.Register("Eltwise")
def TranslateElementWise(layer, pretrained_blobs, is_test):
    param = layer.eltwise_param
    # TODO(jiayq): if we have a protobuf that uses this, lift this constraint
    # and verify that we can correctly translate.
    if len(param.coeff) or param.operation != 1:
        raise RuntimeError("This eltwise layer is not yet supported.")
    caffe_op = BaseTranslate(layer, "Sum")
    return caffe_op, []


@TranslatorRegistry.Register("Scale")
def TranslateScale(layer, pretrained_blobs, is_test):
    mul_op = BaseTranslate(layer, "Mul")
    scale_param = layer.scale_param
    AddArgument(mul_op, "axis", scale_param.axis)
    AddArgument(mul_op, "broadcast", True)
    if len(mul_op.input) == 1:
        # The scale parameter is in the pretrained blobs.
        if scale_param.num_axes != 1:
            raise RuntimeError("This path has not been verified yet.")

        output = mul_op.output[0]
        mul_op_param = output + '_w'
        mul_op.input.append(mul_op_param)
        weights = []
        weights.append(utils.NumpyArrayToCaffe2Tensor(
            pretrained_blobs[0].flatten(), mul_op_param))

        add_op = None
        if len(pretrained_blobs) == 1:
            # No bias term in the Scale layer.
            pass
        elif len(pretrained_blobs) == 2:
            # Caffe's Scale layer supports a bias term such that it computes
            # (scale_param * X + bias), whereas Caffe2's Mul op doesn't.
            # Include a separate Add op for the bias after the Mul.
            add_op = copy.deepcopy(mul_op)
            add_op.type = "Add"
            add_op_param = output + '_b'
            internal_blob = output + "_internal"
            del mul_op.output[:]
            mul_op.output.append(internal_blob)
            del add_op.input[:]
            add_op.input.append(internal_blob)
            add_op.input.append(add_op_param)
            weights.append(utils.NumpyArrayToCaffe2Tensor(
                pretrained_blobs[1].flatten(), add_op_param))
        else:
            raise RuntimeError(
                "Unexpected number of pretrained blobs in Scale")

        caffe_ops = [mul_op]
        if add_op:
            caffe_ops.append(add_op)
        assert len(caffe_ops) == len(weights)
        return caffe_ops, weights
    elif len(mul_op.input) == 2:
        # TODO(jiayq): find a protobuf that uses this and verify.
        raise RuntimeError("This path has not been verified yet.")
    else:
        raise RuntimeError("Unexpected number of inputs.")


@TranslatorRegistry.Register("Reshape")
def TranslateReshape(layer, pretrained_blobs, is_test):
    caffe_op = BaseTranslate(layer, "Reshape")
    caffe_op.output.append("_" + caffe_op.input[0] + "_dims")
    reshape_param = layer.reshape_param
    AddArgument(caffe_op, 'shape', reshape_param.shape.dim)
    return caffe_op, []


@TranslatorRegistry.Register("Flatten")
def TranslateFlatten(layer, pretrained_blobs, is_test):
    param = layer.flatten_param
    if param.end_axis != -1:
        raise NotImplementedError("flatten_param.end_axis not supported yet.")

    if param.axis == 0:
        caffe_op = BaseTranslate(layer, "FlattenToVec")
    elif param.axis == 1:
        caffe_op = BaseTranslate(layer, "Flatten")
    else:
        # This could be a Reshape op, but the dim size is not known here.
        raise NotImplementedError(
            "Not supported yet for flatten_param.axis {}.".format(param.axis))

    return caffe_op, []


@TranslatorRegistry.Register("Sigmoid")
def TranslateSigmoid(layer, pretrained_blobs, is_test):
    caffe_op = BaseTranslate(layer, "Sigmoid")
    return caffe_op, []


@TranslatorRegistry.Register("ROIPooling")
def TranslateROIPooling(layer, pretrained_blobs, is_test):
    caffe_op = BaseTranslate(layer, "RoIPool")
    AddArgument(caffe_op, "order", "NCHW")

    if is_test:
        AddArgument(caffe_op, "is_test", is_test)
    else:
        # Only used for gradient computation
        caffe_op.output.append(caffe_op.output[0] + '_argmaxes')

    param = layer.roi_pooling_param
    if param.HasField('pooled_h'):
        AddArgument(caffe_op, 'pooled_h', param.pooled_h)
    if param.HasField('pooled_w'):
        AddArgument(caffe_op, 'pooled_w', param.pooled_w)
    if param.HasField('spatial_scale'):
        AddArgument(caffe_op, 'spatial_scale', param.spatial_scale)

    return caffe_op, []


@TranslatorRegistry.Register("PReLU")
def TranslatePRelu(layer, pretrained_blobs, is_test):
    caffe_op = BaseTranslate(layer, "PRelu")
    output = caffe_op.output[0]
    caffe_op.input.extend([output + '_Slope'])
    slope = utils.NumpyArrayToCaffe2Tensor(
        pretrained_blobs[0], output + '_Slope')

    return caffe_op, [slope]


@TranslatorRegistry.Register("Reduction")
def TranslateReduction(layer, pretrained_blobs, is_test):
    param = layer.reduction_param
    if param.operation == caffe_pb2.ReductionParameter.SUM:
        caffe_op = BaseTranslate(layer, "ReduceBackSum")
    elif param.operation == caffe_pb2.ReductionParameter.MEAN:
        caffe_op = BaseTranslate(layer, "ReduceBackMean")
    else:
        raise NotImplementedError("Not yet supported")

    if param.axis > 0:
        # We can't figure out the number of dims to reduce from a positive
        # axis for back reduction, since the shape info is not known here.
        raise NotImplementedError("Not yet supported")
    num_reduce_dim = -param.axis
    AddArgument(caffe_op, "num_reduce_dim", num_reduce_dim)

    return caffe_op, []


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Utility to convert pretrained caffe models to Caffe2 "
                    "models.")
    parser.add_argument("prototext", help="Caffe prototext.")
    parser.add_argument("caffemodel", help="Caffe trained model.")
    parser.add_argument("--init_net", help="Caffe2 initialization net.",
                        default="init_net.pb")
    parser.add_argument("--predict_net", help="Caffe2 prediction net.",
                        default="predict_net.pb")
    args = parser.parse_args()

    caffenet = caffe_pb2.NetParameter()
    caffenet_pretrained = caffe_pb2.NetParameter()
    input_proto = args.prototext
    input_caffemodel = args.caffemodel
    output_init_net = args.init_net
    output_predict_net = args.predict_net

    text_format.Merge(
        open(input_proto).read(), caffenet
    )
    caffenet_pretrained.ParseFromString(
        open(input_caffemodel, 'rb').read()
    )
    net, pretrained_params = TranslateModel(
        caffenet, caffenet_pretrained, is_test=True
    )

    # Assume there is one input and one output
    external_input = net.op[0].input[0]
    external_output = net.op[-1].output[0]

    net.external_input.extend([external_input])
    net.external_input.extend(
        [param.name for param in pretrained_params.protos])
    net.external_output.extend([external_output])
    init_net = ConvertTensorProtosToInitNet(pretrained_params, external_input)

    for param in pretrained_params.protos:
        workspace.FeedBlob(param.name, utils.Caffe2TensorToNumpyArray(param))
    with open(output_predict_net, 'wb') as f:
        f.write(net.SerializeToString())
    with open(output_init_net, 'wb') as f:
        f.write(init_net.SerializeToString())