1717# pylint: disable=invalid-name, import-self, unused-argument, unused-variable, inconsistent-return-statements
1818"""CoreML frontend."""
1919from __future__ import absolute_import as _abs
20+ import math
2021import numpy as np
2122import tvm
2223from .. import analysis
2627from ... import nd as _nd
2728from ..._ffi import base as _base
2829from .common import ExprTable
30+ from .common import infer_shape as _infer_shape
2931
3032__all__ = ['from_coreml' ]
3133
3234
3335def _NeuralNetworkImageScaler (op , inexpr , etab ):
36+ # TODO: we need to support more colorspace, such as rgb.
3437 # this changes the symbol
3538 biases = np .array ([op .blueBias , op .greenBias , op .redBias ]).reshape ([3 , 1 , 1 ])
3639 bias = etab .new_const (biases )
@@ -47,11 +50,16 @@ def _NeuralNetworkMeanImage(op, inexpr, etab):
4750
4851def _ConvolutionLayerParams (op , inexpr , etab ):
4952 """Convolution layer params."""
50- weights = etab .new_const (np .array (list (op .weights .floatValue )).reshape (
51- tuple ([op .outputChannels , op .kernelChannels ] + list (op .kernelSize ))))
53+ if op .isDeconvolution :
54+ weights = etab .new_const (np .array (list (op .weights .floatValue )).reshape (
55+ tuple ([op .kernelChannels , op .outputChannels ] + list (op .kernelSize ))))
56+ else :
57+ weights = etab .new_const (np .array (list (op .weights .floatValue )).reshape (
58+ tuple ([op .outputChannels , op .kernelChannels ] + list (op .kernelSize ))))
5259 dilation = list (op .dilationFactor )
5360 if not dilation :
5461 dilation = [1 , 1 ]
62+ N , C , H , W = _infer_shape (inexpr )
5563 params = {'channels' :op .outputChannels ,
5664 'kernel_size' :list (op .kernelSize ),
5765 'strides' :list (op .stride ),
@@ -60,30 +68,31 @@ def _ConvolutionLayerParams(op, inexpr, etab):
6068
6169 if op .WhichOneof ('ConvolutionPaddingType' ) == 'valid' :
6270 valid = op .valid
63- padding = [b .startEdgeSize for b in valid .paddingAmounts .borderAmounts ]
64- padding2 = [b .endEdgeSize for b in valid .paddingAmounts .borderAmounts ]
65- for i , j in zip (padding , padding2 ):
66- assert i == j , "Asymmetry padding not supported"
67- if padding :
68- params ['padding' ] = padding
71+ if valid .paddingAmounts .borderAmounts :
72+ assert len (valid .paddingAmounts .borderAmounts ) == 2
73+ pad_t = valid .paddingAmounts .borderAmounts [0 ].startEdgeSize
74+ pad_l = valid .paddingAmounts .borderAmounts [1 ].startEdgeSize
75+ pad_b = valid .paddingAmounts .borderAmounts [0 ].endEdgeSize
76+ pad_r = valid .paddingAmounts .borderAmounts [1 ].endEdgeSize
77+ inexpr = _op .nn .pad (data = inexpr , pad_width = ((0 , 0 ),
78+ (0 , 0 ),
79+ (pad_t , pad_b ),
80+ (pad_l , pad_r )))
6981 elif op .WhichOneof ('ConvolutionPaddingType' ) == 'same' :
82+ assert op .same .asymmetryMode == 0 , "Only support BOTTOM_RIGHT_HEAVY mode, " \
83+ "which is used by tf/caffe and so on"
7084 kernel = params ['kernel_size' ]
71- pad_h = kernel [ 0 ] - 1
72- pad_w = kernel [1 ] - 1
73- pad_t = pad_h // 2
74- pad_l = pad_w // 2
75- pad_b = pad_h - pad_t
76- pad_r = pad_w - pad_l
77- assert pad_t == pad_r and pad_l == pad_b , "Asymmetry padding not supported"
78- params [ 'padding' ] = [ pad_t , pad_l ]
85+ strides = params [ 'strides' ]
86+ pad_t , pad_b = get_pad_value ( H , kernel [0 ], strides [ 0 ])
87+ pad_l , pad_r = get_pad_value ( W , kernel [ 1 ], strides [ 1 ])
88+ inexpr = _op . nn . pad ( data = inexpr , pad_width = (( 0 , 0 ),
89+ ( 0 , 0 ),
90+ ( pad_t , pad_b ),
91+ ( pad_l , pad_r )))
92+
7993 else :
8094 raise NotImplementedError ("Valid/Same convolution padding implemented" )
8195
82- # consume padding layer
83- if etab .in_padding :
84- params ['padding' ] = [sum (x ) for x in zip (params .get ('padding' , [0 , 0 ]), etab .paddings )]
85- etab .clear_padding ()
86-
8796 if op .isDeconvolution :
8897 ret = _op .nn .conv2d_transpose (data = inexpr , weight = weights , ** params )
8998 else :
@@ -193,11 +202,13 @@ def _PoolingLayerParams(op, inexpr, etab):
193202
194203 if op .WhichOneof ('PoolingPaddingType' ) == 'valid' :
195204 valid = op .valid
196- padding = [b .startEdgeSize for b in valid .paddingAmounts .borderAmounts ]
197- padding2 = [b .endEdgeSize for b in valid .paddingAmounts .borderAmounts ]
198- for i , j in zip (padding , padding2 ):
199- assert i == j
200- params ['padding' ] = padding
205+ if valid .paddingAmounts .borderAmounts :
206+ assert len (valid .paddingAmounts .borderAmounts ) == 2
207+ pad_t = valid .paddingAmounts .borderAmounts [0 ].startEdgeSize
208+ pad_l = valid .paddingAmounts .borderAmounts [1 ].startEdgeSize
209+ pad_b = valid .paddingAmounts .borderAmounts [0 ].endEdgeSize
210+ pad_r = valid .paddingAmounts .borderAmounts [1 ].endEdgeSize
211+ params ['padding' ] = [pad_t , pad_l , pad_b , pad_r ]
201212 elif op .WhichOneof ('PoolingPaddingType' ) == 'includeLastPixel' :
202213 # I don't know if this is correct
203214 valid = op .includeLastPixel
@@ -209,12 +220,6 @@ def _PoolingLayerParams(op, inexpr, etab):
209220 op_name = op .WhichOneof ('PoolingPaddingType' )
210221 raise tvm .error .OpAttributeUnImplemented (msg .format (op_name ))
211222
212- # consume padding layer
213- if etab .in_padding :
214- params ['padding' ] = [sum (x ) for x in zip (
215- params .get ('padding' , [0 , 0 ]), etab .paddings )]
216- etab .clear_padding ()
217-
218223 if op .type == 0 :
219224 return _op .nn .max_pool2d (inexpr , ** params )
220225 if op .type == 1 :
@@ -276,21 +281,24 @@ def _FlattenLayerParams(op, inexpr, etab):
276281
277282
def _PaddingLayerParams(op, inexpr, etab):
    """Padding layer params.

    Converts a CoreML constant-padding layer into a Relay ``nn.pad`` over the
    spatial (H, W) axes of an NCHW tensor.

    Parameters
    ----------
    op : PaddingLayerParams protobuf message
        The CoreML padding layer description.
    inexpr : relay.Expr
        Input expression to be padded.
    etab : ExprTable
        Expression table (unused here, kept for the uniform converter signature).

    Returns
    -------
    relay.Expr
        The padded expression, or ``inexpr`` unchanged when no padding
        amounts are specified.
    """
    if op.WhichOneof('PaddingType') != 'constant':
        raise tvm.error.OpNotImplemented(
            'Non-constant padding is not supported in frontend CoreML.')
    constant = op.constant
    if constant.value != 0:
        raise tvm.error.OpAttributeUnImplemented(
            '{} is not supported in operator Padding.'.format(constant.value))

    borders = op.paddingAmounts.borderAmounts
    if not borders:
        # No explicit padding amounts were given: nothing to pad.
        # Mirrors the empty-borderAmounts handling in the conv/pool converters.
        return inexpr
    # CoreML encodes [height border, width border] for 2D layers.
    assert len(borders) == 2
    pad_t = borders[0].startEdgeSize
    pad_b = borders[0].endEdgeSize
    pad_l = borders[1].startEdgeSize
    pad_r = borders[1].endEdgeSize
    return _op.nn.pad(data=inexpr, pad_width=((0, 0),
                                              (0, 0),
                                              (pad_t, pad_b),
                                              (pad_l, pad_r)))
294302
295303
296304def _PermuteLayerParams (op , inexpr , etab ):
@@ -372,6 +380,32 @@ def _MinLayerParams(op, inexpr, etab):
372380 'MinLayerParams' : _MinLayerParams ,
373381}
374382
# SAME padding: https://www.tensorflow.org/api_guides/python/nn
def get_pad_value(data, kernel, stride):
    """Get the pad tuple of value for SAME padding

    Computes the (before, after) padding along one spatial dimension so
    that the output size equals ceil(data / stride), with any odd total
    padding placed on the trailing edge (bottom/right-heavy, as in
    TensorFlow's SAME scheme).

    Parameters
    ----------
    data:
        1D input data

    kernel:
        1D input kernel

    stride:
        1D input stride

    Returns
    -------
    pad tuple of value
    """
    # Output length under SAME padding: ceiling division of data by stride.
    out_size = (data + stride - 1) // stride
    # Total padding required so the last window still fits; clamp at zero.
    total_pad = (out_size - 1) * stride + kernel - data
    if total_pad < 0:
        total_pad = 0
    pad_before = total_pad // 2
    pad_after = total_pad - pad_before
    return pad_before, pad_after
408+
375409
376410def coreml_op_to_relay (op , inname , outname , etab ):
377411 """Convert coreml layer to a Relay expression and update the expression table.
@@ -399,9 +433,7 @@ def coreml_op_to_relay(op, inname, outname, etab):
399433 insym = [etab .get_expr (i ) for i in inname ]
400434 ret = _convert_map [classname ](op , insym , etab )
401435 if outname :
402- etab .set_expr (outname , ret )
403- if classname != 'PaddingLayerParams' :
404- assert not etab .in_padding , "Previous padding not consumed by conv/pool"
436+ etab .set_expr (outname , ret , force_override = True )
405437
406438
407439def from_coreml (model , shape = None ):
@@ -442,10 +474,19 @@ def from_coreml(model, shape=None):
442474 for pp in cc .preprocessing :
443475 whichpp = pp .WhichOneof ('preprocessor' )
444476 ppmethod = getattr (pp , whichpp )
445- # the NeuralNetworkImageScalar doesn't seem to have a featureName?
446477 if whichpp == 'scaler' :
478+ # Be careful we maybe only preprocess one input when we have multi inputs
479+ # which is stored in pp.featureName. See unit testing verify_image_scaler
480+ # in test_forward.py for CoreML.
447481 for i in spec .description .input :
448- coreml_op_to_relay (ppmethod , i .name , i .name , etab )
482+ # we have multi inputs
483+ if len (spec .description .input ) > 1 :
484+ assert pp .featureName != ''
485+ if i .name == pp .featureName :
486+ coreml_op_to_relay (ppmethod , i .name , i .name , etab )
487+ else :
488+ assert pp .featureName == ''
489+ coreml_op_to_relay (ppmethod , i .name , i .name , etab )
449490 else :
450491 coreml_op_to_relay (ppmethod , pp .featureName , pp .featureName , etab )
451492
0 commit comments