@@ -112,11 +112,55 @@ def _mx_zeros(inputs, attrs):
112112 return _op .zeros (shape = shape , dtype = dtype )
113113
114114
def _mx_conv(inputs, attrs):
    """Dispatch an MXNet Convolution to the 1D or 2D converter.

    The rank of the "kernel" attribute decides which converter runs;
    any other rank is rejected.
    """
    kernel_size = attrs.get_int_tuple("kernel")
    if len(kernel_size) == 1:
        return _mx_conv1d(inputs, attrs)
    if len(kernel_size) == 2:
        return _mx_conv2d(inputs, attrs)
    raise tvm.error.OpAttributeInvalid(
        '1D or 2D kernels only are supported for operator Convolution')
def _mx_conv1d(inputs, attrs):
    """Convert an MXNet 1D Convolution by lifting it to conv2d.

    A unit height axis is prepended to the data, the kernel and every
    spatial attribute, conv2d runs in NCHW/OIHW layout, and the unit
    axis is squeezed away afterwards.
    """
    kernel_size = attrs.get_int_tuple("kernel")
    if len(kernel_size) != 1:
        raise tvm.error.OpAttributeInvalid(
            'Non 1D or 2D kernels are not supported for operator Convolution')
    data_layout = attrs.get_str("layout", "NCW")
    # MXNet Conv1D only supports 'NCW' layout for now.
    if data_layout != "NCW":
        raise tvm.error.OpAttributeInvalid(
            'Only "NCW" data layout is supported for 1D Convolution')
    channel_axis = 1

    # Every spatial attribute gains a leading unit entry for the fake
    # height dimension introduced below.
    conv2d_attrs = {
        "channels": attrs.get_int("num_filter"),
        "kernel_size": (1,) + kernel_size,
        "strides": (1,) + attrs.get_int_tuple("stride", (1,)),
        "padding": (0,) + attrs.get_int_tuple("pad", (0,)),
        "dilation": (1,) + attrs.get_int_tuple("dilate", (1,)),
        "groups": attrs.get_int("num_group", 1),
        "data_layout": "NCHW",
        "kernel_layout": "OIHW",
    }
    use_bias = not attrs.get_bool("no_bias", False)
    data = _op.expand_dims(inputs[0], axis=2)
    kernel = _op.expand_dims(inputs[1], axis=2)
    res = _op.nn.conv2d(data, kernel, **conv2d_attrs)
    if use_bias:
        assert len(inputs) == 3
        res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
    return _op.squeeze(res, axis=[2])
157+
158+
115159def _mx_conv2d (inputs , attrs ):
116160 kernel_size = attrs .get_int_tuple ("kernel" )
117161 if len (kernel_size ) != 2 :
118162 raise tvm .error .OpAttributeInvalid (
119- 'Non- 2D kernels are not supported for operator Conv2D. ' )
163+ 'Non 1D or 2D kernels are not supported for operator Convolution ' )
120164 data_layout = attrs .get_str ("layout" , "NCHW" )
121165 channel_axis = _get_channel_axis (data_layout , "conv2d" )
122166
@@ -142,6 +186,51 @@ def _mx_conv2d(inputs, attrs):
142186 return res
143187
144188
def _mx_conv_transpose(inputs, attrs):
    """Dispatch an MXNet Deconvolution to the 1D or 2D transpose converter.

    The rank of the "kernel" attribute decides which converter runs;
    any other rank is rejected.
    """
    kernel_size = attrs.get_int_tuple("kernel")
    if len(kernel_size) == 1:
        return _mx_conv1d_transpose(inputs, attrs)
    if len(kernel_size) == 2:
        return _mx_conv2d_transpose(inputs, attrs)
    raise tvm.error.OpAttributeInvalid(
        '1D or 2D kernels only are supported for operator Convolution')
199+
def _mx_conv1d_transpose(inputs, attrs):
    """Convert an MXNet 1D Deconvolution by lifting it to conv2d_transpose.

    A unit height axis is prepended to the data, the kernel and every
    spatial attribute, conv2d_transpose runs in NCHW/OIHW layout, and
    the unit axis is squeezed away afterwards.
    """
    if "target_shape" in attrs.attrs:
        # Fixed: the message previously said "Conv2D-transpose", copied
        # from the 2D converter; this is the 1D path.
        raise tvm.error.OpAttributeUnImplemented(
            'Attribute "target_shape" is not supported for operator Conv1D-transpose.')
    data_layout = attrs.get_str("layout", "NCW")
    if data_layout != "NCW":
        raise tvm.error.OpAttributeInvalid(
            'Only "NCW" data layout is supported for 1D Convolution')
    channel_axis = 1

    new_attrs = {}
    new_attrs["channels"] = attrs.get_int("num_filter")
    # Prefix every spatial attribute with a unit entry for the fake
    # height dimension introduced below.
    new_attrs["kernel_size"] = (1,) + attrs.get_int_tuple("kernel")
    new_attrs["strides"] = (1,) + attrs.get_int_tuple("stride", (1,))
    new_attrs["output_padding"] = (0,) + attrs.get_int_tuple("adj", (0,))
    new_attrs["padding"] = (0,) + attrs.get_int_tuple("pad", (0,))
    new_attrs["dilation"] = (1,) + attrs.get_int_tuple("dilate", (1,))
    new_attrs["groups"] = attrs.get_int("num_group", 1)
    new_attrs["data_layout"] = "NCHW"
    new_attrs["kernel_layout"] = "OIHW"
    # NOTE: MXNet Deconvolution defaults no_bias to True, unlike
    # Convolution which defaults it to False.
    use_bias = not attrs.get_bool("no_bias", True)
    data = _op.expand_dims(inputs[0], axis=2)
    kernel = _op.expand_dims(inputs[1], axis=2)
    res = _op.nn.conv2d_transpose(data, kernel, **new_attrs)

    if use_bias:
        assert len(inputs) == 3
        res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
    res = _op.squeeze(res, axis=[2])
    return res
232+
233+
145234def _mx_conv2d_transpose (inputs , attrs ):
146235 if "target_shape" in attrs .attrs :
147236 raise tvm .error .OpAttributeUnImplemented (
@@ -257,13 +346,7 @@ def _mx_slice(inputs, attrs):
257346 if end is None :
258347 raise tvm .error .OpAttributeRequired (
259348 'Attribute "end" not found in operator Slice.' )
260- if None in begin :
261- data_shape = _infer_type (inputs [0 ]).checked_type .shape
262- for i , beg in enumerate (begin ):
263- if beg is None :
264- assert end [i ] is None
265- begin [i ] = 0
266- end [i ] = data_shape [i ]
349+ begin = tuple (x if x is not None else 0 for x in begin )
267350 new_attrs = {'begin' : begin , 'end' : end }
268351 if stride is not None :
269352 new_attrs ['strides' ] = stride
@@ -373,6 +456,27 @@ def _mx_expand_dims(inputs, attrs):
373456 axis = attrs .get_int ("axis" )
374457 return _op .expand_dims (inputs [0 ], axis = axis )
375458
def _mx_pad(inputs, attrs):
    """Convert the MXNet pad operator to relay nn.pad.

    MXNet supplies "pad_width" flattened as
    (before_1, after_1, before_2, after_2, ...); it is regrouped here
    into per-axis (before, after) pairs.
    """
    pad_mode = attrs.get_str('mode', None)
    if pad_mode is None:
        raise tvm.error.OpAttributeRequired(
            'Attribute "mode" not found in operator pad.')
    if pad_mode not in ['constant', 'edge', 'reflect']:
        # Fixed: this previously interpolated the undefined name `mode`,
        # which raised NameError instead of the intended error.
        raise tvm.error.OpAttributeInvalid(
            'Value ' + pad_mode + ' in attribute "mode" is not valid')
    pad_width = attrs.get_int_tuple('pad_width', None)
    if pad_width is None:
        raise tvm.error.OpAttributeRequired(
            'Attribute "pad_width" not found in operator pad.')
    if None in pad_width:
        # Fixed: the message previously referred to "operator Slice".
        raise tvm.error.OpAttributeInvalid(
            'Value None in attribute "pad_width" of operator pad is not valid.')
    constant_value = attrs.get_float('constant_value', 0.0)
    # Pair up (before, after) padding amounts for each axis.
    padding = tuple((b, a) for b, a in zip(pad_width[::2], pad_width[1::2]))
    return _op.nn.pad(data=inputs[0],
                      pad_width=padding,
                      pad_value=constant_value,
                      pad_mode=pad_mode)
376480
377481def _mx_leaky_relu (inputs , attrs ):
378482 act_type = attrs .get_str ("act_type" )
@@ -931,6 +1035,8 @@ def _mx_one_hot(inputs, attrs):
9311035 "ones_like" ,
9321036 "where" ,
9331037 "gather_nd" ,
1038+ "cos" ,
1039+ "sin"
9341040]
9351041
9361042_convert_map = {
@@ -943,6 +1049,7 @@ def _mx_one_hot(inputs, attrs):
9431049 "broadcast_mod" : _rename (_op .mod ),
9441050 "broadcast_maximum" : _rename (_op .maximum ),
9451051 "broadcast_minimum" : _rename (_op .minimum ),
1052+ "arctan" : _rename (_op .atan ),
9461053 "broadcast_equal" : _mx_compare (_op .equal , _rename ),
9471054 "broadcast_not_equal" : _mx_compare (_op .not_equal , _rename ),
9481055 "broadcast_greater" : _mx_compare (_op .greater , _rename ),
@@ -1018,9 +1125,9 @@ def _mx_one_hot(inputs, attrs):
10181125 "_zeros" : _mx_zeros ,
10191126 "FullyConnected" : _mx_fully_connected ,
10201127 "Activation" : _mx_activations ,
1021- "Convolution" : _mx_conv2d ,
1128+ "Convolution" : _mx_conv ,
10221129 "Convolution_v1" : _mx_conv2d ,
1023- "Deconvolution" : _mx_conv2d_transpose ,
1130+ "Deconvolution" : _mx_conv_transpose ,
10241131 "Pooling" : _mx_pooling ,
10251132 "Pooling_v1" : _mx_pooling ,
10261133 "Dropout" : _mx_dropout ,
@@ -1044,6 +1151,8 @@ def _mx_one_hot(inputs, attrs):
10441151 "_full" : _mx_full ,
10451152 "repeat" : _mx_repeat ,
10461153 "tile" : _mx_tile ,
1154+ "pad" : _mx_pad ,
1155+ "Pad" : _mx_pad ,
10471156 "take" : _mx_take ,
10481157 "reverse" : _mx_reverse ,
10491158 "squeeze" : _mx_squeeze ,
0 commit comments