
Commit 236d210

use latest error handling conventions

1 parent e2ecae4 commit 236d210
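This commit replaces the frontend-local raise_* helpers (raise_operator_unimplemented, raise_attribute_invalid, and so on) with direct raises of the exception classes in tvm.error, each carrying a full sentence naming the operator or attribute and the frontend. A minimal sketch of the new convention; convert_operator, convert_map, and op_type are illustrative names, not taken verbatim from the commit:

import tvm

def convert_operator(op_type, convert_map):
    # Old convention (removed by this commit):
    #     raise_operator_unimplemented(op_type)
    # New convention: raise a specific class from tvm.error with a
    # self-contained message.
    if op_type not in convert_map:
        raise tvm.error.OpNotImplemented(
            'Operator {} is not supported.'.format(op_type))
    return convert_map[op_type]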

18 files changed (+347, -236 lines)

nnvm/python/nnvm/frontend/__init__.py

Lines changed: 0 additions & 7 deletions
@@ -7,10 +7,3 @@
 from .darknet import from_darknet
 from .tensorflow import from_tensorflow
 from .caffe2 import from_caffe2
-from .common import raise_not_supported, get_nnvm_op, required_attr, \
-                    warn_not_used, parse_tshape, parse_bool_str
-from tvm.error_handling import raise_attribute_required, \
-                               raise_attribute_invalid, \
-                               raise_operator_unimplemented, \
-                               raise_attribute_unimplemented, \
-                               warn_not_used

nnvm/python/nnvm/frontend/caffe2.py

Lines changed: 6 additions & 4 deletions
@@ -3,7 +3,7 @@
 from __future__ import absolute_import as _abs
 import tvm
 from nnvm import symbol as _sym
-from nnvm.frontend.common import get_nnvm_op, Renamer, AttrConverter as AttrCvt
+from .common import get_nnvm_op
 from .onnx_caffe2_utils import dimension_picker, dimension_constraint, infer_channels, revert_caffe2_pad
 from . import onnx
 
@@ -73,7 +73,8 @@ def get_converter(cls):
 
         if hasattr(cls, '_impl'):
            return getattr(cls, '_impl')
-        raise_operator_unimplemented(cls.__name__)
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not implemented in frontend Caffe2.'.format(cls.__name__))
 
 
 _caffe2_internal_args = {
@@ -175,7 +176,7 @@ def _get_axis_from_order_str(order):
                 return 1
             if order == 'NHWC':
                 return 3
-            raise_attribute_invalid(order, 'storage order', 'concat')
+            raise tvm.error.OpAttributeInvalid('Value {} in attribute {} of operator {} is not valid.'.format(order, 'order', 'Concat'))
 
         return AttrCvt(
             op_name='concatenate',
@@ -425,7 +426,8 @@ def _convert_operator(self,
             # Add a sanitizing step to convert all byte strings in args to strings
             sym = convert_map[op_type](inputs, args, self._params)
         else:
-            raise_operator_unimplemented(op_type)
+            raise tvm.error.OpNotImplemented(
+                'Operator {} is not supported in frontend Caffe2.'.format(op_type))
         return sym
 
 

nnvm/python/nnvm/frontend/common.py

Lines changed: 4 additions & 2 deletions
@@ -7,13 +7,15 @@
 def get_nnvm_op(op_name):
     op = getattr(_sym, op_name)
     if not op:
-        raise_operator_unimplemented(op_name)
+        raise OpNotImplemented(
+            'Operator {} is not supported.'.format(op))
     return op
 
 def required_attr(attr, key, op_name):
     assert isinstance(attr, dict)
     if key not in attr:
-        raise_attribute_required(key, op_name)
+        raise OpAttributeRequired(
+            'Required attribute {} not found in operator {}'.format(key, op_name))
     return attr[key]
 
 def parse_tshape(tshape):
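For reference, a short sketch of how the frontends call required_attr after this change; the attribute dict is made up, and the example assumes OpAttributeRequired is in scope (e.g. imported from tvm.error):

attrs = {'kernel': '(3, 3)'}
required_attr(attrs, 'kernel', 'conv2d')   # returns '(3, 3)'
required_attr(attrs, 'stride', 'conv2d')   # raises OpAttributeRequired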

nnvm/python/nnvm/frontend/coreml.py

Lines changed: 21 additions & 14 deletions
@@ -2,11 +2,10 @@
 """CoreML frontend."""
 from __future__ import absolute_import as _abs
 import numpy as np
-
 import tvm
+from .common import SymbolTable
 from .. import symbol as _sym
 from .._base import string_types
-from .common import SymbolTable
 
 __all__ = ['from_coreml']
 
@@ -83,7 +82,8 @@ def BatchnormLayerParams(op, insym, symtab):
     """Get layer of batchnorm parameter"""
     # this changes the symbol
     if op.instanceNormalization:
-        raise_operator_unimplemented('instance normalization')
+        msg = 'Operator "instance normalization" is not supported in frontend CoreML.'
+        raise tvm.error.OpNotImplemented(msg)
     else:
         params = {'gamma':symtab.new_const(list(op.gamma.floatValue)),
                   'beta':symtab.new_const(list(op.beta.floatValue)),
@@ -136,7 +136,8 @@ def ActivationParams(op, insym, symtab):
         betasym = symtab.new_const(beta)
         return _sym.broadcast_mul(_sym.log(_sym.broadcast_add(
             _sym.exp(insym), betasym)), alphasym)
-    raise_operator_unimplemented(whichActivation)
+    raise tvm.error.OpNotImplemented(
+        'Operator {} is not supported in frontend CoreML.'.format(whichActivation))
 
 def ScaleLayerParams(op, insym, symtab):
     """Scale layer params."""
@@ -158,7 +159,8 @@ def PoolingLayerParams(op, insym, symtab):
             return _sym.global_max_pool2d(insym)
         if op.type == 1:
             return _sym.global_avg_pool2d(insym)
-        raise_operator_unimplemented('pooling (not max or average)')
+        raise tvm.error.OpNotImplemented(
+            'Operator pooling (not max or average) is not supported in frontend CoreML.')
 
     else:
         params = {'pool_size':list(op.kernelSize),
@@ -178,8 +180,8 @@ def PoolingLayerParams(op, insym, symtab):
             params['padding'] = padding
             params['ceil_mode'] = True
         else:
-            raise_attribute_invalid(op.WhichOneof('PoolingPaddingType'),
-                                    'PoolingPaddingType', 'pooling')
+            msg = 'Value {} in attribute PoolingPaddingType of operator Pooling is not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(op.WhichOneof('PoolingPaddingType')))
 
         # consume padding layer
         if symtab.in_padding:
@@ -191,7 +193,8 @@ def PoolingLayerParams(op, insym, symtab):
             return _sym.max_pool2d(insym, **params)
         if op.type == 1:
             return _sym.avg_pool2d(insym, **params)
-        raise_operator_unimplemented('pooling (not max or average)')
+        msg = 'Operator pooling (not max or average) is not supported in frontend CoreML.'
+        raise tvm.error.OpNotImplemented(msg)
 
 def SoftmaxLayerParams(op, insym, symtab):
     return _sym.softmax(_sym.flatten(insym))
@@ -230,7 +233,8 @@ def ConcatLayerParams(op, insyms, symtab):
     if not isinstance(insyms, list):
         insyms = [insyms]
     if op.sequenceConcat:
-        raise_operator_unimplemented('sequence concat')
+        raise tvm.error.OpNotImplemented(
+            'Operator Sequence Concat is not supported in frontend CoreML.')
     ret = _sym.concatenate(*insyms, axis=1)
     return ret
 
@@ -244,14 +248,16 @@ def PaddingLayerParams(op, insym, symtab):
     if op.WhichOneof('PaddingType') == 'constant':
         constant = op.constant
         if constant.value != 0:
-            raise_attribute_invalid(constant.value, 'padding value', 'padding')
+            msg = 'Value {} in attribute "padding value" of operator Padding is not valid.'
+            raise tvm.error.OpAttributeInvalid(msg.format(constant.value))
         padding = [b.startEdgeSize for b in op.paddingAmounts.borderAmounts]
         padding2 = [b.endEdgeSize for b in op.paddingAmounts.borderAmounts]
         for i, j in zip(padding, padding2):
             assert i == j
         symtab.set_padding(padding)
     else:
-        raise_operator_unimplemented('non-constant padding')
+        raise tvm.error.OpNotImplemented(
+            'Operator "non-constant padding" is not supported in frontend CoreML.')
     return insym
 
 def PermuteLayerParams(op, insym, symtab):
@@ -260,8 +266,8 @@ def PermuteLayerParams(op, insym, symtab):
 
 def UpsampleLayerParams(op, insym, symtab):
     if op.scalingFactor[0] != op.scalingFactor[1]:
-        raise_attribute_invalid(op.scalingFactor, 'scaling factors',
-                                'upsample')
+        raise tvm.error.OpAttributeInvalid(
+            'Height and width scaling factors of Upsample operator must be equal.')
     interpolationMode = 'NEAREST_NEIGHBOR' if op.mode == 0 else 'BILINEAR'
     return _sym.upsampling(insym, scale=op.scalingFactor[0], method=interpolationMode)
 
@@ -342,7 +348,8 @@ def coreml_op_to_nnvm(op, inname, outname, symtab):
     """
     classname = type(op).__name__
     if classname not in _convert_map:
-        raise_operator_unimplemented(classname)
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported in frontend CoreML.'.format(classname))
     if isinstance(inname, string_types):
         insym = symtab.get_var(inname)
     else:

nnvm/python/nnvm/frontend/darknet.py

Lines changed: 33 additions & 17 deletions
@@ -6,6 +6,7 @@
 import numpy as np
 import tvm
 from .. import symbol as _sym
+from .common import get_nnvm_op, required_attr, parse_tshape, parse_bool_str
 
 class LAYERTYPE(object):
     """Darknet LAYERTYPE Class constant."""
@@ -61,7 +62,8 @@ def _darknet_maxpooling(inputs, attrs):
     """Process the max pool 2d operation."""
     kernel = parse_tshape(required_attr(attrs, 'kernel', 'maxpool'))
     if len(kernel) != 1:
-        raise_attribute_unimplemented('non-2d kernel', 'pool_2d')
+        raise tvm.error.OpAttributeUnimplemented(
+            'Non-2D kernels for Max Pooling are not supported in frontend Darknet.')
 
     op_name, new_attrs = 'max_pool2d', {}
     strides = int(attrs.get('stride', (1, 1)))
@@ -79,7 +81,8 @@ def _darknet_avgpooling(inputs, attrs):
     """Process the average pool 2d operation."""
     kernel = parse_tshape(required_attr(attrs, 'kernel', 'avgpool'))
     if len(kernel) != 1:
-        raise_attribute_unimplemented('non-2d kernel', 'pool_2d')
+        raise tvm.error.OpAttributeUnimplemented(
+            'Non-2D kernels for Average Pooling are not supported in frontend Darknet.')
 
     op_name, new_attrs = 'avg_pool2d', {}
     strides = int(attrs.get('stride', (1, 1)))
@@ -103,10 +106,12 @@ def _darknet_conv2d(inputs, attrs):
     """Process the convolution 2d operation."""
    kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d'))
     if len(kernel) != 1:
-        raise_attribute_unimplemented('non 2d kernel', 'conv2d')
+        raise tvm.error.OpAttributeUnimplemented('Non-2D kernels for Conv2D are unsupported '
+                                                 'in frontend Darknet.')
     layout = attrs.get('layout', 'NCHW')
     if layout not in ['NCHW', 'NHWC']:
-        raise_attribute_invalid(layout, 'layout', 'conv2d')
+        raise tvm.error.OpAttributeInvalid(
+            'Value {} in attribute "layout" of operator Conv2D is not valid.'.format(layout))
     strides = int(attrs.get('stride', (1, 1)))
     pads = int(attrs.get('pad', (0, 0)))
 
@@ -142,13 +147,16 @@ def _darknet_conv2d(inputs, attrs):
 def _darknet_conv2d_transpose(inputs, attrs):
     """Process the convolution 2d transpose operation."""
     if 'target_shape' in attrs:
-        raise_attribute_unimplemented('target_shape', 'conv2d_transpose')
+        raise tvm.error.OpAttributeUnimplemented(
+            'Attribute "target_shape" is not supported in operator Conv2D-transpose.')
     kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d_transpose'))
     if len(kernel) != 2:
-        raise_attribute_unimplemented('non-2d kernel', 'conv2d_transpose')
+        raise tvm.error.OpAttributeUnimplemented(
+            'Non-2D kernels are not supported in operator Conv2D-transpose.')
     layout = attrs.get('layout', 'NCHW')
     if layout not in ['NCHW', 'NHWC']:
-        raise_attribute_invalid(layout, 'layout', 'conv2d_transpose')
+        msg = 'Value {} in attribute "layout" of operator Conv2D-transpose is not valid.'
+        raise tvm.error.OpAttributeInvalid(msg.format(layout))
     op_name, new_attrs = 'conv2d_transpose', {}
     new_attrs['channels'] = required_attr(attrs, 'num_filter', 'conv2d_transpose')
     new_attrs['kernel_size'] = kernel
@@ -222,7 +230,8 @@ def _darknet_dropout(inputs, attrs):
 def _darknet_reshape(inputs, attrs):
     """Process the reshape operation."""
     if parse_bool_str(attrs, 'reverse'):
-        raise_attribute_unimplemented('reverse', 'reshape')
+        raise tvm.error.OpAttributeUnimplemented(
+            'Attribute "reverse" is not supported in operator Reshape.')
     op_name, new_attrs = 'reshape', {}
     new_attrs['shape'] = required_attr(attrs, 'shape', 'reshape')
     return get_nnvm_op(op_name)(*inputs, **new_attrs), None
@@ -324,7 +333,8 @@ def _darknet_activations(inputs, attrs):
     elif ACTIVATION.ELU == act:
         act_type = 'elu'
     else:
-        raise_operator_unimplemented('act: ' + act)
+        raise tvm.error.OpNotImplemented(
+            'Operator act: {} is not supported in framework Darknet.'.format(act))
 
     if act_type in ['relu', 'tanh']:
         op_name, new_attrs = act_type, {}
@@ -339,7 +349,8 @@ def _darknet_activations(inputs, attrs):
         op_name, new_attrs = act_type, {}
         sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
     else:
-        raise_operator_unimplemented('act_type: ' + act_type)
+        raise tvm.error.OpNotImplemented(
+            'Operator act: {} is not supported in framework Darknet.'.format(act))
     return sym, None
 
 def _darknet_op_not_support(inputs, attrs):
@@ -402,7 +413,8 @@ def _darknet_convert_symbol(op_name, inputs, attrs):
     if op_name in _DARKNET_CONVERT_MAP:
         sym, out_name = _DARKNET_CONVERT_MAP[op_name](inputs, attrs)
     else:
-        raise_operator_unimplemented(op_name)
+        raise tvm.error.OpNotImplemented(
+            'Operator {} is not supported in frontend Darknet.'.format(op_name))
     if out_name is None:
         out_name = sym.list_output_names()[0].replace('_output', '')
     return out_name, sym
@@ -448,9 +460,10 @@ def _get_convolution_weights(self, layer, opname):
         if layer.nweights == 0:
             return
 
-        if (layer.n * layer.c * layer.size * layer.size) != layer.nweights:
-            raise_attribute_invalid(layer.n * layer.c * layer.size * layer.size,
-                                    'layer weights size', 'conv2d')
+        if layer.n * layer.c * layer.size * layer.size != layer.nweights:
+            msg = 'nweights ({}) != n * c * h * w ({}) in operator {}'
+            msg = msg.format(layer.nweights, layer.n * layer.c * layer.size ** 2, opname)
+            raise tvm.error.OpAttributeInvalid(msg)
 
         shape = (layer.n, layer.c, layer.size, layer.size)
         weights = self._read_memory_buffer(shape, layer.weights)
@@ -630,7 +643,8 @@ def _get_darknet_attrs(self, layer, layer_num):
             pass
 
         else:
-            raise_operator_unimplemented(layer.type)
+            raise tvm.error.OpNotImplemented(
+                'Operator {} is not supported in frontend Darknet.'.format(layer.type))
 
         return attr
 
@@ -763,7 +777,8 @@ def _handle_darknet_rnn_layers(self, layer_num, sym):
 
         elif LAYERTYPE.LSTM == layer.type:
             if layer.steps > 1:
-                raise_attribute_invalid(layer.steps, 'number of steps', 'RNN')
+                raise tvm.error.OpAttributeInvalid(
+                    'Number of steps {} of RNN is not valid.'.format(layer.steps))
 
             op_name_add = 'elemwise_add'
             op_name_mul = 'elemwise_mul'
@@ -829,7 +844,8 @@ def _handle_darknet_rnn_layers(self, layer_num, sym):
 
         elif LAYERTYPE.GRU == layer.type:
             if layer.steps > 1:
-                raise_attribute_invalid(layer.steps, 'number of steps', 'RNN')
+                raise tvm.error.OpAttributeInvalid(
+                    'Number of steps {} is not valid in RNN.'.format(layer.steps))
 
             op_name_add = 'elemwise_add'
             op_name_mul = 'elemwise_mul'
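Because every frontend now raises exception classes defined in tvm.error, callers can handle unsupported models uniformly. A hedged usage sketch; net stands for an already-loaded Darknet model object, and the tuple return shape follows the usual from_darknet convention:

import tvm
from nnvm.frontend import from_darknet

try:
    sym, params = from_darknet(net, dtype='float32')
except tvm.error.OpNotImplemented as err:
    print('Model uses an unsupported operator: {}'.format(err))
except tvm.error.OpAttributeInvalid as err:
    print('Model has an invalid attribute: {}'.format(err))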
