diff --git a/src/operator/nn/upsampling-inl.h b/src/operator/nn/upsampling-inl.h
index 662ba78cd84a..8219e3e9bd8d 100644
--- a/src/operator/nn/upsampling-inl.h
+++ b/src/operator/nn/upsampling-inl.h
@@ -59,7 +59,9 @@ struct UpSamplingParam : public dmlc::Parameter<UpSamplingParam> {
     .set_range(1, 1000)
     .describe("Up sampling scale");
     DMLC_DECLARE_FIELD(num_filter)
-    .describe("Input filter. Only used by bilinear sample_type.")
+    .describe("Input filter. Only used by bilinear sample_type. "
+              "Since bilinear upsampling uses deconvolution, num_filters "
+              "is set to the number of channels.")
     .set_default(0);
     DMLC_DECLARE_FIELD(sample_type)
     .add_enum("nearest", up_enum::kNearest)
diff --git a/src/operator/nn/upsampling.cc b/src/operator/nn/upsampling.cc
index d09017bf713e..81799268ad02 100644
--- a/src/operator/nn/upsampling.cc
+++ b/src/operator/nn/upsampling.cc
@@ -121,7 +121,9 @@ struct UpSamplingGrad {
 DMLC_REGISTER_PARAMETER(UpSamplingParam);
 
 NNVM_REGISTER_OP(UpSampling)
-.describe("Performs nearest neighbor/bilinear up sampling to inputs.")
+.describe("Performs nearest neighbor/bilinear up sampling to inputs. "
+          "Bilinear upsampling makes use of deconvolution. Therefore, "
+          "provide 2 inputs - data and weight. ")
 .set_num_inputs([](const NodeAttrs& attrs) {
   const UpSamplingParam& params = nnvm::get<UpSamplingParam>(attrs.parsed);
   return params.sample_type == up_enum::kNearest ? params.num_args : 2;
@@ -149,7 +151,8 @@ NNVM_REGISTER_OP(UpSampling)
 .set_attr<FCompute>("FCompute", UpSamplingCompute<cpu>)
 .set_attr<nnvm::FGradient>("FGradient", UpSamplingGrad{"_backward_UpSampling"})
 .set_attr<std::string>("key_var_num_args", "num_args")
-.add_argument("data", "NDArray-or-Symbol[]", "Array of tensors to upsample")
+.add_argument("data", "NDArray-or-Symbol[]", "Array of tensors to upsample. "
+              "For bilinear upsampling, there should be 2 inputs - 1 data and 1 weight.")
 .add_arguments(UpSamplingParam::__FIELDS__())
 .set_attr<nnvm::FSetInputVarAttrOnCompose>("FSetInputVarAttrOnCompose",
     [](const nnvm::NodeAttrs& attrs, nnvm::NodePtr var, const int index) {
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index 7169395205e0..2579749a351f 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -1491,17 +1491,32 @@ def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
         assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
 
 
-def check_bilinear_upsampling_with_shape(shapes, scale, root_scale):
-    arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
-    arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
-
-    up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='bilinear', scale=root_scale)
+def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
+    def _init_bilinear(arr, f):
+        weight = np.zeros(np.prod(arr.shape), dtype='float32')
+        shape = arr.shape
+        c = (2 * f - 1 - f % 2) / (2. * f)
+        for i in range(np.prod(shape)):
+            x = i % shape[3]
+            y = (i // shape[3]) % shape[2]
+            weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
+        arr[:] = weight.reshape(shape)
+        return arr
+
+    up = mx.sym.UpSampling(mx.sym.Variable("data"),
+                           mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
+                           num_filter=num_filter, num_args=2)
+    arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
+    arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
+           'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
+
+    arr_grad = [mx.nd.empty(s) for s in arg_shapes]
     exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
     exe.forward(is_train=True)
+    out = exe.outputs[0].asnumpy()
     exe.backward(exe.outputs)
-    for k in range(len(shapes)):
-        name = 'arg_%d'%k
-        assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
+    target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
+    assert out.shape == data_shape[:2] + target_shape
 
 
 @with_seed()
@@ -1514,6 +1529,22 @@ def test_nearest_upsampling():
             check_nearest_upsampling_with_shape(shapes, scale, root_scale)
 
 
+@with_seed()
+def test_bilinear_upsampling():
+    rootscale = [2,3]
+    scales = [1,2,3]
+    filters = [1,2,3]
+    bases = [1,2,3]
+    for params in itertools.product(rootscale, scales, filters, bases):
+        root_scale, scale, num_filter, base = params
+        # bilinear upsampling takes only 1 data and 1 weight
+        # multi input mode is not applicable
+        dimension = base*root_scale*scale
+        kernel = 2 * root_scale - root_scale % 2
+        data_shape = (1, num_filter, dimension, dimension)
+        weight_shape = (1, num_filter, kernel, kernel)
+        check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
+
 @with_seed()
 def test_batchnorm_training():
     def check_batchnorm_training(stype):