Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Adding sparse support to MXTensor for custom operators #17569

Merged
28 commits merged on Mar 22, 2020
Changes from all commits
Commits (28)
a63bae9
Added enum for sparse storage
guanxinq Feb 11, 2020
93cddf4
Add structure for Dense and Sparse
guanxinq Feb 11, 2020
8ccfbd2
redesign the data structure for MXSparse
guanxinq Feb 13, 2020
8c9b358
pull out aux data from sparse NDArray
guanxinq Feb 14, 2020
2bf9200
Added more sparse arguments to API interface
guanxinq Feb 15, 2020
7eba53c
Passed sparse from c_api to lib_api.h and set in MXTensor
guanxinq Feb 17, 2020
3fdf771
Fix indent
guanxinq Feb 17, 2020
a1aa78f
fix segfault
guanxinq Feb 19, 2020
0537deb
Fix NDArray to MXTensor errors
guanxinq Feb 25, 2020
4f44695
Add a sample of sparse(CSR) transpose
guanxinq Feb 25, 2020
ade3e46
Make CSR transpose temporarily work by hardcoding
guanxinq Feb 26, 2020
9a26ac3
Fixed sparse output size(Refined)
guanxinq Mar 2, 2020
041470b
Add tests for symbolic and stateful ops
guanxinq Mar 3, 2020
a3b175b
Added a sample for row sparse transpose
guanxinq Mar 3, 2020
99d00c2
Added real row sparse transpose
guanxinq Mar 3, 2020
60e6753
Fix output size issue by adding lambda for CheckAndAlloc()
guanxinq Mar 10, 2020
3e7f23c
Fix mixed storage formats error
guanxinq Mar 11, 2020
b97bfad
Added infer storage type function
guanxinq Mar 12, 2020
41f0784
resolve comments
guanxinq Mar 13, 2020
bd40098
Set inferSType as optional function
guanxinq Mar 16, 2020
7e95dca
Resolve comments
guanxinq Mar 17, 2020
3f963f5
Add error messages
guanxinq Mar 17, 2020
0eb1de9
Resolve comments
guanxinq Mar 18, 2020
79d7d64
verify transpose ops results
guanxinq Mar 18, 2020
89d638f
Resolved merge conflict
guanxinq Mar 19, 2020
9dcb604
fix sanity check
guanxinq Mar 19, 2020
08faed4
Merge and resolve conflicts
guanxinq Mar 19, 2020
7f39b85
update MX_LIBRARY_VERSION to 5
guanxinq Mar 20, 2020
10 changes: 8 additions & 2 deletions example/extensions/lib_custom_op/Makefile
@@ -15,13 +15,19 @@
# specific language governing permissions and limitations
# under the License.

-all: gemm_lib relu_lib
+all: gemm_lib relu_lib transposecsr_lib transposerowsp_lib

gemm_lib:
	g++ -shared -fPIC -std=c++11 gemm_lib.cc -o libgemm_lib.so -I ../../../include/mxnet

relu_lib:
	nvcc -shared -std=c++11 -Xcompiler -fPIC relu_lib.cu -o librelu_lib.so -I ../../../include/mxnet

transposecsr_lib:
	g++ -shared -fPIC -std=c++11 transposecsr_lib.cc -o libtransposecsr_lib.so -I ../../../include/mxnet

transposerowsp_lib:
	g++ -shared -fPIC -std=c++11 transposerowsp_lib.cc -o libtransposerowsp_lib.so -I ../../../include/mxnet

clean:
-	rm -rf libgemm_lib.so librelu_lib.so
+	rm -rf libgemm_lib.so librelu_lib.so libtransposecsr_lib.so libtransposerowsp_lib.so
78 changes: 78 additions & 0 deletions example/extensions/lib_custom_op/test_transposecsr.py
@@ -0,0 +1,78 @@
#!/usr/bin/env python3

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# coding: utf-8
# pylint: disable=arguments-differ

# This test checks dynamic loading of custom library into MXNet
# and checks end-to-end compute of a sample 2D CSR transpose custom op

import mxnet as mx
import os

# load library
if (os.name=='posix'):
    path = os.path.abspath('libtransposecsr_lib.so')
    mx.library.load(path)
elif (os.name=='nt'):
    path = os.path.abspath('libtransposecsr_lib.dll')
    mx.library.load(path)

a = mx.nd.array([[1,3,0,2,1],[0,1,0,0,0],[0,2,4,5,3]])
a = a.tostype('csr')
print("--------Input CSR Array---------")
print("data:", a.data.asnumpy())
print("indices:", a.indices.asnumpy())
print("indptr:", a.indptr.asnumpy())

print("--------Start NDArray Compute---------")
b = mx.nd.my_transposecsr(a)
print("Compute Results:")
print("data:", b.data.asnumpy())
print("indices:", b.indices.asnumpy())
print("indptr:", b.indptr.asnumpy())

print("Stateful Compute Result:")
c = mx.nd.my_state_transposecsr(a, test_kw=100)
print("data:", c.data.asnumpy())
print("indices:", c.indices.asnumpy())
print("indptr:", c.indptr.asnumpy())

print("--------start symbolic compute--------")
d = mx.sym.Variable('d')
e = mx.sym.my_transposecsr(d)
f = mx.sym.my_state_transposecsr(d, test_kw=200)

exe = e.bind(ctx=mx.cpu(),args={'d':a})
exe2 = f.bind(ctx=mx.cpu(),args={'d':a})
out = exe.forward()
print("Compute Results:")
print("data:", out[0].data.asnumpy())
print("indices:", out[0].indices.asnumpy())
print("indptr:", out[0].indptr.asnumpy())

out2 = exe2.forward()
out2 = exe2.forward()
print("Stateful Compute Result:")
print("data:", out2[0].data.asnumpy())
print("indices:", out2[0].indices.asnumpy())
print("indptr:", out2[0].indptr.asnumpy())

print("--------Baseline(dense)--------")
print(mx.nd.transpose(a.tostype('default')))
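
A minimal verification sketch for the CSR example above (assuming numpy is installed and libtransposecsr_lib has been loaded as in the script): it densifies the custom op's CSR output and compares it against the dense transpose baseline.

import numpy as np
import mxnet as mx

a = mx.nd.array([[1,3,0,2,1],[0,1,0,0,0],[0,2,4,5,3]]).tostype('csr')
b = mx.nd.my_transposecsr(a)                       # custom sparse op from the loaded library
expected = mx.nd.transpose(a.tostype('default'))   # dense baseline
assert np.array_equal(b.tostype('default').asnumpy(), expected.asnumpy()), \
    "custom CSR transpose does not match the dense transpose"
print("CSR transpose matches dense baseline")
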
73 changes: 73 additions & 0 deletions example/extensions/lib_custom_op/test_transposerowsp.py
@@ -0,0 +1,73 @@
#!/usr/bin/env python3

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# coding: utf-8
# pylint: disable=arguments-differ

# This test checks dynamic loading of custom library into MXNet
# and checks end-to-end compute of a sample 2D row sparse transpose custom op

import mxnet as mx
import os

# load library
if (os.name=='posix'):
    path = os.path.abspath('libtransposerowsp_lib.so')
    mx.library.load(path)
elif (os.name=='nt'):
    path = os.path.abspath('libtransposerowsp_lib.dll')
    mx.library.load(path)

a = mx.nd.array([[1,2,3],[0,0,0],[4,0,5],[0,0,0],[0,0,0]])
a = a.tostype('row_sparse')
print("--------Input CSR Array---------")
print("data:", a.data.asnumpy())
print("indices:", a.indices.asnumpy())

print("--------Start NDArray Compute---------")
b = mx.nd.my_transposerowsp(a)
print("Compute Results:")
print("data:", b.data.asnumpy())
print("indices:", b.indices.asnumpy())

print("Stateful Compute Result:")
c = mx.nd.my_state_transposerowsp(a, test_kw=100)
print("data:", c.data.asnumpy())
print("indices:", c.indices.asnumpy())

print("--------start symbolic compute--------")
d = mx.sym.Variable('d')
e = mx.sym.my_transposerowsp(d)
f = mx.sym.my_state_transposerowsp(d, test_kw=200)

exe = e.bind(ctx=mx.cpu(),args={'d':a})
exe2 = f.bind(ctx=mx.cpu(),args={'d':a})
out = exe.forward()
print("Compute Results:")
print("data:", out[0].data.asnumpy())
print("indices:", out[0].indices.asnumpy())

out2 = exe2.forward()
out2 = exe2.forward()
print("Stateful Compute Result:")
print("data:", out2[0].data.asnumpy())
print("indices:", out2[0].indices.asnumpy())

print("--------Baseline(dense)--------")
print(mx.nd.transpose(a.tostype('default')))
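
As a reference for reading the printouts above, a small sketch of what the row_sparse format stores for this input (the values follow from the test matrix itself, not from the custom op's output):

import mxnet as mx

a = mx.nd.array([[1,2,3],[0,0,0],[4,0,5],[0,0,0],[0,0,0]]).tostype('row_sparse')
# row_sparse keeps only the rows that contain non-zeros:
#   indices -> [0, 2]               (rows 0 and 2 are stored)
#   data    -> [[1,2,3], [4,0,5]]   (the stored rows, in row order)
print(a.indices.asnumpy())   # [0 2]
print(a.data.asnumpy())      # [[1. 2. 3.] [4. 0. 5.]]
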
197 changes: 197 additions & 0 deletions example/extensions/lib_custom_op/transposecsr_lib.cc
@@ -0,0 +1,197 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

/*!
* Copyright (c) 2020 by Contributors
* \file transposecsr_lib.cc
* \brief Sample 2D transpose custom operator.
*/

#include <iostream>
#include "lib_api.h"

void transpose(MXTensor src, MXTensor dst, OpResource res) {
  MXSparse* A = src.data<MXSparse>();
  MXSparse* B = dst.data<MXSparse>();
  std::vector<int64_t> shape = src.shape;
  int64_t h = shape[0];
  int64_t w = shape[1];
  if(src.stype == kCSRStorage) {
    float *Aval = (float*) (A->data);
    // Here we need one more element to help calculate the index in the copy loop below.
    std::vector<int64_t> rowPtr(w + 2, 0);
    // count column
    for(int i = 0; i < A->data_len; i++) {
      rowPtr[A->indices[i] + 2]++;
    }
    // Accumulated sum. After this for loop, rowPtr[1:w+2) stores the correct
    // result of transposed rowPtr.
    for(int i = 2; i < rowPtr.size(); i++) {
      rowPtr[i] += rowPtr[i - 1];
    }

    // Alloc memory for sparse data, where 0 is the index
    // of B in output vector.
    res.alloc_sparse(B, 0, A->data_len, w + 1);
    float *Bval = (float*) (B->data);
    for(int i = 0; i < h; i++) {
      for(int j = A->indptr[i]; j < A->indptr[i + 1]; j++) {
        // Helps calculate index and after that rowPtr[0:w+1) stores the
        // correct result of transposed rowPtr.
        int index = rowPtr[A->indices[j] + 1]++;
        Bval[index] = Aval[j];
        B->indices[index] = i;
      }
    }
    memcpy(B->indptr, rowPtr.data(), sizeof(int64_t) * (w + 1));
  }
}

MXReturnValue forward(std::map<std::string, std::string> attrs,
                      std::vector<MXTensor> inputs,
                      std::vector<MXTensor> outputs,
                      OpResource res) {
  // The data types and storage types of inputs and outputs should be the same.
  if(inputs[0].dtype != outputs[0].dtype || inputs[0].stype != outputs[0].stype) {
    std::cout << "Error! Expected all inputs and outputs to be the same type."
              << "Found input storage type:" << inputs[0].stype
              << " Found output storage type:" << outputs[0].stype
              << " Found input data type:" << inputs[0].dtype
              << " Found output data type:" << outputs[0].dtype << std::endl;
    return MX_FAIL;
  }

  transpose(inputs[0], outputs[0], res);
  return MX_SUCCESS;
}

MXReturnValue backward(std::map<std::string, std::string> attrs,
                       std::vector<MXTensor> inputs,
                       std::vector<MXTensor> outputs,
                       OpResource res) {
  return MX_SUCCESS;
}

MXReturnValue parseAttrs(std::map<std::string, std::string> attrs, int* num_in, int* num_out) {
  *num_in = 1;
  *num_out = 1;
  return MX_SUCCESS;
}

MXReturnValue inferType(std::map<std::string, std::string> attrs,
                        std::vector<int> &intypes,
                        std::vector<int> &outtypes) {
  // validate inputs
  if (intypes.size() != 1) {
    std::cout << "Expected 1 inputs to inferType" << std::endl;
    return MX_FAIL;
  }
  if (intypes[0] != kFloat32) {
    std::cout << "Expected input to have float32 type" << std::endl;
    return MX_FAIL;
  }

  outtypes[0] = intypes[0];
  return MX_SUCCESS;
}

MXReturnValue inferSType(std::map<std::string, std::string> attrs,
                         std::vector<int> &instypes,
                         std::vector<int> &outstypes) {
  if (instypes[0] != kCSRStorage) {
    std::cout << "Expected storage type is kCSRStorage" << std::endl;
    return MX_FAIL;
  }
  outstypes[0] = instypes[0];
  return MX_SUCCESS;
}

MXReturnValue inferShape(std::map<std::string, std::string> attrs,
                         std::vector<std::vector<unsigned int>> &inshapes,
                         std::vector<std::vector<unsigned int>> &outshapes) {
  // validate inputs
  if (inshapes.size() != 1) {
    std::cout << "Expected 1 inputs to inferShape" << std::endl;
    return MX_FAIL;
  }

  outshapes[0].push_back(inshapes[0][1]);
  outshapes[0].push_back(inshapes[0][0]);
  return MX_SUCCESS;
}

REGISTER_OP(my_transposecsr)
.setForward(forward, "cpu")
.setBackward(backward, "cpu")
.setParseAttrs(parseAttrs)
.setInferType(inferType)
.setInferSType(inferSType)
.setInferShape(inferShape);

/* ------------------------------------------------------------------------- */

class MyStatefulTransposeCSR : public CustomStatefulOp {
 public:
  explicit MyStatefulTransposeCSR(int count) : count(count) {}

  MXReturnValue Forward(std::vector<MXTensor> inputs,
                        std::vector<MXTensor> outputs,
                        OpResource op_res) {
    std::cout << "Info: keyword + number of forward: " << ++count << std::endl;
    std::map<std::string, std::string> attrs;
    return forward(attrs, inputs, outputs, op_res);
  }

  MXReturnValue Backward(std::vector<MXTensor> inputs,
                         std::vector<MXTensor> outputs,
                         OpResource op_res) {
    std::map<std::string, std::string> attrs;
    return backward(attrs, inputs, outputs, op_res);
  }

 private:
  int count;
};

MXReturnValue createOpState(std::map<std::string, std::string> attrs,
                            CustomStatefulOp** op_inst) {
  // testing passing of keyword arguments
  int count = attrs.count("test_kw") > 0 ? std::stoi(attrs["test_kw"]) : 0;
  // creating stateful operator instance
  *op_inst = new MyStatefulTransposeCSR(count);
  std::cout << "Info: stateful operator created" << std::endl;
  return MX_SUCCESS;
}

REGISTER_OP(my_state_transposecsr)
.setParseAttrs(parseAttrs)
.setInferType(inferType)
.setInferSType(inferSType)
.setInferShape(inferShape)
.setCreateOpState(createOpState, "cpu");

MXReturnValue initialize(int version) {
  if (version >= 10400) {
    std::cout << "MXNet version " << version << " supported" << std::endl;
    return MX_SUCCESS;
  } else {
    std::cout << "MXNet version " << version << " not supported" << std::endl;
    return MX_FAIL;
  }
}
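
To make the rowPtr bookkeeping in transpose() above easier to follow, here is a plain-Python sketch of the same counting-sort style CSR transpose (illustration only; it mirrors the C++ logic and does not call any MXNet API; transpose_csr is a hypothetical helper name):

def transpose_csr(data, indices, indptr, h, w):
    # One extra slot (w + 2) so rowPtr[col + 2] can hold the per-column counts.
    rowPtr = [0] * (w + 2)
    # Count the non-zeros in each input column (= each output row).
    for col in indices:
        rowPtr[col + 2] += 1
    # Prefix sum: rowPtr[1:w+2] now equals the indptr of the transposed matrix.
    for i in range(2, len(rowPtr)):
        rowPtr[i] += rowPtr[i - 1]
    out_data = [0.0] * len(data)
    out_indices = [0] * len(data)
    for i in range(h):
        for j in range(indptr[i], indptr[i + 1]):
            # rowPtr[col + 1] is the next free slot of output row `col`;
            # after this loop rowPtr[0:w+1] equals the transposed indptr.
            idx = rowPtr[indices[j] + 1]
            rowPtr[indices[j] + 1] += 1
            out_data[idx] = data[j]
            out_indices[idx] = i
    return out_data, out_indices, rowPtr[:w + 1]

# The 3x5 CSR matrix from test_transposecsr.py:
data    = [1, 3, 2, 1, 1, 2, 4, 5, 3]
indices = [0, 1, 3, 4, 1, 1, 2, 3, 4]
indptr  = [0, 4, 5, 9]
print(transpose_csr(data, indices, indptr, 3, 5))
# -> ([1, 3, 1, 2, 4, 2, 5, 1, 3], [0, 0, 1, 2, 2, 0, 2, 0, 2], [0, 1, 4, 5, 7, 9])
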