[KP] Fix registry and add UT for thresholded_relu & softshrink #40524

Merged: 23 commits, Mar 16, 2022

Commits (23):
8f532b0  Merge pull request #1 from PaddlePaddle/develop  (AshburnLee, Sep 8, 2020)
5b5804d  Merge pull request #2 from PaddlePaddle/develop  (AshburnLee, Sep 17, 2020)
cee2470  Merge pull request #3 from PaddlePaddle/develop  (AshburnLee, Sep 30, 2020)
5be3a45  Merge pull request #4 from PaddlePaddle/develop  (AshburnLee, Oct 13, 2020)
a1d92b7  Merge pull request #5 from PaddlePaddle/develop  (AshburnLee, Oct 20, 2020)
e674a5d  Merge pull request #6 from PaddlePaddle/develop  (AshburnLee, Nov 15, 2020)
855d00b  Merge pull request #7 from PaddlePaddle/develop  (AshburnLee, Nov 18, 2020)
7cb2c97  Merge pull request #8 from PaddlePaddle/develop  (AshburnLee, Mar 31, 2021)
db9fc91  Merge pull request #9 from PaddlePaddle/develop  (AshburnLee, Apr 7, 2021)
c7b68c8  Merge branch 'develop' of /~https://github.com/PaddlePaddle/paddle into…  (AshburnLee, Apr 26, 2021)
0fd630e  Merge branch 'PaddlePaddle:develop' into develop  (AshburnLee, Aug 16, 2021)
4bbb33b  Merge branch 'PaddlePaddle:develop' into develop  (AshburnLee, Sep 28, 2021)
30a1a89  Merge branch 'PaddlePaddle:develop' into develop  (AshburnLee, Nov 22, 2021)
d6cb683  Merge branch 'PaddlePaddle:develop' into develop  (AshburnLee, Nov 29, 2021)
b2558a0  Merge branch 'PaddlePaddle:develop' into develop  (AshburnLee, Dec 21, 2021)
06313d8  Merge branch 'PaddlePaddle:develop' into develop  (AshburnLee, Feb 21, 2022)
6e329e5  Merge branch 'develop' of /~https://github.com/PaddlePaddle/paddle into…  (AshburnLee, Mar 14, 2022)
0225022  Merge branch 'develop' of /~https://github.com/PaddlePaddle/paddle into…  (AshburnLee, Mar 14, 2022)
0b6df77  init commit  (AshburnLee, Mar 14, 2022)
e59973d  Merge branch 'develop' of /~https://github.com/PaddlePaddle/paddle into…  (AshburnLee, Mar 14, 2022)
cdf101e  Merge branch 'develop' of /~https://github.com/PaddlePaddle/paddle into…  (AshburnLee, Mar 16, 2022)
306bd2b  correct namespace  (AshburnLee, Mar 16, 2022)
de5f891  Merge branch 'develop' of /~https://github.com/PaddlePaddle/paddle into…  (AshburnLee, Mar 16, 2022)

276 changes: 218 additions & 58 deletions paddle/fluid/operators/activation_op.kps
@@ -15,6 +15,8 @@ limitations under the License. */
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"

#include "paddle/phi/kernels/funcs/activation_functor.h"

namespace paddle {
namespace operators {

@@ -1390,63 +1392,221 @@ REGISTER_OP_CUDA_KERNEL(
FOR_EACH_ACTIVATION_CUDA_OP(REGISTER_ACTIVATION_CUDA_KERNEL)

#ifdef PADDLE_WITH_XPU_KP
#define REGISTER_ACTIVATION_XPU_KERNEL(act_type, op_name, functor, \
grad_functor) \
REGISTER_OP_KERNEL( \
act_type, KP, plat::XPUPlace, \
ops::ActivationCudaKernel<plat::XPUDeviceContext, ops::functor<float>>); \
REGISTER_OP_KERNEL(act_type##_grad, KP, plat::XPUPlace, \
ops::ActivationGradCudaKernel<plat::XPUDeviceContext, \
ops::grad_functor<float>>);

REGISTER_ACTIVATION_XPU_KERNEL(leaky_relu, LeakyRelu, CudaLeakyReluFunctor,
CudaLeakyReluGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(sigmoid, Sigmoid, CudaSigmoidFunctor,
CudaSigmoidGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(exp, Exp, CudaExpFunctor, CudaExpGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(log, Log, CudaLogFunctor, CudaLogGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(reciprocal, Reciprocal, CudaReciprocalFunctor,
CudaReciprocalGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(softplus, Softplus, CudaSoftplusFunctor,
CudaSoftplusGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(hard_swish, HardSwish, CudaHardSwishFunctor,
CudaHardSwishGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(elu, Elu, CudaELUFunctor, CudaELUGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(celu, Celu, CudaCELUFunctor,
CudaCELUGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(sqrt, Sqrt, CudaSqrtFunctor,
CudaSqrtGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(square, Square, CudaSquareFunctor,
CudaSquareGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(silu, Silu, CudaSiluFunctor,
CudaSiluGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(logsigmoid, LogSigmoid, CudaLogSigmoidFunctor,
CudaLogSigmoidGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(softshrink, SoftShrink, CudaSoftShrinkFunctor,
CudaSoftShrinkGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(ceil, Ceil, CudaCeilFunctor,
CudaZeroGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(floor, Floor, CudaFloorFunctor,
CudaZeroGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(log1p, Log1p, CudaLog1pFunctor,
CudaLog1pGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(brelu, BRelu, CudaBReluFunctor,
CudaBReluGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(soft_relu, SoftRelu, CudaSoftReluFunctor,
CudaSoftReluGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(softsign, Softsign, CudaSoftsignFunctor,
CudaSoftsignGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(relu6, Relu6, CudaRelu6Functor,
CudaRelu6GradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(hard_shrink, HardShrink, CudaHardShrinkFunctor,
CudaHardShrinkGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(hard_sigmoid, HardSigmoid,
CudaHardSigmoidFunctor,
CudaHardSigmoidGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(swish, Swish, CudaSwishFunctor,
CudaSwishGradFunctor);
REGISTER_ACTIVATION_XPU_KERNEL(thresholded_relu, ThresholdedRelu,
CudaThresholdedReluFunctor,
CudaThresholdedReluGradFunctor);
REGISTER_OP_KERNEL(
brelu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
phi::funcs::CudaBReluFunctor<float>>);
REGISTER_OP_KERNEL(
brelu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
phi::funcs::CudaBReluGradFunctor<float>>);

REGISTER_OP_KERNEL(ceil, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaCeilFunctor<float>>);
REGISTER_OP_KERNEL(
ceil_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaZeroGradFunctor<float>>);

REGISTER_OP_KERNEL(celu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaCELUFunctor<float>>);
REGISTER_OP_KERNEL(
celu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaCELUGradFunctor<float>>);

REGISTER_OP_KERNEL(elu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaELUFunctor<float>>);
REGISTER_OP_KERNEL(
elu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaELUGradFunctor<float>>);

REGISTER_OP_KERNEL(exp, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaExpFunctor<float>>);
REGISTER_OP_KERNEL(
exp_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaExpGradFunctor<float>>);

REGISTER_OP_KERNEL(floor, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaFloorFunctor<float>>);
REGISTER_OP_KERNEL(
floor_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaZeroGradFunctor<float>>);

REGISTER_OP_KERNEL(
hard_shrink, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaHardShrinkFunctor<float>>);
REGISTER_OP_KERNEL(
hard_shrink_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaHardShrinkGradFunctor<float>>);

REGISTER_OP_KERNEL(
hard_sigmoid, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaHardSigmoidFunctor<float>>);
REGISTER_OP_KERNEL(
hard_sigmoid_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaHardSigmoidGradFunctor<float>>);

REGISTER_OP_KERNEL(hard_swish, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaHardSwishFunctor<float>>);
REGISTER_OP_KERNEL(
hard_swish_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaHardSwishGradFunctor<float>>);

REGISTER_OP_KERNEL(
leaky_relu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
phi::funcs::CudaLeakyReluFunctor<float>>);
REGISTER_OP_KERNEL(
leaky_relu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
phi::funcs::CudaLeakyReluGradFunctor<float>>);

REGISTER_OP_KERNEL(log, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaLogFunctor<float>>);
REGISTER_OP_KERNEL(
log_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaLogGradFunctor<float>>);

REGISTER_OP_KERNEL(log1p, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaLog1pFunctor<float>>);
REGISTER_OP_KERNEL(
log1p_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaLog1pGradFunctor<float>>);

REGISTER_OP_KERNEL(
logsigmoid, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaLogSigmoidFunctor<float>>);
REGISTER_OP_KERNEL(
logsigmoid_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaLogSigmoidGradFunctor<float>>);

REGISTER_OP_KERNEL(
reciprocal, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaReciprocalFunctor<float>>);
REGISTER_OP_KERNEL(
reciprocal_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaReciprocalGradFunctor<float>>);

REGISTER_OP_KERNEL(
relu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
phi::funcs::CudaReluFunctor<float>>);
REGISTER_OP_KERNEL(
relu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
phi::funcs::CudaReluGradFunctor<float>>);

REGISTER_OP_KERNEL(relu6, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaRelu6Functor<float>>);
REGISTER_OP_KERNEL(
relu6_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaRelu6GradFunctor<float>>);

REGISTER_OP_KERNEL(sigmoid, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSigmoidFunctor<float>>);
REGISTER_OP_KERNEL(
sigmoid_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSigmoidGradFunctor<float>>);

REGISTER_OP_KERNEL(silu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSiluFunctor<float>>);
REGISTER_OP_KERNEL(
silu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSiluGradFunctor<float>>);

REGISTER_OP_KERNEL(soft_relu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftReluFunctor<float>>);
REGISTER_OP_KERNEL(
soft_relu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftReluGradFunctor<float>>);

REGISTER_OP_KERNEL(softplus, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftplusFunctor<float>>);
REGISTER_OP_KERNEL(
softplus_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftplusGradFunctor<float>>);

REGISTER_OP_KERNEL(
softshrink, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftShrinkFunctor<float>>);
REGISTER_OP_KERNEL(
softshrink_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftShrinkGradFunctor<float>>);

REGISTER_OP_KERNEL(softsign, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftsignFunctor<float>>);
REGISTER_OP_KERNEL(
softsign_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSoftsignGradFunctor<float>>);

REGISTER_OP_KERNEL(sqrt, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSqrtFunctor<float>>);
REGISTER_OP_KERNEL(
sqrt_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSqrtGradFunctor<float>>);

REGISTER_OP_KERNEL(square, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSquareFunctor<float>>);
REGISTER_OP_KERNEL(
square_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSquareGradFunctor<float>>);

REGISTER_OP_KERNEL(swish, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSwishFunctor<float>>);
REGISTER_OP_KERNEL(
swish_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaSwishGradFunctor<float>>);

REGISTER_OP_KERNEL(
thresholded_relu, KP, plat::XPUPlace,
ops::ActivationCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaThresholdedReluFunctor<float>>);
REGISTER_OP_KERNEL(
thresholded_relu_grad, KP, plat::XPUPlace,
ops::ActivationGradCudaKernel<paddle::platform::XPUDeviceContext,
ops::CudaThresholdedReluGradFunctor<float>>);

#endif // PADDLE_WITH_XPU_KP
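
For context, the kernels registered above are reached through the regular Python activation APIs. The snippet below is a minimal sketch, not part of this PR's diff: it assumes a Paddle build with PADDLE_WITH_XPU_KP enabled and an available XPU device, and the input values are made up for illustration.

import numpy as np
import paddle
import paddle.nn.functional as F

paddle.set_device('xpu')  # assumes an XPU device is present
x = paddle.to_tensor(np.random.uniform(-2, 2, [10, 12]).astype('float32'))
# These calls should dispatch to the softshrink / thresholded_relu kernels registered above.
y_softshrink = F.softshrink(x, threshold=0.5)
y_trelu = F.thresholded_relu(x, threshold=1.0)
print(y_softshrink.numpy().shape, y_trelu.numpy().shape)
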
63 changes: 63 additions & 0 deletions python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py
@@ -849,6 +849,38 @@ def ref_softsign(x):
    return out


class XPUTestSoftshrinkOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'softshrink'
        self.use_dynamic_create_class = False

    class XPUTestSoftshrink(TestActivationOPBase):
        def set_case(self):
            self.op_type = "softshrink"
            self.dtype = self.in_type

            threshold = 0.5
            np.random.seed(1023)
            x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
            out = ref_softshrink(x, threshold)

            self.inputs = {'X': x}
            self.outputs = {'Out': out}
            self.attrs = {'use_xpu': True}


support_types = get_xpu_op_support_types('softshrink')
for stype in support_types:
    create_test_class(globals(), XPUTestSoftshrinkOP, stype)


def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold)
    return out


class XPUTestSwishOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'swish'
@@ -879,5 +911,36 @@ def ref_swish(x):
    return out


class XPUTestThresholdedReluOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'thresholded_relu'
        self.use_dynamic_create_class = False

    class XPUTestThresholdedRelu(TestActivationOPBase):
        def set_case(self):
            self.op_type = "thresholded_relu"
            self.dtype = self.in_type

            threshold = 1.0
            np.random.seed(1024)
            x = np.random.uniform(-20, 20, [10, 12]).astype(self.dtype)
            x[np.abs(x) < 0.005] = 0.02
            out = ref_thresholded_relu(x, threshold)

            self.inputs = {'X': x}
            self.outputs = {'Out': out}
            self.attrs = {'use_xpu': True}


support_types = get_xpu_op_support_types('thresholded_relu')
for stype in support_types:
    create_test_class(globals(), XPUTestThresholdedReluOP, stype)


def ref_thresholded_relu(x, threshold=1.0):
    out = (x > threshold) * x
    return out


if __name__ == "__main__":
    unittest.main()
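
For reference, the two NumPy helpers added above follow the standard definitions: softshrink(x) = x - threshold for x > threshold, x + threshold for x < -threshold, and 0 otherwise; thresholded_relu(x) = x for x > threshold and 0 otherwise. Below is a small stand-alone sanity check with illustrative values that are not part of the test suite; the helpers are restated so the snippet runs on its own.

import numpy as np


def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold)
    return out


def ref_thresholded_relu(x, threshold=1.0):
    return (x > threshold) * x


x = np.array([-1.0, -0.3, 0.0, 0.3, 2.0], dtype=np.float32)
print(ref_softshrink(x, threshold=0.5))        # values inside [-0.5, 0.5] shrink to 0; -1.0 -> -0.5, 2.0 -> 1.5
print(ref_thresholded_relu(x, threshold=1.0))  # only 2.0 exceeds the threshold; all other entries become 0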