From 66de2781160c53c1763012463de15779cbbc4836 Mon Sep 17 00:00:00 2001
From: Manu Seth <22492939+mseth10@users.noreply.github.com>
Date: Mon, 4 Feb 2019 20:53:05 -0800
Subject: [PATCH] modifying SyncBN doc for FP16 use case (#14041)

---
 python/mxnet/gluon/contrib/nn/basic_layers.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/python/mxnet/gluon/contrib/nn/basic_layers.py b/python/mxnet/gluon/contrib/nn/basic_layers.py
index 28fea1592da4..56f0809b345f 100644
--- a/python/mxnet/gluon/contrib/nn/basic_layers.py
+++ b/python/mxnet/gluon/contrib/nn/basic_layers.py
@@ -165,7 +165,10 @@ class SyncBatchNorm(BatchNorm):
 
     Standard BN [1]_ implementation only normalize the data within each device.
     SyncBN normalizes the input within the whole mini-batch.
-    We follow the sync-onece implmentation described in the paper [2]_.
+    We follow the implementation described in the paper [2]_.
+
+    Note: Current implementation of SyncBN does not support FP16 training.
+    For FP16 inference, use standard nn.BatchNorm instead of SyncBN.
 
     Parameters
     ----------
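
Usage note (appended after the diff, not part of the patch): a minimal sketch
of the guidance in the new docstring lines, assuming MXNet 1.x with two GPUs
available; num_devices=2, the layer sizes, and the input shape are
illustrative assumptions, not taken from the patch.

    import mxnet as mx
    from mxnet.gluon import nn
    from mxnet.gluon.contrib.nn import SyncBatchNorm

    # FP32 training: SyncBatchNorm synchronizes batch statistics across
    # devices, so mean/variance are computed over the whole mini-batch.
    net = nn.HybridSequential()
    net.add(nn.Conv2D(16, kernel_size=3),
            SyncBatchNorm(num_devices=2),   # illustrative device count
            nn.Activation('relu'))
    net.initialize(ctx=[mx.gpu(0), mx.gpu(1)])

    # FP16 inference: SyncBatchNorm does not support float16, so build the
    # same block with standard nn.BatchNorm and cast it to float16 instead.
    net_fp16 = nn.HybridSequential()
    net_fp16.add(nn.Conv2D(16, kernel_size=3),
                 nn.BatchNorm(),
                 nn.Activation('relu'))
    net_fp16.initialize(ctx=mx.gpu(0))
    net_fp16.cast('float16')

    x = mx.nd.random.uniform(shape=(8, 3, 32, 32), ctx=mx.gpu(0)).astype('float16')
    y = net_fp16(x)   # forward pass runs entirely in float16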