Merge pull request #6852 from tensor-tang/alexnet
enable alexnet benchmark
luotao1 authored Dec 22, 2017
2 parents 0c16512 + 025a6f3 commit 658dbe6
Showing 5 changed files with 28 additions and 11 deletions.
27 changes: 20 additions & 7 deletions benchmark/paddle/image/alexnet.py
@@ -6,8 +6,18 @@
 width = 227
 num_class = 1000
 batch_size = get_config_arg('batch_size', int, 128)
+gp = get_config_arg('layer_num', int, 1)
+is_infer = get_config_arg("is_infer", bool, False)
+num_samples = get_config_arg('num_samples', int, 2560)

-args = {'height': height, 'width': width, 'color': True, 'num_class': num_class}
+args = {
+    'height': height,
+    'width': width,
+    'color': True,
+    'num_class': num_class,
+    'is_infer': is_infer,
+    'num_samples': num_samples
+}
 define_py_data_sources2(
     "train.list", None, module="provider", obj="process", args=args)

@@ -31,7 +41,7 @@

 # conv2
 net = img_conv_layer(
-    input=net, filter_size=5, num_filters=256, stride=1, padding=2, groups=1)
+    input=net, filter_size=5, num_filters=256, stride=1, padding=2, groups=gp)
 net = img_cmrnorm_layer(input=net, size=5, scale=0.0001, power=0.75)
 net = img_pool_layer(input=net, pool_size=3, stride=2)

@@ -40,11 +50,11 @@
     input=net, filter_size=3, num_filters=384, stride=1, padding=1)
 # conv4
 net = img_conv_layer(
-    input=net, filter_size=3, num_filters=384, stride=1, padding=1, groups=1)
+    input=net, filter_size=3, num_filters=384, stride=1, padding=1, groups=gp)

 # conv5
 net = img_conv_layer(
-    input=net, filter_size=3, num_filters=256, stride=1, padding=1, groups=1)
+    input=net, filter_size=3, num_filters=256, stride=1, padding=1, groups=gp)
 net = img_pool_layer(input=net, pool_size=3, stride=2)

 net = fc_layer(
@@ -59,6 +69,9 @@
     layer_attr=ExtraAttr(drop_rate=0.5))
 net = fc_layer(input=net, size=1000, act=SoftmaxActivation())

-lab = data_layer('label', num_class)
-loss = cross_entropy(input=net, label=lab)
-outputs(loss)
+if is_infer:
+    outputs(net)
+else:
+    lab = data_layer('label', num_class)
+    loss = cross_entropy(input=net, label=lab)
+    outputs(loss)
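Note: layer_num, is_infer, and num_samples are all read through get_config_arg, so they can be overridden from the command line when the config is launched. A minimal sketch of such an invocation, assuming the usual paddle train --config_args mechanism; the device and job flags added by the run scripts below are omitted and may differ:

    # hypothetical invocation -- only the --config/--config_args pattern is illustrated
    paddle train --config=alexnet.py \
        --config_args="batch_size=128,layer_num=2,is_infer=1,num_samples=2560"

With layer_num=2, gp sets groups=2 on conv2, conv4, and conv5, matching the two-group layout of the original AlexNet; with is_infer set, the config emits the softmax output instead of the cross-entropy loss.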
5 changes: 3 additions & 2 deletions benchmark/paddle/image/run_mkl_infer.sh
@@ -79,8 +79,9 @@ fi
 # inference benchmark
 for use_mkldnn in True False; do
   for batchsize in 1 2 4 8 16; do
-    infer googlenet v1 $batchsize $use_mkldnn
-    infer resnet 50 $batchsize $use_mkldnn
     infer vgg 19 $batchsize $use_mkldnn
+    infer resnet 50 $batchsize $use_mkldnn
+    infer googlenet v1 $batchsize $use_mkldnn
+    infer alexnet 2 $batchsize $use_mkldnn
   done
 done
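Note: the positional arguments follow the pattern of the existing calls: topology name, layer/group count, batch size, and the MKL-DNN switch. For alexnet, the second argument (2) is assumed to be forwarded as layer_num, which alexnet.py maps to the conv groups setting. A hedged example with concrete values (the infer helper itself is defined earlier in this script and is untouched by this diff):

    #     topology  layer_num  batch_size  use_mkldnn
    infer alexnet   2          16          True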
1 change: 1 addition & 0 deletions benchmark/paddle/image/run_mkl_train.sh
@@ -47,5 +47,6 @@ for use_mkldnn in True False; do
     train vgg 19 $batchsize $use_mkldnn
     train resnet 50 $batchsize $use_mkldnn
     train googlenet v1 $batchsize $use_mkldnn
+    train alexnet 2 $batchsize $use_mkldnn
   done
 done
5 changes: 3 additions & 2 deletions benchmark/paddle/image/run_openblas_infer.sh
@@ -57,7 +57,8 @@ fi

 # inference benchmark
 for batchsize in 1 2 4 8 16; do
-  infer googlenet v1 $batchsize
-  infer resnet 50 $batchsize
   infer vgg 19 $batchsize
+  infer resnet 50 $batchsize
+  infer googlenet v1 $batchsize
+  infer alexnet 2 $batchsize
 done
1 change: 1 addition & 0 deletions benchmark/paddle/image/run_openblas_train.sh
@@ -37,4 +37,5 @@ for batchsize in 64 128 256; do
   train vgg 19 $batchsize
   train resnet 50 $batchsize
   train googlenet v1 $batchsize
+  train alexnet 2 $batchsize
 done
