Add 4 non-fault-tolerant demos #307

Merged: 1 commit, Aug 17, 2017
43 changes: 22 additions & 21 deletions demo/fit_a_line/train.py
@@ -2,9 +2,6 @@
 import paddle.v2.dataset as dataset
 import os
 import gzip
-import glob
-import recordio
-import cPickle as pickle

 #PaddleCloud cached the dataset on /pfs/${DATACENTER}/public/dataset/...
 dc = os.getenv("PADDLE_CLOUD_CURRENT_DATACENTER")
@@ -13,26 +10,30 @@
 trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID"))
 trainer_count = int(os.getenv("PADDLE_INIT_NUM_GRADIENT_SERVERS"))

-def recordio_reader_creator(path):
-    files = glob.glob(path)
-    files.sort()
-    flie_count = len(files)
-    files_current_train = []
+# TODO(helin): remove this once paddle.v2.reader.creator.recordio is
+# fixed.
+def recordio(paths, buf_size=100):
Review thread on the recordio reader:

Collaborator: With this reader implementation, every trainer will fetch the same training data. Shouldn't each trainer fetch only a part of the whole training set, or am I missing something?

Collaborator: In non-fault-tolerant mode, each reader must fetch its part of the training data by itself.

helinwang (Collaborator, Author), Aug 16, 2017: Thanks! Sharding does not work correctly until #319 and #318 are fixed. Can I merge this first and submit a follow-up PR for sharding after those issues are resolved?

The planned code is:

def get_shard(paths):
    trainer_count = int(os.getenv("PADDLE_INIT_NUM_GRADIENT_SERVERS"))
    files = glob.glob(paths)
    files.sort()
    files_current_train = []
    for idx, fn in enumerate(files):
        if idx % trainer_count == trainer_id:
            files_current_train.append(fn)
    return files_current_train
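A minimal sketch of how the planned get_shard could plug into the recordio reader introduced by this diff (hypothetical follow-up usage, not part of this PR; it relies on the reader's documented support for ","-separated paths):

# Hypothetical follow-up: each trainer reads only its own shard of files.
shard = get_shard("/pfs/dlnel/public/dataset/uci_housing/uci_housing_train*")
train_reader = recordio(",".join(shard))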

"""
Creates a data reader from given RecordIO file paths separated by ",",
glob pattern is supported.
:path: path of recordio files.
:returns: data reader of recordio files.
"""

for idx, fn in enumerate(files):
if idx % trainer_count == trainer_id:
files_current_train.append(fn)
import recordio as rec
import paddle.v2.reader.decorator as dec
import cPickle as pickle

def reader():
for fn in files_current_train:
reader = recordio.reader(fn)
while True:
r = reader.read()
if r is None:
break
yield pickle.loads(r)
reader.close()
return reader
f = rec.reader(paths)
while True:
r = f.read()
if r is None:
break
yield pickle.loads(r)
f.close()

return dec.buffered(reader, buf_size)

 def main():
     # init
@@ -74,7 +75,7 @@ def event_handler(event):
     # training
     trainer.train(
         reader=paddle.batch(
-            paddle.reader.shuffle(recordio_reader_creator("/pfs/dlnel/public/dataset/uci_housing/uci_housing_train*"), buf_size=500),
+            paddle.reader.shuffle(recordio("/pfs/dlnel/public/dataset/uci_housing/uci_housing_train*"), buf_size=500),
             batch_size=2),
         feeding=feeding,
         event_handler=event_handler,
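For reference, the reader above assumes every RecordIO record is a cPickle-serialized sample. A minimal sketch of producing a matching file (the file name and sample values are made up for illustration, and recordio.writer is assumed to mirror the recordio.reader API used in the diff):

import cPickle as pickle
import recordio

# Write two (features, label) samples as pickled RecordIO records.
w = recordio.writer("uci_housing_train-00000")  # hypothetical file name
for sample in [([0.1] * 13, [24.0]), ([0.2] * 13, [21.6])]:
    w.write(pickle.dumps(sample))
w.close()

# The recordio() helper from this diff can then read the records back:
# for sample in recordio("uci_housing_train-*")():
#     print sample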
249 changes: 249 additions & 0 deletions demo/label_semantic_roles/train.py
@@ -0,0 +1,249 @@
import os
import math
import numpy as np
import paddle.v2 as paddle
import paddle.v2.dataset.conll05 as conll05
import paddle.v2.evaluator as evaluator

word_dict, verb_dict, label_dict = conll05.get_dict()
word_dict_len = len(word_dict)
label_dict_len = len(label_dict)
pred_len = len(verb_dict)

mark_dict_len = 2
word_dim = 32
mark_dim = 5
hidden_dim = 512
depth = 8
default_std = 1 / math.sqrt(hidden_dim) / 3.0
mix_hidden_lr = 1e-3

# TODO(helin): remove this once paddle.v2.reader.creator.recordio is
# fixed.
def recordio(paths, buf_size=100):
"""
Creates a data reader from given RecordIO file paths separated by ",",
glob pattern is supported.
:path: path of recordio files.
:returns: data reader of recordio files.
"""

import recordio as rec
import paddle.v2.reader.decorator as dec
import cPickle as pickle

def reader():
f = rec.reader(paths)
while True:
r = f.read()
if r is None:
break
yield pickle.loads(r)
f.close()

return dec.buffered(reader, buf_size)

def d_type(size):
    return paddle.data_type.integer_value_sequence(size)


def db_lstm():
    # 8 features
    word = paddle.layer.data(name='word_data', type=d_type(word_dict_len))
    predicate = paddle.layer.data(name='verb_data', type=d_type(pred_len))

    ctx_n2 = paddle.layer.data(name='ctx_n2_data', type=d_type(word_dict_len))
    ctx_n1 = paddle.layer.data(name='ctx_n1_data', type=d_type(word_dict_len))
    ctx_0 = paddle.layer.data(name='ctx_0_data', type=d_type(word_dict_len))
    ctx_p1 = paddle.layer.data(name='ctx_p1_data', type=d_type(word_dict_len))
    ctx_p2 = paddle.layer.data(name='ctx_p2_data', type=d_type(word_dict_len))
    mark = paddle.layer.data(name='mark_data', type=d_type(mark_dict_len))

    emb_para = paddle.attr.Param(name='emb', initial_std=0., is_static=True)
    std_0 = paddle.attr.Param(initial_std=0.)
    std_default = paddle.attr.Param(initial_std=default_std)

    predicate_embedding = paddle.layer.embedding(
        size=word_dim,
        input=predicate,
        param_attr=paddle.attr.Param(name='vemb', initial_std=default_std))
    mark_embedding = paddle.layer.embedding(
        size=mark_dim, input=mark, param_attr=std_0)

    word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2]
    emb_layers = [
        paddle.layer.embedding(size=word_dim, input=x, param_attr=emb_para)
        for x in word_input
    ]
    emb_layers.append(predicate_embedding)
    emb_layers.append(mark_embedding)

    hidden_0 = paddle.layer.mixed(
        size=hidden_dim,
        bias_attr=std_default,
        input=[
            paddle.layer.full_matrix_projection(
                input=emb, param_attr=std_default) for emb in emb_layers
        ])

    lstm_para_attr = paddle.attr.Param(initial_std=0.0, learning_rate=1.0)
    hidden_para_attr = paddle.attr.Param(
        initial_std=default_std, learning_rate=mix_hidden_lr)

    lstm_0 = paddle.layer.lstmemory(
        input=hidden_0,
        act=paddle.activation.Relu(),
        gate_act=paddle.activation.Sigmoid(),
        state_act=paddle.activation.Sigmoid(),
        bias_attr=std_0,
        param_attr=lstm_para_attr)

    # stack L-LSTM and R-LSTM with direct edges
    input_tmp = [hidden_0, lstm_0]

    for i in range(1, depth):
        mix_hidden = paddle.layer.mixed(
            size=hidden_dim,
            bias_attr=std_default,
            input=[
                paddle.layer.full_matrix_projection(
                    input=input_tmp[0], param_attr=hidden_para_attr),
                paddle.layer.full_matrix_projection(
                    input=input_tmp[1], param_attr=lstm_para_attr)
            ])

        lstm = paddle.layer.lstmemory(
            input=mix_hidden,
            act=paddle.activation.Relu(),
            gate_act=paddle.activation.Sigmoid(),
            state_act=paddle.activation.Sigmoid(),
            reverse=((i % 2) == 1),
            bias_attr=std_0,
            param_attr=lstm_para_attr)

        input_tmp = [mix_hidden, lstm]

    feature_out = paddle.layer.mixed(
        size=label_dict_len,
        bias_attr=std_default,
        input=[
            paddle.layer.full_matrix_projection(
                input=input_tmp[0], param_attr=hidden_para_attr),
            paddle.layer.full_matrix_projection(
                input=input_tmp[1], param_attr=lstm_para_attr)
        ], )

    return feature_out


def load_parameter(file_name, h, w):
    with open(file_name, 'rb') as f:
        f.read(16)  # skip header.
        return np.fromfile(f, dtype=np.float32).reshape(h, w)
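As an aside, load_parameter above expects a raw Paddle parameter file: a 16-byte header followed by h * w float32 values. A round-trip sketch under that assumption (the file name is hypothetical, and the zeroed header is only a placeholder, since load_parameter skips those bytes):

import numpy as np

emb = np.zeros((44068, 32), dtype=np.float32)  # same shape as the 'emb' parameter
with open("emb_demo.bin", "wb") as f:          # hypothetical file name
    f.write("\0" * 16)                         # placeholder 16-byte header
    emb.tofile(f)

assert (load_parameter("emb_demo.bin", 44068, 32) == emb).all()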


def main():
    paddle.init()

    # define network topology
    feature_out = db_lstm()
    target = paddle.layer.data(name='target', type=d_type(label_dict_len))
    crf_cost = paddle.layer.crf(
        size=label_dict_len,
        input=feature_out,
        label=target,
        param_attr=paddle.attr.Param(
            name='crfw', initial_std=default_std, learning_rate=mix_hidden_lr))

    crf_dec = paddle.layer.crf_decoding(
        size=label_dict_len,
        input=feature_out,
        label=target,
        param_attr=paddle.attr.Param(name='crfw'))
    evaluator.sum(input=crf_dec)

    # create parameters
    parameters = paddle.parameters.create(crf_cost)
    parameters.set('emb', load_parameter(conll05.get_embedding(), 44068, 32))

    # create optimizer
    optimizer = paddle.optimizer.Momentum(
        momentum=0,
        learning_rate=2e-2,
        regularization=paddle.optimizer.L2Regularization(rate=8e-4),
        model_average=paddle.optimizer.ModelAverage(
            average_window=0.5, max_average_window=10000), )

    trainer = paddle.trainer.SGD(
        cost=crf_cost,
        parameters=parameters,
        update_equation=optimizer,
        extra_layers=crf_dec)

    reader = paddle.batch(
        paddle.reader.shuffle(
            recordio("/pfs/dlnel/public/dataset/conll05/conl105_train-*"),
            buf_size=8192),
        batch_size=10)
    reader_test = paddle.batch(
        paddle.reader.shuffle(
            recordio("/pfs/dlnel/public/dataset/conll05/conl105_test-*"),
            buf_size=50),
        batch_size=10)

    feeding = {
        'word_data': 0,
        'ctx_n2_data': 1,
        'ctx_n1_data': 2,
        'ctx_0_data': 3,
        'ctx_p1_data': 4,
        'ctx_p2_data': 5,
        'verb_data': 6,
        'mark_data': 7,
        'target': 8
    }

    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 100 == 0:
                print "Pass %d, Batch %d, Cost %f, %s" % (
                    event.pass_id, event.batch_id, event.cost, event.metrics)
            if event.batch_id % 1000 == 0:
                result = trainer.test(reader=reader, feeding=feeding)
                print "\nTest with Pass %d, Batch %d, %s" % (
                    event.pass_id, event.batch_id, result.metrics)

        if isinstance(event, paddle.event.EndPass):
            # save parameters
            with open('params_pass_%d.tar' % event.pass_id, 'w') as f:
                parameters.to_tar(f)

            result = trainer.test(reader=reader_test, feeding=feeding)
            print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)

    trainer.train(
        reader=reader,
        event_handler=event_handler,
        num_passes=1,
        feeding=feeding)

    test_creator = paddle.dataset.conll05.test()
    test_data = []
    for item in test_creator():
        test_data.append(item[0:8])
        if len(test_data) == 1:
            break

    predict = paddle.layer.crf_decoding(
        size=label_dict_len,
        input=feature_out,
        param_attr=paddle.attr.Param(name='crfw'))
    probs = paddle.infer(
        output_layer=predict,
        parameters=parameters,
        input=test_data,
        field='id')
    assert len(probs) == len(test_data[0][0])
    labels_reverse = {}
    for (k, v) in label_dict.items():
        labels_reverse[v] = k
    pre_lab = [labels_reverse[i] for i in probs]
    print pre_lab


if __name__ == '__main__':
    main()