Recommend sys new api #10894

Merged: 20 commits, May 24, 2018
Commits
bc6edc5  Add recommendation system implementation with new API  (sidgoyal78, May 9, 2018)
fce6034  Address review comments  (sidgoyal78, May 10, 2018)
24aaf98  Merge branch 'develop' of /~https://github.com/PaddlePaddle/Paddle into…  (sidgoyal78, May 16, 2018)
eec0b18  Modify as per new API  (sidgoyal78, May 16, 2018)
a14423a  Modify train and test functions to enable data_feed_handler  (sidgoyal78, May 17, 2018)
cd788c6  Rename script to avoid same names for Cmake  (sidgoyal78, May 17, 2018)
d57afb6  Resolve merge conflict  (sidgoyal78, May 18, 2018)
cca4a55  Fix issues with data_feed_handler  (sidgoyal78, May 18, 2018)
d6c7703  Merge remote-tracking branch 'sid/new_api_recsys' into recommend_sys  (daming-lu, May 22, 2018)
228a62a  Merge branch 'develop' into recommend_sys  (daming-lu, May 22, 2018)
e22dc24  Merge remote-tracking branch 'upstream/develop' into recommend_sys  (daming-lu, May 23, 2018)
89adf45  Merge remote-tracking branch 'upstream/develop' into recommend_sys  (daming-lu, May 23, 2018)
5964c5e  change trainer back, rm data_feed_handler  (daming-lu, May 23, 2018)
5131af8  Merge branch 'recommend_sys' of /~https://github.com/daming-lu/Paddle i…  (daming-lu, May 23, 2018)
0dcf50d  3->4  (daming-lu, May 24, 2018)
2789a53  rm func_feed  (daming-lu, May 24, 2018)
8062588  epoch to step  (daming-lu, May 24, 2018)
0f10f30  style  (daming-lu, May 24, 2018)
cc69313  Merge remote-tracking branch 'upstream/develop' into recommend_sys  (daming-lu, May 24, 2018)
1917c92  Merge branch 'recommend_sys' of /~https://github.com/daming-lu/Paddle i…  (daming-lu, May 24, 2018)
@@ -10,3 +10,4 @@ add_subdirectory(fit_a_line)
add_subdirectory(recognize_digits)
add_subdirectory(image_classification)
add_subdirectory(understand_sentiment)
add_subdirectory(recommender_system)
@@ -0,0 +1,7 @@
file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")

# default test
foreach(src ${TEST_OPS})
py_test(${src} SRCS ${src}.py)
endforeach()
@@ -0,0 +1,265 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets

IS_SPARSE = True
USE_GPU = False
BATCH_SIZE = 256


def get_usr_combined_features():
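    """Build the user-side feature tower.

    User id, gender, age bucket, and job id are each embedded, passed
    through a small fc layer, concatenated, and projected to a 200-d
    tanh feature vector.
    """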
    # FIXME(dzh) : old API integer_value(10) may have range check.
    # currently we don't have a user-configured check.

    USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1

    uid = layers.data(name='user_id', shape=[1], dtype='int64')

    usr_emb = layers.embedding(
        input=uid,
        dtype='float32',
        size=[USR_DICT_SIZE, 32],
        param_attr='user_table',
        is_sparse=IS_SPARSE)

    usr_fc = layers.fc(input=usr_emb, size=32)

    USR_GENDER_DICT_SIZE = 2

    usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')

    usr_gender_emb = layers.embedding(
        input=usr_gender_id,
        size=[USR_GENDER_DICT_SIZE, 16],
        param_attr='gender_table',
        is_sparse=IS_SPARSE)

    usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)

    USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
    usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")

    usr_age_emb = layers.embedding(
        input=usr_age_id,
        size=[USR_AGE_DICT_SIZE, 16],
        is_sparse=IS_SPARSE,
        param_attr='age_table')

    usr_age_fc = layers.fc(input=usr_age_emb, size=16)

    USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
    usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")

    usr_job_emb = layers.embedding(
        input=usr_job_id,
        size=[USR_JOB_DICT_SIZE, 16],
        param_attr='job_table',
        is_sparse=IS_SPARSE)

    usr_job_fc = layers.fc(input=usr_job_emb, size=16)

    concat_embed = layers.concat(
        input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)

    usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return usr_combined_features


def get_mov_combined_features():
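    """Build the movie-side feature tower.

    The movie id is embedded and passed through an fc layer, category
    embeddings are sum-pooled, title embeddings go through a sequence
    conv-pool, and the three are concatenated and projected to a 200-d
    tanh feature vector.
    """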

    MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1

    mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')

    mov_emb = layers.embedding(
        input=mov_id,
        dtype='float32',
        size=[MOV_DICT_SIZE, 32],
        param_attr='movie_table',
        is_sparse=IS_SPARSE)

    mov_fc = layers.fc(input=mov_emb, size=32)

    CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())

    category_id = layers.data(
        name='category_id', shape=[1], dtype='int64', lod_level=1)

    mov_categories_emb = layers.embedding(
        input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)

    mov_categories_hidden = layers.sequence_pool(
        input=mov_categories_emb, pool_type="sum")

    MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())

    mov_title_id = layers.data(
        name='movie_title', shape=[1], dtype='int64', lod_level=1)

    mov_title_emb = layers.embedding(
        input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)

    mov_title_conv = nets.sequence_conv_pool(
        input=mov_title_emb,
        num_filters=32,
        filter_size=3,
        act="tanh",
        pool_type="sum")

    concat_embed = layers.concat(
        input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)

    # FIXME(dzh) : need tanh operator
    mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return mov_combined_features


def inference_program():
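    """Score a user/movie pair as the cosine similarity of the two feature
    towers, scaled by 5 to match the rating range.
    """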
    usr_combined_features = get_usr_combined_features()
    mov_combined_features = get_mov_combined_features()

    inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)
    scale_infer = layers.scale(x=inference, scale=5.0)

    return scale_infer


def train_program():
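    """Build the training objective: mean squared error between the
    predicted and ground-truth ratings. Returns [avg_cost, scale_infer].
    """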

    scale_infer = inference_program()

    label = layers.data(name='score', shape=[1], dtype='float32')
    square_cost = layers.square_error_cost(input=scale_infer, label=label)
    avg_cost = layers.mean(square_cost)

    return [avg_cost, scale_infer]


def train(use_cuda, train_program, save_path):
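    """Train with SGD for one pass over the MovieLens training set.

    After every step the model is evaluated on the test set; parameters
    are saved and training stops once the average test cost drops below 4.
    """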
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    optimizer = fluid.optimizer.SGD(learning_rate=0.2)

    trainer = fluid.Trainer(
        train_func=train_program, place=place, optimizer=optimizer)

    feed_order = [
        'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id',
        'movie_title', 'score'
    ]

    def event_handler(event):
        if isinstance(event, fluid.EndStepEvent):
            test_reader = paddle.batch(
                paddle.dataset.movielens.test(), batch_size=BATCH_SIZE)
            avg_cost_set = trainer.test(
                reader=test_reader, feed_order=feed_order)

            # get avg cost
            avg_cost = np.array(avg_cost_set).mean()

            print("avg_cost: %s" % avg_cost)

            if float(avg_cost) < 4:  # Smaller value to increase CI speed
                trainer.save_params(save_path)
                trainer.stop()
            else:
                print('BatchID {0}, Test Loss {1:0.2}'.format(event.epoch + 1,
                                                              float(avg_cost)))
                if math.isnan(float(avg_cost)):
                    sys.exit("got NaN loss, training failed.")

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.movielens.train(), buf_size=8192),
        batch_size=BATCH_SIZE)

    trainer.train(
        num_epochs=1,
        event_handler=event_handler,
        reader=train_reader,
        feed_order=[
            'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id',
            'category_id', 'movie_title', 'score'
        ])


def infer(use_cuda, inference_program, save_path):
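    """Restore the saved parameters and score one hand-constructed
    user/movie example, printing the predicted rating.
    """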
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(
        inference_program, param_path=save_path, place=place)

    def create_lod_tensor(data, lod=None):
        tensor = fluid.LoDTensor()
        if lod is None:
            # Tensor, the shape is [batch_size, 1]
            index = 0
            lod_0 = [index]
            for l in range(len(data)):
                index += 1
                lod_0.append(index)
            lod = [lod_0]
        tensor.set_lod(lod)

        flattened_data = np.concatenate(data, axis=0).astype("int64")
        flattened_data = flattened_data.reshape([len(flattened_data), 1])
        tensor.set(flattened_data, place)
        return tensor

    # Build a hand-picked input example for inference
    user_id = create_lod_tensor([[1]])
    gender_id = create_lod_tensor([[1]])
    age_id = create_lod_tensor([[0]])
    job_id = create_lod_tensor([[10]])
    movie_id = create_lod_tensor([[783]])
    category_id = create_lod_tensor([[10], [8], [9]], [[0, 3]])
    movie_title = create_lod_tensor([[1069], [4140], [2923], [710], [988]],
                                    [[0, 5]])

    results = inferencer.infer(
        {
            'user_id': user_id,
            'gender_id': gender_id,
            'age_id': age_id,
            'job_id': job_id,
            'movie_id': movie_id,
            'category_id': category_id,
            'movie_title': movie_title
        },
        return_numpy=False)

    print("infer results: ", np.array(results[0]))


def main(use_cuda):
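    """Run training followed by inference; return early if CUDA is
    requested but this build was not compiled with it.
    """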
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    save_path = "recommender_system.inference.model"
    train(use_cuda=use_cuda, train_program=train_program, save_path=save_path)
    infer(
        use_cuda=use_cuda,
        inference_program=inference_program,
        save_path=save_path)


if __name__ == '__main__':
    main(USE_GPU)