-
Notifications
You must be signed in to change notification settings - Fork 5.7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
reduce unittest gpu memory #3448
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -10,7 +10,7 @@ class TestCrossEntropy(unittest.TestCase): | |
def setUp(self): | ||
# TODO this unit test is not passed | ||
self.type = "onehot_cross_entropy" | ||
batch_size = 100 | ||
batch_size = 32 | ||
class_num = 10 | ||
X = numpy.random.random((batch_size, class_num)).astype("float32") | ||
label = 5 * numpy.ones(batch_size).astype("int32") | ||
|
@@ -24,7 +24,7 @@ def setUp(self): | |
class CrossEntropyGradOpTest(GradientChecker): | ||
def test_softmax_grad(self): | ||
op = create_op("onehot_cross_entropy") | ||
batch_size = 100 | ||
batch_size = 32 | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Please add a test case with batch size = 0. |
||
class_num = 10 | ||
inputs = { | ||
"X": numpy.random.uniform( | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -18,7 +18,7 @@ class TestSoftmaxOp(unittest.TestCase): | |
|
||
def setUp(self): | ||
self.type = "softmax" | ||
self.inputs = {'X': np.random.random((32, 100)).astype("float32")} | ||
self.inputs = {'X': np.random.random((32, 22)).astype("float32")} | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Let us use small prime numbers. |
||
self.outputs = { | ||
'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X']) | ||
} | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -11,10 +11,10 @@ def test_int_tensor(self): | |
|
||
tensor = var.get_tensor() | ||
|
||
tensor.set_dims([1000, 784]) | ||
tensor.set_dims([100, 84]) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Can you try three configurations -- [7, 0], [0, 7], [7, 13] -- 7 and 13 are just small prime numbers. Prime numbers are reasonable because they are not powers of 2 or otherwise easily factorizable. 0 is a good boundary case. |
||
tensor.alloc_int(place) | ||
tensor_array = numpy.array(tensor) | ||
self.assertEqual((1000, 784), tensor_array.shape) | ||
self.assertEqual((100, 84), tensor_array.shape) | ||
tensor_array[3, 9] = 1 | ||
tensor_array[19, 11] = 2 | ||
tensor.set(tensor_array, place) | ||
|
@@ -30,11 +30,11 @@ def test_float_tensor(self): | |
|
||
tensor = var.get_tensor() | ||
|
||
tensor.set_dims([1000, 784]) | ||
tensor.set_dims([100, 84]) | ||
tensor.alloc_float(place) | ||
|
||
tensor_array = numpy.array(tensor) | ||
self.assertEqual((1000, 784), tensor_array.shape) | ||
self.assertEqual((100, 84), tensor_array.shape) | ||
tensor_array[3, 9] = 1.0 | ||
tensor_array[19, 11] = 2.0 | ||
tensor.set(tensor_array, place) | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Please add more cases; 7 and 13 are just small prime numbers.