Fix typos (#54015)
co63oc authored May 23, 2023
1 parent 11aa5ed commit adca365
Showing 20 changed files with 47 additions and 47 deletions.
2 changes: 1 addition & 1 deletion paddle/.common_test_util.sh
@@ -20,7 +20,7 @@ PORT_LOCK_FILE=/tmp/paddle_test_ports.lock
touch $PORT_FILE $PORT_LOCK_FILE 2>/dev/null
chmod a+rw $PORT_FILE $PORT_LOCK_FILE 2>/dev/null

-# acquire a range of ports that not used by other runtests.sh currentlly.
+# acquire a range of ports that not used by other runtests.sh currently.
# return 1 if ports is used by other, otherwise return 0.
# NOTE: the acquire_ports/release_ports is interprocess mutexed.
#
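The shell comment above describes acquire_ports/release_ports as an interprocess mutex over a shared port file. As a rough Python sketch of the acquire logic only (the real helpers are bash functions not shown in this hunk, and the PORT_FILE path is an assumption inferred from the hunk header):

```python
import fcntl

# Assumed paths, inferred from the PORT_LOCK_FILE line in the hunk header.
PORT_FILE = "/tmp/paddle_test_ports"
PORT_LOCK_FILE = "/tmp/paddle_test_ports.lock"


def acquire_ports(start, count):
    """Reserve ports [start, start + count) in the shared port file.

    Returns True on success and False if any port is already taken,
    mirroring the helper's 0/1 return convention. Holding an exclusive
    flock on the lock file provides the interprocess mutex the NOTE
    describes.
    """
    wanted = set(range(start, start + count))
    with open(PORT_LOCK_FILE, "a+") as lock:
        fcntl.flock(lock, fcntl.LOCK_EX)  # block other runtests.sh instances
        try:
            with open(PORT_FILE, "a+") as f:
                f.seek(0)
                used = {int(p) for p in f.read().split()}
                if wanted & used:
                    return False  # some port is used by another run
                f.write("".join(f"{p}\n" for p in sorted(wanted)))
                return True
        finally:
            fcntl.flock(lock, fcntl.LOCK_UN)
```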
4 changes: 2 additions & 2 deletions paddle/CMakeLists.txt
@@ -17,7 +17,7 @@ add_subdirectory(ir)
# (4) the tests binaries are generated in different directories, as the same as the
# folder of source file.

-# Now, we want to make all cc tests dynamically linked to the main paddle labrary,
+# Now, we want to make all cc tests dynamically linked to the main paddle library,
# i.e., `libpaddle.so`, so we changes the logic of (2), (3), (4):
# (2) calling `cc_test()` in each `CMakeLists.txt` will not `exactly` add test, but
# record all tests and its source files, the action of add tests is defered to HERE.
@@ -26,7 +26,7 @@ add_subdirectory(ir)
# (3) the tests links dynamic libraries, `libpaddle.so`
# (4) the tests are generated to the same directory, i.e., `CC_TESTS_DIR` defined above.

-# Next, (to be discusssed)
+# Next, (to be discussed)
# (1) move all source files to same folder,
# (2) naturally, and configure tests in only one `CMakeLists.txt`,
# (3) cc tests support linking pre-built dynamic libraries. For example, use the dynamic
4 changes: 2 additions & 2 deletions paddle/fluid/dialect/pd_type_storage.h
@@ -39,7 +39,7 @@ struct hash<std::vector<T>> {
namespace paddle {
namespace dialect {
///
-/// \brief Define Parameteric TypeStorage for DenseTensorType.
+/// \brief Define Parametric TypeStorage for DenseTensorType.
///
/// NOTE(zhangbo9674): The derived TypeStorage class needs to implement the
/// following methods: (1)declare ParamKey, (2)define Construction method,
@@ -93,7 +93,7 @@ struct DenseTensorTypeStorage : public ir::TypeStorage {
offset_(offset) {}

///
-/// \brief Each derived TypeStorage must define a Construc method, which
+/// \brief Each derived TypeStorage must define a Construct method, which
/// StorageManager uses to construct a derived TypeStorage.
///
static DenseTensorTypeStorage *Construct(ParamKey key) {
4 changes: 2 additions & 2 deletions paddle/fluid/imperative/amp_auto_cast.cc
@@ -147,7 +147,7 @@ AmpOperators::AmpOperators()
OpSupportedInfos("GPU", paddle::framework::proto::VarType::BF16));
unsupported_bf16_ops_->insert(unsupported_ops_gpu_bf16.begin(),
unsupported_ops_gpu_bf16.end());
-// NOTE: GPU/XPU is compiled seperatly.
+// NOTE: GPU/XPU is compiled separately.
#elif defined(PADDLE_WITH_XPU)
auto unsupported_ops_xpu_fp16 = std::get<2>(
OpSupportedInfos("XPU", paddle::framework::proto::VarType::FP16));
@@ -244,7 +244,7 @@ inline bool NeedCast(const std::shared_ptr<VarType>& var) {
paddle::platform::is_cuda_pinned_place(place) ||
paddle::platform::is_xpu_place(place) ||
paddle::platform::is_custom_place(place)) {
-// CudaPinndePlace is added for varbase created by dataloader
+// CudaPinnedPlace is added for varbase created by dataloader
if (data_type == paddle::framework::proto::VarType::FP32 ||
data_type == paddle::framework::proto::VarType::FP16 ||
data_type == paddle::framework::proto::VarType::BF16) {
2 changes: 1 addition & 1 deletion paddle/phi/backends/dynload/CMakeLists.txt
@@ -31,7 +31,7 @@ if(WITH_ROCM)
endif()

# There is no macOS version of NCCL.
-# Disable nvrtc and cuda_driver api on MacOS, and only do a early test on Linux and Windows.
+# Disable nvrtc and cuda_driver api on macOS, and only do an early test on Linux and Windows.
if(NOT APPLE)
list(APPEND CUDA_SRCS nvrtc.cc cuda_driver.cc)
if(WITH_NCCL)
2 changes: 1 addition & 1 deletion paddle/scripts/README.md
@@ -69,7 +69,7 @@ Users can specify the following Docker build arguments with either "ON" or "OFF"
| `WITH_PYTHON` | ON | Build with python support. Turn this off if build is only for capi. |
| `WITH_STYLE_CHECK` | ON | Check the code style when building. |
| `PYTHON_ABI` | "" | Build for different python ABI support, can be cp27-cp27m or cp27-cp27mu |
-| `RUN_TEST` | OFF | Run unit test immediently after the build. |
+| `RUN_TEST` | OFF | Run unit test immediately after the build. |

## Docker Images

16 changes: 8 additions & 8 deletions paddle/scripts/musl_build/README.md
@@ -37,12 +37,12 @@ mkdir -p build && cd build


```bash
-# setup proxy addresss, when the speed of internet is not good.
+# setup proxy address, when the speed of internet is not good.
# export HTTP_PROXY='http://127.0.0.1:8080'
# export HTTPS_PROXY='https://127.0.0.1:8080'

# invoke build paddle script
-# all arguments, such as -j8 optinal, is past to make procedure.
+# all arguments, such as -j8 optional, is past to make procedure.
../paddle/scripts/musl_build/build_paddle.sh -j8

# find output wheel package
@@ -57,7 +57,7 @@ ls ./output/*.whl
# checkout paddle source code
git clone /~https://github.com/PaddlePaddle/Paddle.git

-# entery paddle directory
+# enter paddle directory
cd ./Paddle

# build docker image
@@ -87,7 +87,7 @@ make -j8

# Scripts
1. **build_docker.sh**
-compiling docker building script. it use alpine linux 3.10 as musl linux build enironment. it will try to install all the compiling tools, development packages, and python requirements for paddle musl compiling.
+compiling docker building script. it use alpine linux 3.10 as musl linux build environment. it will try to install all the compiling tools, development packages, and python requirements for paddle musl compiling.

environment variables:
- PYTHON_VERSION: the version of python used for image building, default=3.7.
@@ -104,10 +104,10 @@

environment variables:
- BUILD_MAN: build the paddle manually, default=0.
-- WITH_TEST: build with unitest, and run unitest check, default=0.
+- WITH_TEST: build with unittest, and run unittest check, default=0.
- WITH_PRUNE_CONTAINER: remove the container after building, default=1.
-- CTEST_*: CTEST flages used for unit test.
-- FLAGS_*: build flages used for paddle building.
+- CTEST_*: CTEST flags used for unit test.
+- FLAGS_*: build flags used for paddle building.
- HTTP_PROXY: use http proxy.
- HTTPS_PROXY: use https proxy.

@@ -118,4 +118,4 @@ make -j8
- **config.sh**: build config script for configure compiling option setting.
- **Dockerfile**: build docker definition file.
- **package.txt**: build required develop packages for alpine linux.
-- **REAME.md**: this file.
+- **README.md**: this file.
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/dist_fleet_ctr.py
@@ -395,7 +395,7 @@ def do_dataset_training(self, fleet):
fleet.save_persistables(exe, patch_dirname, None, 5)
fleet.check_save_pre_patch_done()

-# add for gpugrahp
+# add for gpu graph
fleet.save_cache_table(0, 0)
fleet.shrink()

2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/dist_sharding_save.py
@@ -87,7 +87,7 @@ def runtime_main():


if __name__ == "__main__":
-# NOTE(liangjianzhong): dist unittest should be imlpement using runtime_main in test_dist_base.py
+# NOTE(liangjianzhong): dist unittest should be implemented using runtime_main in test_dist_base.py
# but the runtime_main in test_dist_base.py use the fleet, DistributedStrategy from
# paddle.incubate.distributed.fleet.collective which is not support by sharding (paddle.distributed.fleet).
# this should be update in future.
12 changes: 6 additions & 6 deletions python/paddle/fluid/tests/unittests/eager_op_test.py
@@ -67,7 +67,7 @@ def check_out_dtype(api_fn, in_specs, expect_dtypes, target_index=0, **configs):
Args:
api_fn(callable): paddle api function
in_specs(list[tuple]): list of shape and dtype information for constructing input tensor of api_fn, such as [(shape, dtype), (shape, dtype)].
-expected_dtype(list[str]): expected dtype of output tensor.
+expect_dtypes(list[str]): expected dtype of output tensor.
target_index(int): indicate which one from in_specs to infer the dtype of output.
config(dict): other arguments of paddle api function
@@ -1364,14 +1364,14 @@ def _construct_grad_feed_map_from_forward(

def _get_need_run_ops(self, op_desc, fwd_op_desc=None):
"""Postorder traversal of the 'grad' tree to get all ops that need to run during inplace test.
-An op needs to run druing inplace check if,
+An op needs to run during inplace check if,
(1) it has infer_inplace,
(2) it has infer_inplace in its grad descendants. (since we need its outputs as to construct its grad's inputs)
Args:
op_desc (OpDesc): The op_desc of current op.
fwd_op_desc (OpDesc): The op_desc of current op's forward op, None if current op has no forward op.
-Eg. relu's fwd_op is None, relu_grad's fwd_op is relu, relu_grad_grad's fwd_op is relu_grad, etc.
+E.g. relu's fwd_op is None, relu_grad's fwd_op is relu, relu_grad_grad's fwd_op is relu_grad, etc.
Returns:
need_run_ops (list[(op_desc, fwd_op_desc)]): The ops that need to run during inplace test.
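The two conditions in this docstring amount to a postorder walk over the grad tree: visit grad descendants first, then collect the current op if it, or any descendant, has infer_inplace. A minimal sketch of that rule, with has_infer_inplace and grad_children as hypothetical stand-ins (this is not the file's actual implementation):

```python
def has_infer_inplace(op_desc):
    # Hypothetical stand-in for the framework query that reports whether
    # an inplace inference function is registered for this op type.
    return getattr(op_desc, "infer_inplace", False)


def grad_children(op_desc):
    # Hypothetical stand-in returning the op's grad-op descendants.
    return getattr(op_desc, "grad_ops", [])


def collect_need_run_ops(op_desc, fwd_op_desc=None):
    """Postorder traversal of the grad tree.

    Returns (need_run_ops, needed): every op that has infer_inplace
    itself (condition 1) or in a grad descendant (condition 2) is
    collected, children before parents.
    """
    need_run_ops = []
    needed = has_infer_inplace(op_desc)              # condition (1)
    for child in grad_children(op_desc):
        child_ops, child_needed = collect_need_run_ops(child, op_desc)
        need_run_ops.extend(child_ops)
        needed = needed or child_needed              # condition (2)
    if needed:
        need_run_ops.append((op_desc, fwd_op_desc))  # postorder collect
    return need_run_ops, needed
```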
@@ -1540,7 +1540,7 @@ def _check_grad_inplace(
def check_inplace_output_with_place(
self, place, no_check_set=None, inplace_atol=None
):
"""Chech the inplace correctness of given op, its grad op, its grad_grad op, etc.
"""Check the inplace correctness of given op, its grad op, its grad_grad op, etc.
(1) Get all ops need to run. (see conditions in _get_need_run_ops())
(2) Run op in need_run_ops, and do inplace check if it has infer_inplace.
@@ -2115,7 +2115,7 @@ def find_fetch_index(target_name, fetch_list):
for var_name in var_names:
i = find_fetch_index(var_name, fetch_list)
if i == -1:
-# The output is dispensiable or intermediate.
+# The output is dispensable or intermediate.
break
out = fetch_outs[i]
if isinstance(out, core.LoDTensor):
@@ -2818,7 +2818,7 @@ def _get_gradient(
user_defined_grad_outputs = [user_defined_grad_outputs]
grad_outputs = []
for grad_out_value in user_defined_grad_outputs:
-# `presistable` is used to avoid executor create new var in local scope
+# `persistable` is used to avoid executor create new var in local scope
var = block.create_var(
shape=grad_out_value.shape,
dtype=grad_out_value.dtype,
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/gradient_checker.py
@@ -549,7 +549,7 @@ def get_static_double_grad(
program (Program|None): a Program with forward pass.
If None, use fluid.default_main_program().
Returns:
-A list of numpy array that stores second derivative result calulated by static graph.
+A list of numpy array that stores second derivative result calculated by static graph.
"""

if program is None:
6 changes: 3 additions & 3 deletions python/paddle/fluid/tests/unittests/prim_op_test.py
@@ -734,7 +734,7 @@ def check_jit_comp(self):
def check_jit_comp_with_cinn(self):
if self.prim_op_type == "prim":
return
-# cinn doesn't suppoort cpu place
+# cinn doesn't support cpu place
if (
type(self.place) == paddle.fluid.libpaddle.CPUPlace
and self.enable_cinn
@@ -869,7 +869,7 @@ def check(self):

def get_output_dict(self, np_outputs, api_outputs, outputs_sig):
assert len(api_outputs) <= len(outputs_sig), (
"forward api outputs length must be the less than or equal to KernelSignature outputs,but recive %s and %s"
"forward api outputs length must be the less than or equal to KernelSignature outputs,but receive %s and %s"
) % (len(api_outputs), len(outputs_sig))
output_dict = {}
for i in range(len(api_outputs)):
@@ -1249,7 +1249,7 @@ def check_jit_comp(self):
net.forward.program_cache.clear()

def check_jit_comp_with_cinn(self):
-# cinn doesen't support cpu place
+# cinn doesn't support cpu place
if (
type(self.place) is paddle.fluid.libpaddle.CPUPlace
and self.enable_cinn
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/seresnext_net.py
@@ -100,7 +100,7 @@ def shortcut(input, ch_out, stride):

def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio):
# The number of first 1x1 convolutional channels for each bottleneck build block
-# was halved to reduce the compution cost.
+# was halved to reduce the computation cost.
conv0 = conv_bn_layer(
input=input, num_filters=num_filters, filter_size=1, act='relu'
)
@@ -409,7 +409,7 @@ def square_wrapper(self, x):

@prog_scope()
def func(self, place):
-# the shape of input variable should be clearly specified, not inlcude -1.
+# the shape of input variable should be clearly specified, not include -1.
shape = [2, 3, 7, 9]
eps = 0.005
dtype = np.float64
4 changes: 2 additions & 2 deletions python/paddle/nn/__init__.py
@@ -157,7 +157,7 @@

from .utils.spectral_norm_hook import spectral_norm

-# TODO: remove loss, keep it for too many used in unitests
+# TODO: remove loss, keep it for too many used in unittests
from .layer import loss # noqa: F401

from . import utils # noqa: F401
@@ -171,7 +171,7 @@

@deprecated(
since="2.0.0",
update_to="paddle.nn.funcitional.diag_embed",
update_to="paddle.nn.functional.diag_embed",
level=1,
reason="diag_embed in paddle.nn will be removed in future",
)
4 changes: 2 additions & 2 deletions python/paddle/utils/download.py
@@ -182,7 +182,7 @@ def _get_download(url, fullname):
"{}!".format(url, req.status_code)
)

-# For protecting download interupted, download to
+# For protecting download interrupted, download to
# tmp_fullname firstly, move tmp_fullname to fullname
# after download finished
tmp_fullname = fullname + "_tmp"
@@ -295,7 +295,7 @@ def _decompress(fname):
"""
logger.info(f"Decompressing {fname}...")

-# For protecting decompressing interupted,
+# For protecting decompressing interrupted,
# decompress to fpath_tmp directory firstly, if decompress
# successed, move decompress files to fpath and delete
# fpath_tmp and remove download compress file.
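Both hunks in this file guard against interruption the same way: write to a temporary sibling path first and move it into place only after success, so the final path never holds a partial file. A minimal sketch of that download pattern (illustrative; the module's actual implementation is only partially shown above):

```python
import shutil

import requests


def download_atomically(url, fullname):
    """Download url to fullname via a "_tmp" sibling path, so an
    interrupted transfer never leaves a partial file at fullname."""
    tmp_fullname = fullname + "_tmp"
    with requests.get(url, stream=True, timeout=60) as req:
        req.raise_for_status()
        with open(tmp_fullname, "wb") as f:
            for chunk in req.iter_content(chunk_size=1 << 20):
                f.write(chunk)
    shutil.move(tmp_fullname, fullname)  # publish only after success
```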
10 changes: 5 additions & 5 deletions python/paddle/utils/flops.py
@@ -176,7 +176,7 @@ def _elementwise_flops_compute(input_shapes, attrs):
def _elementwise_add_flops(input_shapes, attrs):
"""FLOPs computation for elementwise_add op.
For elementwise_add(input,other):
-input_shapes = [shape_of_input, shape_of_ohther]
+input_shapes = [shape_of_input, shape_of_other]
shape_of_input = [dim1, dim2, dim3 ...]
shape_of_other = [odim1, odim2, odim3...]
equation: flops = max(dim1, odim1) * max(dim2, odim2) * max()...
@@ -188,7 +188,7 @@ def _elementwise_mul_flops(input_shapes, attrs):
def _elementwise_mul_flops(input_shapes, attrs):
"""FLOPs computation for elementwise_mul op.
For elementwise_mul(input,other):
-input_shapes = [shape_of_input, shape_of_ohther]
+input_shapes = [shape_of_input, shape_of_other]
shape_of_input = [dim1, dim2, dim3 ...]
shape_of_other = [odim1, odim2, odim3...]
equation: flops = max(dim1, odim1) * max(dim2, odim2)* max()...
@@ -200,7 +200,7 @@ def _elementwise_div_flops(input_shapes, attrs):
def _elementwise_div_flops(input_shapes, attrs):
"""FLOPs computation for elementwise_div op.
For elementwise_div(input,other):
-input_shapes = [shape_of_input, shape_of_ohther]
+input_shapes = [shape_of_input, shape_of_other]
shape_of_input = [dim1, dim2, dim3 ...]
shape_of_other = [odim1, odim2, odim3...]
equation: flops = max(dim1,odim1)*max(dim2,odim2)*max()...
@@ -237,7 +237,7 @@ def _layer_norm_flops(input_shapes, attrs):
def _matmul_flops(input_shapes, attrs):
"""FLOPs computation for matmul op.
For matmul(input,other):
-input_shapes = [shape_of_input, shape_of_ohther]
+input_shapes = [shape_of_input, shape_of_other]
shape_of_input = [dim1,dim2 ...dim_n_1,dim_n] length:n
shape_of_other = [odim1,odim2 ... odim(n-m)... odim_m_1,dim_m] length:m
suppose n > m and dim_n = odim_m_1:
@@ -274,7 +274,7 @@ def _matmul_v2_flops(input_shapes, attrs):
def _matmul_v2_flops(input_shapes, attrs):
"""FLOPs computation for matmul_v2 op.
For matmul_v2(input,other):
-input_shapes = [shape_of_input, shape_of_ohther]
+input_shapes = [shape_of_input, shape_of_other]
shape_of_input = [dim1, dim2 ...dim_n_1, dim_n] length:n
shape_of_other = [odim1, odim2 ... odim(n-m) ... odim_m_1, dim_m] length:m
suppose n > m and dim_n = odim_m_1:
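The elementwise docstrings above all state the same equation: FLOPs is the product of the per-dimension maxima of the two input shapes. A short sketch of that formula, assuming NumPy-style right-aligned broadcasting where a missing dimension counts as 1 (the registered FLOPs functions themselves are not reproduced here):

```python
from itertools import zip_longest


def elementwise_flops(shape_of_input, shape_of_other):
    """flops = max(dim1, odim1) * max(dim2, odim2) * ..."""
    flops = 1
    for dim, odim in zip_longest(
        reversed(shape_of_input), reversed(shape_of_other), fillvalue=1
    ):
        flops *= max(dim, odim)
    return flops


# e.g. elementwise_flops([4, 1, 7], [5, 7]) == 4 * 5 * 7 == 140
```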
6 changes: 3 additions & 3 deletions python/paddle/utils/install_check.py
@@ -51,7 +51,7 @@ def _prepare_data():

def _is_cuda_available():
"""
-Check whether CUDA is avaiable.
+Check whether CUDA is available.
"""
try:
assert len(paddle.static.cuda_places()) > 0
@@ -67,7 +67,7 @@ def _is_cuda_available():

def _is_xpu_available():
"""
-Check whether XPU is avaiable.
+Check whether XPU is available.
"""
try:
assert len(paddle.static.xpu_places()) > 0
@@ -154,7 +154,7 @@ def _run_static_single(use_cuda, use_xpu):

def train_for_run_parallel():
"""
-train script for parallel traning check
+train script for parallel training check
"""

# to avoid cyclic import
2 changes: 1 addition & 1 deletion python/paddle/utils/layers_utils.py
@@ -27,7 +27,7 @@
def convert_to_list(value, n, name, dtype=int):
"""
Converts a single numerical type or iterable of numerical
-types into an numerical type list.
+types into a numerical type list.
Arguments:
value: The value to validate and convert. Could an int, or any iterable