
[CodeStyle][Typos][A-[21-30]] Fix typos (allctor,almostly,alog,Aread,Allways,alway,ane,adn,expaned,annotaions,annotaion,architecure,architecuture,aer) #69482

Closed
wants to merge 18 commits into from

18 commits
747b6c9
[CodeStyle][Typos]21,22,24
rich04lin Nov 18, 2024
58327a3
[CodeStyle][Typos][A-[23]] Fix typo (***)
rich04lin Nov 19, 2024
1d01a64
[CodeStyle][Typos][A-[21-30]]
rich04lin Nov 19, 2024
bfffae8
[CodeStyle][Typos][A-[21-30]]
rich04lin Nov 19, 2024
7f85ee6
[CodeStyle][Typos][A-[21-30]]
rich04lin Nov 19, 2024
1d34cbe
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 19, 2024
9a7f204
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 19, 2024
bb172b1
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 19, 2024
3aacefb
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 19, 2024
c7ca43a
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 19, 2024
0cf3cfd
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 19, 2024
532c0d1
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 20, 2024
f3d4dd7
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 20, 2024
d447ab8
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 20, 2024
34ffc2d
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 20, 2024
c12293e
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 20, 2024
f029d13
Merge branch 'develop' into develop
rich04lin Nov 20, 2024
e453c78
Merge branch 'develop' into rich04
SigureMo Nov 20, 2024
20 changes: 5 additions & 15 deletions _typos.toml
@@ -16,7 +10 @@ grad = "grad"
arange = "arange"
ot = 'ot'
pash = 'pash'
eles = 'eles'

anc = 'anc'
Member

Add back the blank line below.
# These words need to be fixed
ontext = 'ontext'
@@ -71,7 +74,6 @@ bootom = 'bootom'
Vetical = 'Vetical'
mantain = 'mantain'
patition = 'patition'
almostly = 'almostly'
Dowloading = 'Dowloading'
Prepar = 'Prepar'
precent = 'precent'
@@ -122,7 +124,6 @@ decalared = 'decalared'
coalesc = 'coalesc'
graident = 'graident'
infered = 'infered'
Allways = 'Allways'
substitue = 'substitue'
Ouput = 'Ouput'
witk = 'witk'
@@ -132,7 +133,6 @@ staticly = 'staticly'
emited = 'emited'
repalce = 'repalce'
GARD = 'GARD'
annotaions = 'annotaions'
gloabl = 'gloabl'
devide = 'devide'
zerp = 'zerp'
@@ -227,8 +227,6 @@ Rto = 'Rto'
tunning = 'tunning'
kerenl = 'kerenl'
Temperarily = 'Temperarily'
alway = 'alway'
ane = 'ane'
cliping = 'cliping'
DEIVCE = 'DEIVCE'
neeed = 'neeed'
@@ -330,7 +328,6 @@ effeciently = 'effeciently'
workround = 'workround'
fucntion = 'fucntion'
sturcture = 'sturcture'
branchs = 'branchs'
udpated = 'udpated'
vunerability = 'vunerability'
funtion = 'funtion'
@@ -527,7 +524,6 @@ neigbhors = 'neigbhors'
subsitute = 'subsitute'
futher = 'futher'
vart = 'vart'
architecure = 'architecure'
passs = 'passs'
Propogation = 'Propogation'
partion = 'partion'
@@ -549,7 +545,6 @@ instrinsics = 'instrinsics'
outputing = 'outputing'
hadler = 'hadler'
qucik = 'qucik'
alog = 'alog'
exsit = 'exsit'
deciamls = 'deciamls'
uncorrectly = 'uncorrectly'
@@ -652,7 +647,6 @@ insid = 'insid'
coodinate = 'coodinate'
usefull = 'usefull'
sqaure = 'sqaure'
adn = 'adn'
intialize = 'intialize'
Taget = 'Taget'
parm = 'parm'
@@ -693,7 +687,6 @@ defaut = 'defaut'
formating = 'formating'
infor = 'infor'
becuase = 'becuase'
annotaion = 'annotaion'
temporaily = 'temporaily'
defferent = 'defferent'
Flattern = 'Flattern'
@@ -740,7 +733,6 @@ compitable = 'compitable'
comple = 'comple'
dealed = 'dealed'
ser = 'ser'
anc = 'anc'
contraints = 'contraints'
propogated = 'propogated'
beacuse = 'beacuse'
@@ -764,7 +756,7 @@ craete = 'craete'
expaned = 'expaned'
choos = 'choos'
whos = 'whos'
architecuture = 'architecuture'
argumet = 'argumet'
coule = 'coule'
instanciate = 'instanciate'
distrubuted = 'distrubuted'
@@ -807,7 +799,6 @@ imformation = 'imformation'
kernerl = 'kernerl'
Boardcast = 'Boardcast'
Greate = 'Greate'
Alread = 'Alread'
unkown = 'unkown'
recevied = 'recevied'
Normlized = 'Normlized'
@@ -816,7 +807,6 @@ orginal = 'orginal'
Stati = 'Stati'
Destory = 'Destory'
seperately = 'seperately'
alloctor = 'alloctor'
fullfill = 'fullfill'
Substitude = 'Substitude'
producted = 'producted'
@@ -138,8 +138,8 @@ struct SearchAlgorithm<ReverseTopoNodePairPattern,
template <typename Kind, typename GraphMatcher, typename GraphOperation>
void GraphTransformer(PatternGraph* graph) {
VLOG(4) << "Start GraphTransformer...";
auto alog = SearchAlgorithm<Kind, GraphMatcher, GraphOperation>(graph);
alog();
auto algo = SearchAlgorithm<Kind, GraphMatcher, GraphOperation>(graph);
algo();
}

} // namespace cinn::fusion
4 changes: 2 additions & 2 deletions paddle/fluid/operators/fused/fused_attention_op.cc
@@ -389,8 +389,8 @@ class FusedAttentionOpMaker : public framework::OpProtoAndCheckerMaker {
"The qkv_w shape is (h, 3h), do transpose to it.")
.SetDefault(false);
AddAttr<bool>("pre_layer_norm",
"if true, the attention op uses pre_layer_norm architecure, "
"else, uses post_layer_norm architecuture. "
"if true, the attention op uses pre_layer_norm architecture, "
"else, uses post_layer_norm architecture. "
"[default false].")
.SetDefault(false);
AddAttr<float>("epsilon",
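The pre_layer_norm attribute documented above selects between the pre-layer-norm and post-layer-norm transformer variants. As a rough illustration of the difference (plain Python pseudocode with assumed layer_norm/attention callables, not the fused CUDA kernel):

def fused_attention_block(x, pre_layer_norm, layer_norm, attention):
    # pre_layer_norm=True: normalize first, attend, then add the residual.
    if pre_layer_norm:
        return x + attention(layer_norm(x))
    # pre_layer_norm=False (the default): attend, add the residual, then normalize.
    return layer_norm(x + attention(x))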
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/pybind.cc
@@ -2476,7 +2476,7 @@ All parameter, weight, gradient are variables in Paddle.

m.def("_is_program_version_supported", IsProgramVersionSupported);
#if defined(PADDLE_WITH_CUDA)
m.def("alloctor_dump", [](const phi::GPUPlace &place) {
m.def("allocator_dump", [](const phi::GPUPlace &place) {
auto allocator = std::dynamic_pointer_cast<
paddle::memory::allocation::AutoGrowthBestFitAllocator>(
paddle::memory::allocation::AllocatorFacade::Instance()
6 changes: 3 additions & 3 deletions paddle/phi/kernels/cpu/masked_select_grad_kernel.cc
@@ -41,16 +41,16 @@ void MaskedSelectGradKernel(const Context& dev_ctx,

auto expanded_size = funcs::MatrixGetBroadcastBatchPortion(
common::vectorize(x_grad->dims()), common::vectorize(mask.dims()));
auto expaned_dims = common::make_ddim(expanded_size);
auto expanded_dims = common::make_ddim(expanded_size);

if (mask.dims() != expaned_dims) {
if (mask.dims() != expanded_dims) {
ExpandKernel<bool, Context>(
dev_ctx, mask, IntArray(expanded_size), &mask_expand);
} else {
mask_expand = mask;
}

if (x_grad->dims() != expaned_dims) {
if (x_grad->dims() != expanded_dims) {
x_grad_expand = Empty<T, Context>(dev_ctx, IntArray(expanded_size));
expand_x = true;
} else {
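Here expanded_dims is the broadcasted shape that both mask and x_grad are expanded to before the gradient is computed. A rough NumPy sketch of that broadcasting step (illustration only, with made-up shapes; not the Paddle kernel):

import numpy as np

x = np.random.rand(2, 3, 4)
mask = np.random.rand(1, 3, 4) > 0.5                 # broadcastable to x's shape
expanded_dims = np.broadcast_shapes(x.shape, mask.shape)
mask_expand = np.broadcast_to(mask, expanded_dims)   # mirrors ExpandKernel on the mask
selected = x[mask_expand]                            # forward masked_select keeps these values
print(expanded_dims, selected.shape)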
6 changes: 3 additions & 3 deletions paddle/phi/kernels/gpu/masked_select_grad_kernel.cu
@@ -66,16 +66,16 @@ void MaskedSelectGradKernel(const Context& dev_ctx,

auto expanded_size = funcs::MatrixGetBroadcastBatchPortion(
common::vectorize(x_grad->dims()), common::vectorize(mask.dims()));
auto expaned_dims = common::make_ddim(expanded_size);
auto expanded_dims = common::make_ddim(expanded_size);

if (mask.dims() != expaned_dims) {
if (mask.dims() != expanded_dims) {
ExpandKernel<bool, Context>(
dev_ctx, mask, IntArray(expanded_size), &mask_expand);
} else {
mask_expand = mask;
}

if (x_grad->dims() != expaned_dims) {
if (x_grad->dims() != expanded_dims) {
x_grad_expand = Empty<T, Context>(dev_ctx, IntArray(expanded_size));
expand_x = true;
} else {
2 changes: 1 addition & 1 deletion python/paddle/distributed/passes/auto_parallel_fp16.py
@@ -124,7 +124,7 @@ def _keep_fp32_input(op, in_name):

# TODO check if bf16 and fp16 still share the same logic
def _keep_fp32_output(op, out_name):
# TODO(lizhiyu02): Support 'custom_white_list' adn 'custom_black_list' in amp_options
# TODO(lizhiyu02): Support 'custom_white_list' and 'custom_black_list' in amp_options
if not op.amp_options.enable:
return True
op_type = op.type
2 changes: 1 addition & 1 deletion python/paddle/incubate/asp/supported_layer_list.py
@@ -72,7 +72,7 @@ def _default_pruning(
# In sparse training, layer weight matrices is viewed sparse matrix A, so
# the math formula should be 'Act(WX + b)'. However, default formula in PaddlePaddle
# is 'Act(XW + b)'. For enabling SPMMA, weights and inputs should be transposed
# for computing, Act( (W^T X^T)^T + b). Therefore, we have to prune alog k dimension
# for computing, Act( (W^T X^T)^T + b). Therefore, we have to prune along k dimension
# of W^T, which is m dimension of W. Moreover, all mask generating functions in
# asp/utils is row-major pruning. That is the reason we have to transpose weight
# matrices before invoking create_mask. Then we transpose the result mask to make
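The comment above is the rationale for the transpose round-trip: the mask helpers in asp/utils prune row-major, so pruning along W's m dimension (the k dimension of W^T) means transposing the weight, masking, and transposing back. A small NumPy sketch of the idea, with a hand-rolled 2:4 mask standing in for create_mask (illustration only, not the asp/utils implementation):

import numpy as np

def create_mask_2_4(w):
    # Row-major 2:4 sparsity: in each group of 4 consecutive values along the
    # last axis, keep the 2 with the largest magnitude.
    flat = np.abs(w).reshape(-1, 4)
    keep = np.argsort(flat, axis=1)[:, 2:]           # indices of the 2 largest per group
    mask = np.zeros_like(flat)
    np.put_along_axis(mask, keep, 1.0, axis=1)
    return mask.reshape(w.shape)

w = np.random.randn(8, 16)                           # W with shape (m, k)
mask = create_mask_2_4(w.T).T                        # transpose, mask row-major, transpose back
pruned = w * mask                                    # sparsity pattern runs along W's m dimension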
2 changes: 1 addition & 1 deletion python/paddle/nn/utils/transform_parameters.py
@@ -33,7 +33,7 @@
from paddle._typing import ShapeLike


# input==output, inplace strategy of reshape has no cost almostly
# input==output, inplace strategy of reshape has no cost almost
def _inplace_reshape_dygraph(x: Tensor, shape: ShapeLike) -> None:
x_shape = _create_tensor(dtype='int64')
if in_dygraph_mode():
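For context on the "no cost" remark: an in-place reshape only rewrites shape metadata, so flattening parameters and restoring their shapes does not copy data. A tiny hedged example via Paddle's public in-place API (paddle.Tensor.reshape_ is assumed available here; the helper itself goes through lower-level dygraph ops):

import paddle

x = paddle.rand([4, 5])
x.reshape_([20])   # in-place: only the shape metadata changes, no data copy
print(x.shape)     # [20]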
@@ -354,7 +354,7 @@ def __init__(
self._quantized_weight_var_name = set()
self._quantized_act_var_name = set()
self._weight_op_pairs = {}
# The vars for alog = KL or hist
# The vars for algo = KL or hist
self._sampling_act_abs_min_max = {}
self._sampling_act_histogram = {}
self._sampling_data = {}
2 changes: 1 addition & 1 deletion python/paddle/tensor/tensor.prototype.pyi
@@ -13,7 +13,7 @@
# limitations under the License.

# The `Tensor` template `tensor.prototype.pyi` for `tools/gen_tensor_stub.py` to generate the stub file `tensor.pyi`.
# Add docstring, attributes, methods and alias with type annotaions for `Tensor` in `tensor.prototype.pyi`
# Add docstring, attributes, methods and alias with type annotations for `Tensor` in `tensor.prototype.pyi`
# if not conveniently coding in original place (like c++ source file).

# Import common typings for generated methods
2 changes: 1 addition & 1 deletion test/cpp/fluid/platform/errors_test.cc
@@ -92,7 +92,7 @@ TEST(Errors, NotFound) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(NotFound); }

TEST(Errors, OutOfRange) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(OutOfRange); }

TEST(Errors, AlreadExists) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(AlreadyExists); }
TEST(Errors, AlreadyExists) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(AlreadyExists); }

TEST(Errors, ResourceExhausted) {
CHECK_ALL_PADDLE_EXCEPTION_MACRO(ResourceExhausted);
8 changes: 4 additions & 4 deletions test/cpp/phi/kernels/test_gpu_timer.cu
@@ -86,19 +86,19 @@ TEST(GpuTimer, Sum) {
#endif

using Functor = std::function<void(float *, float *, size_t)>;
Functor alog0 = Algo<4, 256, 1024>;
Functor algo0 = Algo<4, 256, 1024>;
Functor algo1 = Algo<1, 256, 1024>;
Functor alog2 = Algo<1, 256, 8>;
Functor algo2 = Algo<1, 256, 8>;

std::vector<Functor> algos = {alog0, algo1, alog2};
std::vector<Functor> algos = {algo0, algo1, algo2};

for (int j = 0; j < algos.size(); ++j) {
auto algo = algos[j];
phi::GpuTimer timer;
timer.Start(0);
algo(d_in1, d_in2, N);
timer.Stop(0);
VLOG(3) << "alog: " << j << " cost: " << timer.ElapsedTime() << "ms";
VLOG(3) << "algo: " << j << " cost: " << timer.ElapsedTime() << "ms";
}

#ifdef __HIPCC__
2 changes: 1 addition & 1 deletion tools/gen_pybind11_stub.py
@@ -185,7 +185,7 @@ def wrapper(self, arg: Any):


def _patch_pybind11_invalid_annotation():
# patch invalid annotaion as `Value`, e.g. 'capsule' to 'typing_extensions.CapsuleType'
# patch invalid annotation as `Value`, e.g. 'capsule' to 'typing_extensions.CapsuleType'
def wrap_name(func):
@functools.wraps(func)
def wrapper(self, arg: Annotation):
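The patched wrapper above rewrites annotation strings that pybind11 emits but that are not importable Python names, such as 'capsule'. A minimal standalone sketch of that mapping idea (hypothetical helper, not the actual gen_pybind11_stub.py code):

# Hypothetical mapping from invalid pybind11 annotations to importable types.
_ANNOTATION_FIXES = {
    "capsule": "typing_extensions.CapsuleType",
}

def fix_annotation(name: str) -> str:
    # Fall back to the original string when no fix is registered.
    return _ANNOTATION_FIXES.get(name, name)

assert fix_annotation("capsule") == "typing_extensions.CapsuleType"
assert fix_annotation("int") == "int"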