[CodeStyle][Typos][A-[21-30]] Fix typos (allctor,almostly,alog,Aread,Allways,alway,ane,adn,expaned,annotaions,annotaion,architecure,architecuture,aer) #69482

Closed (wants to merge 18 commits)

Changes from 5 commits

Commits (18):
747b6c9
[CodeStyle][Typos]21,22,24
rich04lin Nov 18, 2024
58327a3
[CodeStyle][Typos][A-[23]] Fix typo (***)
rich04lin Nov 19, 2024
1d01a64
[CodeStyle][Typos][A-[21-30]]
rich04lin Nov 19, 2024
bfffae8
[CodeStyle][Typos][A-[21-30]]
rich04lin Nov 19, 2024
7f85ee6
[CodeStyle][Typos][A-[21-30]]
rich04lin Nov 19, 2024
1d34cbe
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 19, 2024
9a7f204
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 19, 2024
bb172b1
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 19, 2024
3aacefb
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 19, 2024
c7ca43a
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 19, 2024
0cf3cfd
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 19, 2024
532c0d1
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 20, 2024
f3d4dd7
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 20, 2024
d447ab8
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 20, 2024
34ffc2d
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 20, 2024
c12293e
[CodeStyle][Typos][A-[21-30]] Fix typo (allctor,almostly,alog,Aread,A…
rich04lin Nov 20, 2024
f029d13
Merge branch 'develop' into develop
rich04lin Nov 20, 2024
e453c78
Merge branch 'develop' into rich04
SigureMo Nov 20, 2024
17 changes: 1 addition & 16 deletions _typos.toml
@@ -10,7 +10,7 @@ grad = "grad"
arange = "arange"
ot = 'ot'
pash = 'pash'

anc = 'anc'
Member:

Add the blank line below back.

# These words need to be fixed
ontext = 'ontext'
Direcly = 'Direcly'
@@ -65,7 +65,6 @@ bootom = 'bootom'
Vetical = 'Vetical'
mantain = 'mantain'
patition = 'patition'
almostly = 'almostly'
Dowloading = 'Dowloading'
Prepar = 'Prepar'
precent = 'precent'
@@ -117,7 +116,6 @@ decalared = 'decalared'
coalesc = 'coalesc'
graident = 'graident'
infered = 'infered'
Allways = 'Allways'
substitue = 'substitue'
Ouput = 'Ouput'
witk = 'witk'
@@ -127,7 +125,6 @@ staticly = 'staticly'
emited = 'emited'
repalce = 'repalce'
GARD = 'GARD'
annotaions = 'annotaions'
gloabl = 'gloabl'
devide = 'devide'
zerp = 'zerp'
@@ -155,7 +152,6 @@ endianess = 'endianess'
VAILD = 'VAILD'
ues = 'ues'
algorithem = 'algorithem'
aer = 'aer'
elemenents = 'elemenents'
CANN = 'CANN'
pathes = 'pathes'
@@ -226,8 +222,6 @@ Rto = 'Rto'
tunning = 'tunning'
kerenl = 'kerenl'
Temperarily = 'Temperarily'
alway = 'alway'
ane = 'ane'
cliping = 'cliping'
DEIVCE = 'DEIVCE'
neeed = 'neeed'
@@ -334,7 +328,6 @@ effeciently = 'effeciently'
workround = 'workround'
fucntion = 'fucntion'
sturcture = 'sturcture'
branchs = 'branchs'
udpated = 'udpated'
vunerability = 'vunerability'
funtion = 'funtion'
@@ -535,7 +528,6 @@ neigbhors = 'neigbhors'
subsitute = 'subsitute'
futher = 'futher'
vart = 'vart'
architecure = 'architecure'
passs = 'passs'
Propogation = 'Propogation'
partion = 'partion'
@@ -558,7 +550,6 @@ outputing = 'outputing'
hadler = 'hadler'
aggragate = 'aggragate'
qucik = 'qucik'
alog = 'alog'
exsit = 'exsit'
deciamls = 'deciamls'
uncorrectly = 'uncorrectly'
@@ -662,7 +653,6 @@ insid = 'insid'
coodinate = 'coodinate'
usefull = 'usefull'
sqaure = 'sqaure'
adn = 'adn'
intialize = 'intialize'
addtional = 'addtional'
Taget = 'Taget'
@@ -705,7 +695,6 @@ defaut = 'defaut'
formating = 'formating'
infor = 'infor'
becuase = 'becuase'
annotaion = 'annotaion'
temporaily = 'temporaily'
defferent = 'defferent'
Flattern = 'Flattern'
@@ -753,7 +742,6 @@ compitable = 'compitable'
comple = 'comple'
dealed = 'dealed'
ser = 'ser'
anc = 'anc'
contraints = 'contraints'
propogated = 'propogated'
beacuse = 'beacuse'
@@ -777,7 +765,6 @@ craete = 'craete'
expaned = 'expaned'
choos = 'choos'
whos = 'whos'
architecuture = 'architecuture'
argumet = 'argumet'
coule = 'coule'
instanciate = 'instanciate'
@@ -823,7 +810,6 @@ imformation = 'imformation'
kernerl = 'kernerl'
Boardcast = 'Boardcast'
Greate = 'Greate'
Alread = 'Alread'
unkown = 'unkown'
recevied = 'recevied'
Normlized = 'Normlized'
@@ -832,7 +818,6 @@ orginal = 'orginal'
Stati = 'Stati'
Destory = 'Destory'
seperately = 'seperately'
alloctor = 'alloctor'
fullfill = 'fullfill'
Substitude = 'Substitude'
producted = 'producted'
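For readers unfamiliar with the file above: in typos' configuration, each extend-words entry maps a typo to its correction, and an identity mapping (word = 'word') whitelists the spelling, so deleting a line here re-enables checking for that word. A minimal hedged sketch of the semantics; the [default.extend-words] table name is an assumption about how Paddle's _typos.toml is laid out, not copied from it:

```toml
# Hedged sketch of typos entry semantics, assuming the standard table name.
[default.extend-words]
# Identity mapping: accept "anc" as a valid word (suppress the check).
anc = 'anc'
# A correction entry instead maps the typo to the intended spelling:
architecure = "architecture"
```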
@@ -138,8 +138,8 @@ struct SearchAlgorithm<ReverseTopoNodePairPattern,
template <typename Kind, typename GraphMatcher, typename GraphOperation>
void GraphTransformer(PatternGraph* graph) {
VLOG(4) << "Start GraphTransformer...";
auto alog = SearchAlgorithm<Kind, GraphMatcher, GraphOperation>(graph);
alog();
auto along = SearchAlgorithm<Kind, GraphMatcher, GraphOperation>(graph);
Member:

Suggested change:
auto along = SearchAlgorithm<Kind, GraphMatcher, GraphOperation>(graph);
auto algo = SearchAlgorithm<Kind, GraphMatcher, GraphOperation>(graph);

Look at the context yourself: isn't this SearchAlgorithm returning an Algorithm?

along();
}

} // namespace cinn::fusion
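To make the reviewer's point concrete: SearchAlgorithm here is a callable object that is constructed over the graph and then invoked, so the variable names an algorithm, not a direction. A minimal self-contained sketch with simplified, hypothetical stand-in types (not the real cinn::fusion declarations):

```cpp
#include <iostream>

// Simplified stand-ins for the real cinn::fusion types.
struct PatternGraph {};
struct TopoNodePairPattern {};

template <typename Kind>
struct SearchAlgorithm {
  explicit SearchAlgorithm(PatternGraph* graph) : graph_(graph) {}
  // The object is invoked like a function: it *is* the algorithm.
  void operator()() {
    (void)graph_;  // the real search would walk graph_
    std::cout << "running graph search\n";
  }

 private:
  PatternGraph* graph_;
};

int main() {
  PatternGraph graph;
  auto algo = SearchAlgorithm<TopoNodePairPattern>(&graph);  // algo, not along
  algo();
  return 0;
}
```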
4 changes: 2 additions & 2 deletions paddle/fluid/operators/fused/fused_attention_op.cc
@@ -389,8 +389,8 @@ class FusedAttentionOpMaker : public framework::OpProtoAndCheckerMaker {
"The qkv_w shape is (h, 3h), do transpose to it.")
.SetDefault(false);
AddAttr<bool>("pre_layer_norm",
"if true, the attention op uses pre_layer_norm architecure, "
"else, uses post_layer_norm architecuture. "
"if true, the attention op uses pre_layer_norm architecture, "
"else, uses post_layer_norm architecture. "
"[default false].")
.SetDefault(false);
AddAttr<float>("epsilon",
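For context on the attribute being documented above: pre_layer_norm selects where layer normalization sits relative to the attention residual branch. A schematic sketch of the two orderings; attention and layer_norm below are placeholders, not Paddle APIs:

```python
def attention(x):   # placeholder for the fused attention computation
    return x

def layer_norm(x):  # placeholder for layer normalization
    return x

def block(x, pre_layer_norm=False):
    if pre_layer_norm:
        # pre_layer_norm architecture: normalize the input, then attend.
        return x + attention(layer_norm(x))
    # post_layer_norm architecture: attend, then normalize the residual sum.
    return layer_norm(x + attention(x))
```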
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/pybind.cc
@@ -2476,7 +2476,7 @@ All parameter, weight, gradient are variables in Paddle.

m.def("_is_program_version_supported", IsProgramVersionSupported);
#if defined(PADDLE_WITH_CUDA)
m.def("alloctor_dump", [](const phi::GPUPlace &place) {
m.def("allocator_dump", [](const phi::GPUPlace &place) {
auto allocator = std::dynamic_pointer_cast<
paddle::memory::allocation::AutoGrowthBestFitAllocator>(
paddle::memory::allocation::AllocatorFacade::Instance()
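Since this renames a public pybind symbol, any caller of the old alloctor_dump name breaks. A hedged usage sketch: only the binding name and its GPUPlace argument come from the diff; the paddle.base.core module path is an assumption about where pybind.cc bindings surface in recent Paddle:

```python
import paddle

# Only meaningful on CUDA builds, matching the #if defined(PADDLE_WITH_CUDA)
# guard around the binding.
if paddle.is_compiled_with_cuda():
    from paddle.base import core  # assumed location of the pybind module
    core.allocator_dump(paddle.CUDAPlace(0))  # was: core.alloctor_dump(...)
```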
6 changes: 3 additions & 3 deletions paddle/phi/kernels/cpu/masked_select_grad_kernel.cc
@@ -41,16 +41,16 @@ void MaskedSelectGradKernel(const Context& dev_ctx,

auto expanded_size = funcs::MatrixGetBroadcastBatchPortion(
common::vectorize(x_grad->dims()), common::vectorize(mask.dims()));
auto expaned_dims = common::make_ddim(expanded_size);
auto expanded_dims = common::make_ddim(expanded_size);

if (mask.dims() != expaned_dims) {
if (mask.dims() != expanded_dims) {
ExpandKernel<bool, Context>(
dev_ctx, mask, IntArray(expanded_size), &mask_expand);
} else {
mask_expand = mask;
}

if (x_grad->dims() != expaned_dims) {
if (x_grad->dims() != expanded_dims) {
x_grad_expand = Empty<T, Context>(dev_ctx, IntArray(expanded_size));
expand_x = true;
} else {
6 changes: 3 additions & 3 deletions paddle/phi/kernels/gpu/masked_select_grad_kernel.cu
@@ -66,16 +66,16 @@ void MaskedSelectGradKernel(const Context& dev_ctx,

auto expanded_size = funcs::MatrixGetBroadcastBatchPortion(
common::vectorize(x_grad->dims()), common::vectorize(mask.dims()));
auto expaned_dims = common::make_ddim(expanded_size);
auto expanded_dims = common::make_ddim(expanded_size);

if (mask.dims() != expaned_dims) {
if (mask.dims() != expanded_dims) {
ExpandKernel<bool, Context>(
dev_ctx, mask, IntArray(expanded_size), &mask_expand);
} else {
mask_expand = mask;
}

if (x_grad->dims() != expaned_dims) {
if (x_grad->dims() != expanded_dims) {
x_grad_expand = Empty<T, Context>(dev_ctx, IntArray(expanded_size));
expand_x = true;
} else {
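Both the CPU and GPU kernels above follow the same shape logic: compute the common broadcast shape of x_grad and mask, then expand whichever operand does not already match it. A small sketch of that logic in numpy, with hypothetical shapes:

```python
import numpy as np

x_grad = np.zeros((4, 1, 3))           # gradient buffer
mask = np.ones((1, 5, 3), dtype=bool)  # boolean selection mask

# Common broadcast shape, analogous to MatrixGetBroadcastBatchPortion +
# common::make_ddim producing expanded_dims.
expanded_dims = np.broadcast_shapes(x_grad.shape, mask.shape)  # (4, 5, 3)

# Expand only the operands whose shape differs, mirroring the two `if`s.
mask_expand = (mask if mask.shape == expanded_dims
               else np.broadcast_to(mask, expanded_dims))
x_grad_expand = (x_grad if x_grad.shape == expanded_dims
                 else np.broadcast_to(x_grad, expanded_dims))
```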
2 changes: 1 addition & 1 deletion python/paddle/distributed/passes/auto_parallel_fp16.py
@@ -124,7 +124,7 @@ def _keep_fp32_input(op, in_name):

# TODO check if bf16 and fp16 still share the same logic
def _keep_fp32_output(op, out_name):
# TODO(lizhiyu02): Support 'custom_white_list' adn 'custom_black_list' in amp_options
# TODO(lizhiyu02): Support 'custom_white_list' and 'custom_black_list' in amp_options
if not op.amp_options.enable:
return True
op_type = op.type
2 changes: 1 addition & 1 deletion python/paddle/incubate/asp/supported_layer_list.py
@@ -72,7 +72,7 @@ def _default_pruning(
# In sparse training, layer weight matrices is viewed sparse matrix A, so
# the math formula should be 'Act(WX + b)'. However, default formula in PaddlePaddle
# is 'Act(XW + b)'. For enabling SPMMA, weights and inputs should be transposed
# for computing, Act( (W^T X^T)^T + b). Therefore, we have to prune alog k dimension
# for computing, Act( (W^T X^T)^T + b). Therefore, we have to prune log k dimension
Member:

Suggested change:
# for computing, Act( (W^T X^T)^T + b). Therefore, we have to prune log k dimension
# for computing, Act( (W^T X^T)^T + b). Therefore, we have to prune along k dimension

You can tell from line 71 that it should be along.

# of W^T, which is m dimension of W. Moreover, all mask generating functions in
# asp/utils is row-major pruning. That is the reason we have to transpose weight
# matrices before invoking create_mask. Then we transpose the result mask to make
2 changes: 1 addition & 1 deletion python/paddle/nn/utils/transform_parameters.py
@@ -33,7 +33,7 @@
from paddle._typing import ShapeLike


# input==output, inplace strategy of reshape has no cost almostly
# input==output, inplace strategy of reshape has no cost almost
def _inplace_reshape_dygraph(x: Tensor, shape: ShapeLike) -> None:
x_shape = _create_tensor(dtype='int64')
if in_dygraph_mode():
@@ -354,7 +354,7 @@ def __init__(
self._quantized_weight_var_name = set()
self._quantized_act_var_name = set()
self._weight_op_pairs = {}
# The vars for alog = KL or hist
# The vars for along = KL or hist
Member:

Suggested change:
# The vars for along = KL or hist
# The vars for algo = KL or hist

It's the algorithm (algo), as in KL.

self._sampling_act_abs_min_max = {}
self._sampling_act_histogram = {}
self._sampling_data = {}
2 changes: 1 addition & 1 deletion python/paddle/tensor/tensor.prototype.pyi
@@ -13,7 +13,7 @@
# limitations under the License.

# The `Tensor` template `tensor.prototype.pyi` for `tools/gen_tensor_stub.py` to generate the stub file `tensor.pyi`.
# Add docstring, attributes, methods and alias with type annotaions for `Tensor` in `tensor.prototype.pyi`
# Add docstring, attributes, methods and alias with type annotations for `Tensor` in `tensor.prototype.pyi`
# if not conveniently coding in original place (like c++ source file).

# Import common typings for generated methods
2 changes: 1 addition & 1 deletion test/cpp/fluid/platform/errors_test.cc
@@ -92,7 +92,7 @@ TEST(Errors, NotFound) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(NotFound); }

TEST(Errors, OutOfRange) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(OutOfRange); }

TEST(Errors, AlreadExists) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(AlreadyExists); }
TEST(Errors, AlreadyExists) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(AlreadyExists); }

TEST(Errors, ResourceExhausted) {
CHECK_ALL_PADDLE_EXCEPTION_MACRO(ResourceExhausted);
10 changes: 5 additions & 5 deletions test/cpp/phi/kernels/test_gpu_timer.cu
@@ -86,19 +86,19 @@ TEST(GpuTimer, Sum) {
#endif

using Functor = std::function<void(float *, float *, size_t)>;
Functor alog0 = Algo<4, 256, 1024>;
Functor algo1 = Algo<1, 256, 1024>;
Functor alog2 = Algo<1, 256, 8>;
Functor along0 = Along<4, 256, 1024>;
Functor along1 = Along<1, 256, 1024>;
Functor along2 = Along<1, 256, 8>;
Member:

Suggested change:
Functor along0 = Along<4, 256, 1024>;
Functor along1 = Along<1, 256, 1024>;
Functor along2 = Along<1, 256, 8>;
Functor algo0 = Algo<4, 256, 1024>;
Functor algo1 = Algo<1, 256, 1024>;
Functor algo2 = Algo<1, 256, 8>;

Algorithm, algorithm, algorithm. What on earth is Along?


std::vector<Functor> algos = {alog0, algo1, alog2};
std::vector<Functor> algos = {along0, along1, along2};

for (int j = 0; j < algos.size(); ++j) {
auto algo = algos[j];
phi::GpuTimer timer;
timer.Start(0);
algo(d_in1, d_in2, N);
timer.Stop(0);
VLOG(3) << "alog: " << j << " cost: " << timer.ElapsedTime() << "ms";
VLOG(3) << "along: " << j << " cost: " << timer.ElapsedTime() << "ms";
}

#ifdef __HIPCC__
2 changes: 1 addition & 1 deletion test/dataset/imikolov_test.py
@@ -31,7 +31,7 @@ def test_train(self):
self.check_reader(paddle.dataset.imikolov.train(WORD_DICT, n), n)

first_line = (
'aer banknote berlitz calloway centrust cluett fromstein '
'are banknote berlitz calloway centrust cluett fromstein '
Member:

This can't be changed here: these were never valid words to begin with, so this file needs to be ignored in the config.

See crate-ci/typos#316 (comment).

Config reference: https://github.com/crate-ci/typos/blob/master/docs/reference.md#config-fields

The files.ignore-files config field can be used.

@MrXnneHang you can mention this in the Q&A: for files that deliberately contain typo literals like these, the whole file can be ignored.

Contributor:

OK.

SigureMo (Member), Nov 19, 2024:

aer [invalid word]

Has this been added to the config? Add it to the config per file.

'gitano guterman hydro-quebec ipo kia memotec mlx nahb punts '
'rake regatta rubens sim snack-food ssangyong swapo wachter'
)
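Following the thread above, the whole test file can be excluded rather than "fixing" its corpus literals. A hedged sketch of one way to do this, using the files.extend-exclude glob list documented in the config reference linked above; whether Paddle ultimately used this field or files.ignore-files is not shown here:

```toml
# Hedged sketch: exclude files whose content intentionally contains typo
# literals, such as imikolov's raw corpus tokens ("aer", "calloway", ...).
[files]
extend-exclude = [
    "test/dataset/imikolov_test.py",
]
```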
2 changes: 1 addition & 1 deletion tools/gen_pybind11_stub.py
@@ -185,7 +185,7 @@ def wrapper(self, arg: Any):


def _patch_pybind11_invalid_annotation():
# patch invalid annotaion as `Value`, e.g. 'capsule' to 'typing_extensions.CapsuleType'
# patch invalid annotation as `Value`, e.g. 'capsule' to 'typing_extensions.CapsuleType'
def wrap_name(func):
@functools.wraps(func)
def wrapper(self, arg: Annotation):