diff --git a/_typos.toml b/_typos.toml
index bf863607fded8..6db1265618f71 100644
--- a/_typos.toml
+++ b/_typos.toml
@@ -54,11 +54,6 @@ vaccum = 'vaccum'
 # These words need to be fixed
 Operants = 'Operants'
 operants = 'operants'
-outout = 'outout'
-ouput = 'ouput'
-outpout = 'outpout'
-ouptut = 'ouptut'
-Ouput = 'Ouput'
 setted = 'setted'
 storeage = 'storeage'
 sotring = 'sotring'
diff --git a/paddle/cinn/hlir/pe/nn.h b/paddle/cinn/hlir/pe/nn.h
index 64189eac54e5a..c59e774bd1a04 100755
--- a/paddle/cinn/hlir/pe/nn.h
+++ b/paddle/cinn/hlir/pe/nn.h
@@ -464,7 +464,7 @@ ir::Tensor DropoutInfer(
  *    out = true_value
  * 2. condition expr = false
  *    out = false_value
- * @param ouput_name : the name of the output tensor.
+ * @param output_name : the name of the output tensor.
  */
 ir::Tensor Select(const ir::Tensor &condition,
                   const ir::Tensor &true_value,
diff --git a/paddle/cinn/hlir/pe/reduction.cc b/paddle/cinn/hlir/pe/reduction.cc
index 444acbd89e32f..d81ee0158b004 100644
--- a/paddle/cinn/hlir/pe/reduction.cc
+++ b/paddle/cinn/hlir/pe/reduction.cc
@@ -345,7 +345,7 @@ std::vector<ir::Tensor> WarpReduce(const ir::Tensor& A,
       },
       UniqName(output_name + "_" + reduce_type));
 
-  // compute ouput shape.
+  // compute output shape.
   std::vector<ir::Expr> out_shape(A->shape.begin(),
                                   A->shape.begin() + shape_size_without_reduce_dim);
   for (int idx = 0; idx < last_reduce_dim_num && keep_dim; ++idx) {
diff --git a/paddle/cinn/operator_fusion/utils.cc b/paddle/cinn/operator_fusion/utils.cc
index 085862be84ffd..3fee6e8a56eb7 100644
--- a/paddle/cinn/operator_fusion/utils.cc
+++ b/paddle/cinn/operator_fusion/utils.cc
@@ -158,7 +158,7 @@ std::vector<std::pair<size_t, size_t>> GetNonBroadCastDims(pir::Operation* op) {
   PADDLE_ENFORCE_GE(output_rank,
                     input_rank,
                     ::common::errors::PreconditionNotMet(
-                        "[Error info] The ouput_rank should "
+                        "[Error info] The output_rank should "
                         "be greater or equal to input_rank."));

   // Compare axis one by one, from back to front.
diff --git a/paddle/fluid/framework/ir/graph_helper.cc b/paddle/fluid/framework/ir/graph_helper.cc
index b43baba0a12c8..30b5767f0f0d6 100644
--- a/paddle/fluid/framework/ir/graph_helper.cc
+++ b/paddle/fluid/framework/ir/graph_helper.cc
@@ -450,7 +450,7 @@ std::vector<ir::Node *> TopologySortGraphByDescOrder(const Graph &graph) {
   return ret;
 }
 
-void RemoveControlDepInputAndOuput(OpDesc *op_desc) {
+void RemoveControlDepInputAndOutput(OpDesc *op_desc) {
   auto remove_control_dep_var = [](VariableNameMap *var_name_map) {
     for (auto &pair : *var_name_map) {
       std::vector<std::string> &var_names = pair.second;
@@ -736,7 +736,7 @@ static void GraphToBlock(const Graph &graph,
   GetGraphOpDesc(nodes, block, &ops, graph, graph_idx);
 
   for (auto &op : ops) {
-    RemoveControlDepInputAndOuput(&op);
+    RemoveControlDepInputAndOutput(&op);
     block->add_ops()->MergeFrom(*op.Proto());
   }
 }
diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc
index 89f26450c736f..b43a6198dc749 100644
--- a/paddle/fluid/framework/ir/graph_pattern_detector.cc
+++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -4646,7 +4646,7 @@ PDNode *patterns::FusedFeedForwardBwd::operator()(
 
   // other cases: may delete residual_add_grad, dropout1_grad, dropout2_grad
   // operators
-  // intermediate input_grad, and final pattern ouput_grad
+  // intermediate input_grad, and final pattern output_grad
   PDNode *out_grad = x_grad;
   // LayerNorm: in["Mean", "Variance", "Scale", "Bias", "Y@GRAD"],
   // out["X@GRAD", "Scale@GRAD", "Bias@GRAD"]
diff --git a/paddle/fluid/inference/openvino/engine.cc b/paddle/fluid/inference/openvino/engine.cc
index 7385148dfdf57..1e88da715814f 100644
--- a/paddle/fluid/inference/openvino/engine.cc
+++ b/paddle/fluid/inference/openvino/engine.cc
@@ -27,8 +27,8 @@ bool OpenVINOEngine::IsModelStatic() {
   return isStatic;
 }
 
-ov::Shape OpenVINOEngine::GetOuputShape(const std::string& output_name,
-                                        int64_t index) {
+ov::Shape OpenVINOEngine::GetOutputShape(const std::string& output_name,
+                                         int64_t index) {
   auto ov_output_shape =
       HaveOutputTensorName(output_name)
           ? infer_request_.get_tensor(output_name).get_shape()
@@ -36,9 +36,9 @@ ov::Shape OpenVINOEngine::GetOuputShape(const std::string& output_name,
   return ov_output_shape;
 }
 
-phi::DataType OpenVINOEngine::GetOuputType(const std::string& output_name,
-                                           int64_t index,
-                                           ov::element::Type ov_paddle_type) {
+phi::DataType OpenVINOEngine::GetOutputType(const std::string& output_name,
+                                            int64_t index,
+                                            ov::element::Type ov_paddle_type) {
   auto output_ov_type =
       HaveOutputTensorName(output_name)
           ? infer_request_.get_tensor(output_name).get_element_type()
@@ -56,9 +56,9 @@ phi::DataType OpenVINOEngine::GetOuputType(const std::string& output_name,
   return OVType2PhiType(output_ov_type);
 }
 
-void OpenVINOEngine::CopyOuputDataByName(const std::string& output_name,
-                                         int64_t index,
-                                         void* pd_data) {
+void OpenVINOEngine::CopyOutputDataByName(const std::string& output_name,
+                                          int64_t index,
+                                          void* pd_data) {
   auto ov_tensor = HaveOutputTensorName(output_name)
                        ? infer_request_.get_tensor(output_name)
                        : infer_request_.get_output_tensor(index);
diff --git a/paddle/fluid/inference/openvino/engine.h b/paddle/fluid/inference/openvino/engine.h
index e63d290ef35a3..706485253130c 100644
--- a/paddle/fluid/inference/openvino/engine.h
+++ b/paddle/fluid/inference/openvino/engine.h
@@ -174,13 +174,13 @@ class OpenVINOEngine {
   ov::Model* model() { return model_.get(); }
   ov::CompiledModel compiled_model() { return complied_model_; }
   ov::InferRequest infer_request() { return infer_request_; }
-  ov::Shape GetOuputShape(const std::string& name, int64_t index);
-  phi::DataType GetOuputType(const std::string& name,
-                             int64_t index,
-                             ov::element::Type ov_paddle_type);
-  void CopyOuputDataByName(const std::string& output_name,
-                           int64_t index,
-                           void* pd_data);
+  ov::Shape GetOutputShape(const std::string& name, int64_t index);
+  phi::DataType GetOutputType(const std::string& name,
+                              int64_t index,
+                              ov::element::Type ov_paddle_type);
+  void CopyOutputDataByName(const std::string& output_name,
+                            int64_t index,
+                            void* pd_data);
   void Execute();
 
  private:
diff --git a/paddle/fluid/operators/openvino/openvino_engine_op.h b/paddle/fluid/operators/openvino/openvino_engine_op.h
index c35445d5b919f..c83fcab913c41 100644
--- a/paddle/fluid/operators/openvino/openvino_engine_op.h
+++ b/paddle/fluid/operators/openvino/openvino_engine_op.h
@@ -214,8 +214,8 @@ class OpenVINOEngineOp : public framework::OperatorBase {
           common::errors::NotFound(
               "Output variable %s is not found in Openvino subgraph.", y));
       auto *fluid_t = fluid_v->GetMutable<phi::DenseTensor>();
-      auto ov_output_shape = engine->GetOuputShape(output_names_[i], i);
-      auto phi_type = engine->GetOuputType(
+      auto ov_output_shape = engine->GetOutputShape(output_names_[i], i);
+      auto phi_type = engine->GetOutputType(
           output_names_[i],
           i,
           inference::openvino::VarType2OVType(ori_var_type));
@@ -224,7 +224,7 @@ class OpenVINOEngineOp : public framework::OperatorBase {
         ddim.push_back(ov_output_shape[j]);
       }
       fluid_t->Resize(common::make_ddim(ddim));
-      engine->CopyOuputDataByName(
+      engine->CopyOutputDataByName(
           output_names_[i], i, fluid_t->mutable_data(dev_place, phi_type));
     }
   }
diff --git a/paddle/phi/infermeta/spmd_rules/default_data_parallel.h b/paddle/phi/infermeta/spmd_rules/default_data_parallel.h
index 54119819fa4ef..49d4d0854f183 100644
--- a/paddle/phi/infermeta/spmd_rules/default_data_parallel.h
+++ b/paddle/phi/infermeta/spmd_rules/default_data_parallel.h
@@ -24,10 +24,10 @@ namespace phi {
 namespace distributed {
 /**
  * A **hack** rule with a strong assumption that the first dimension of
- * all the input and ouput tensors is the batch dimension (broadcast dimension),
- * therefore, if any tensor's first dimension is sharded, the sharding would be
- * propagating to all the other tensors (for tensor first dimension). All the
- * other axes of tensors would be set as unshard (-1).
+ * all the input and output tensors is the batch dimension (broadcast
+ * dimension); therefore, if any tensor's first dimension is sharded, the
+ * sharding is propagated to all the other tensors (for the tensors' first
+ * dimension). All the other axes of the tensors are set as unsharded (-1).
  *
  *
  * This rule is used to support emerging op for hybrid parallelism quickly, and
diff --git a/paddle/phi/kernels/funcs/interpolate_function.h b/paddle/phi/kernels/funcs/interpolate_function.h
index 374602111c094..070774c053250 100644
--- a/paddle/phi/kernels/funcs/interpolate_function.h
+++ b/paddle/phi/kernels/funcs/interpolate_function.h
@@ -190,10 +190,10 @@ struct FastDivModForInterpolate {
 
   explicit HOSTDEVICE FastDivModForInterpolate(const int channels,
                                                const int output_w,
-                                               const int outout_wc)
+                                               const int output_wc)
       : channels_div(FastDivMod(channels)),
         output_w_div(FastDivMod(output_w)),
-        output_wc_div(FastDivMod(outout_wc)) {}
+        output_wc_div(FastDivMod(output_wc)) {}
 };
 
 #endif
diff --git a/paddle/phi/kernels/funcs/pooling.cu b/paddle/phi/kernels/funcs/pooling.cu
index 9a0db0653e4a4..36127ee35a05f 100644
--- a/paddle/phi/kernels/funcs/pooling.cu
+++ b/paddle/phi/kernels/funcs/pooling.cu
@@ -312,10 +312,10 @@ __global__ void KernelPool2DGrad(const int nthreads,
           int output_sub_idx =
               channel_last ? tmp_idx * divmods.channel.divisor + c_offset
                            : tmp_idx;
-          T ouput_value = pool_process.use_x ? output_data[output_sub_idx]
-                                             : static_cast<T>(0);
+          T output_value = pool_process.use_x ? output_data[output_sub_idx]
+                                              : static_cast<T>(0);
           pool_process.compute(input,
-                               ouput_value,
+                               output_value,
                                output_grad[output_sub_idx],
                                static_cast<T>(1.0 / pool_size),
                                &input_grad_data);
@@ -343,10 +343,10 @@ __global__ void KernelPool2DGrad(const int nthreads,
           int output_sub_idx =
               channel_last ? tmp_idx * divmods.channel.divisor + c_offset
                            : tmp_idx;
-          T ouput_value = pool_process.use_x ? output_data[output_sub_idx]
-                                             : static_cast<T>(0);
+          T output_value = pool_process.use_x ? output_data[output_sub_idx]
+                                              : static_cast<T>(0);
           pool_process.compute(input,
-                               ouput_value,
+                               output_value,
                                output_grad[output_sub_idx],
                                static_cast<T>(1.0 / pool_size),
                                &input_grad_data);
@@ -360,10 +360,10 @@ __global__ void KernelPool2DGrad(const int nthreads,
           int output_sub_idx =
               channel_last ? tmp_idx * divmods.channel.divisor + c_offset
                            : tmp_idx;
-          T ouput_value = pool_process.use_x ? output_data[output_sub_idx]
-                                             : static_cast<T>(0);
+          T output_value = pool_process.use_x ? output_data[output_sub_idx]
+                                              : static_cast<T>(0);
           pool_process.compute(input,
-                               ouput_value,
+                               output_value,
                                output_grad[output_sub_idx],
                                static_cast<T>(1.0 / pool_size),
                                &input_grad_data);
@@ -1267,10 +1267,10 @@ __global__ void KernelPool3DGrad(const int nthreads,
                 ? ((pd * output_height + ph) * output_width + pw) * channels +
                       c_offset
                 : (pd * output_height + ph) * output_width + pw;
-        T ouput_value = pool_process.use_x ? output_data[output_sub_idx]
-                                           : static_cast<T>(0);
+        T output_value = pool_process.use_x ? output_data[output_sub_idx]
+                                            : static_cast<T>(0);
         pool_process.compute(input,
-                             ouput_value,
+                             output_value,
                              output_grad[output_sub_idx],
                              static_cast<T>(1.0 / pool_size),
                              &input_grad_data);
diff --git a/paddle/phi/kernels/impl/bessel_grad_kernel_cuda_impl.h b/paddle/phi/kernels/impl/bessel_grad_kernel_cuda_impl.h
index b3e272800b482..a0c850e7207ed 100644
--- a/paddle/phi/kernels/impl/bessel_grad_kernel_cuda_impl.h
+++ b/paddle/phi/kernels/impl/bessel_grad_kernel_cuda_impl.h
@@ -26,7 +26,7 @@ struct CudaI0GradFunctor {
     using MT = typename phi::dtype::MPTypeTrait<T>::Type;
     const MT mp_x = static_cast<MT>(_x);
     const MT mp_out_grad = static_cast<MT>(_out_grad);
-    // get ouput of i1
+    // get output of i1
     MT x = std::abs(mp_x);
     if (x <= MT{8.0}) {
       auto coeff_pair_A = ChebyshevCoefficientsI1e_A<MT>();
diff --git a/paddle/pir/include/core/operation.h b/paddle/pir/include/core/operation.h
index 5d4a17f21452c..c0943caeb0bac 100644
--- a/paddle/pir/include/core/operation.h
+++ b/paddle/pir/include/core/operation.h
@@ -126,7 +126,7 @@ class IR_API alignas(8) Operation final
   void *value_property(const std::string &key, size_t index) const;
 
   ///
-  /// \brief op ouput related public interfaces
+  /// \brief op output related public interfaces
   ///
   uint32_t num_results() const { return num_results_; }
   Value result(uint32_t index) const { return OpResult(op_result_impl(index)); }
diff --git a/python/paddle/distributed/auto_parallel/static/engine.py b/python/paddle/distributed/auto_parallel/static/engine.py
index 231d855b4a257..ac6380c57174d 100644
--- a/python/paddle/distributed/auto_parallel/static/engine.py
+++ b/python/paddle/distributed/auto_parallel/static/engine.py
@@ -1378,7 +1378,7 @@ def _initialize(self, mode, init_parameters=True):
             self.program_helper.init_pir(
                 self._pir_dist_main_progs[mode], self._place
             )
-        changed_ouput_op_list = []
+        changed_output_op_list = []
         if self._executor is None:
             self._executor = paddle.static.Executor(self._place)
             startup_prog = self._startup_progs[mode].clone()
@@ -1436,7 +1436,7 @@ def _initialize(self, mode, init_parameters=True):
                             )
                             if src_value.persistable:
                                 src_value.persistable = False
-                                changed_ouput_op_list.append(op)
+                                changed_output_op_list.append(op)
                             op.operand(0).set_source(reshard_var)
                     for del_op in del_ops:
                         del_op.erase()
@@ -1446,7 +1446,7 @@ def _initialize(self, mode, init_parameters=True):
             paddle.base.libpaddle.pir.apply_dist2dense_pass(startup_prog)
             remove_unuseful_comm_op_pass(startup_prog)
 
-            for op in changed_ouput_op_list:
+            for op in changed_output_op_list:
                 op.operand_source(0).persistable = True
             self._executor.run(startup_prog)
             if self._job_plan is not None:
diff --git a/python/paddle/jit/dy2static/program_translator.py b/python/paddle/jit/dy2static/program_translator.py
index 171969478ab34..cb22d794fcf0a 100644
--- a/python/paddle/jit/dy2static/program_translator.py
+++ b/python/paddle/jit/dy2static/program_translator.py
@@ -136,7 +136,7 @@ def check_view_api_used_by_inplace(program: paddle.pir.Program) -> None:
     skipped_inplace_ops = [
        "pd_op.set_value_",
        "pd_op.set_value_with_tensor_",
-        # It willn't change tensor imdeiately,but it's ouput is dangerous.
+        # It won't change the tensor immediately, but its output is dangerous.
"pd_op.share_data_", ] diff --git a/test/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py b/test/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py index 2aad36e6d01d0..fd60d7b65193a 100644 --- a/test/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py +++ b/test/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py @@ -97,7 +97,7 @@ def generate_scale_bias(): "Scale": ["affine_channel_scale"], "Bias": ["affine_channel_bias"], }, - outputs={"Out": ["affine_channel_ouput"]}, + outputs={"Out": ["affine_channel_output"]}, data_layout=data_format, ) if has_bias: @@ -121,7 +121,7 @@ def generate_scale_bias(): data_gen=partial(generate_scale_bias) ), }, - outputs=["affine_channel_ouput"], + outputs=["affine_channel_output"], ) if has_bias: program_config.weights["conv2d_bias"] = TensorConfig(